# -*- coding: utf-8 -*-
'''
Created on May 5, 2013

@author: LONG HOANG GIANG
'''
import os, sys
from urlparse import urljoin
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pyLib
import logging
import workerpool
import json
import re
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')


##############################################
class Job(workerpool.Job):
    """Workerpool job that simply invokes a stored callable.

    Captures a function together with its positional and keyword
    arguments at construction time; ``run()`` applies them.
    """

    def __init__(self, function, *args, **kwargs):
        # Keep the callable and both argument bundles for later dispatch.
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Execute the wrapped callable with the captured arguments."""
        self.function(*self.args, **self.kwargs)

class Crawler():
    
    _config_path = '/longhoanggiang/truyentext/4phuong'
    
    def getListStories(self, catUrl):
        url = catUrl
        doTime = 0
        bookies = []
        while doTime < 100:
            doTime += 1
            tree = pyLib.loadWeb(url).build_tree()
            for node in tree.xpath("//table[@class='ebookboxitem']/tr[@class='IntroBookItemRow']"):
                tNode = node.xpath(".//div[@class='IntroBookTitle']/a")[0]
                title = pyLib.stringify(tNode)
                href = tNode.get('href')
                if href == '': continue
                href = urljoin(catUrl, href)
                thumbNode = node.xpath(".//div[@class='bg_bookimg']//a/img")
                thumbnail = ''
                if len(thumbNode) > 0:
                    
                    thumbnail = thumbNode[0].get('src')
                    if thumbnail != '':
                        thumbnail = urljoin(catUrl, thumbnail)
                        
                authorNode = node.xpath(".//div[@class='IntroBookAuthor']")
                author = pyLib.stringify(authorNode)
                bookies.append({'name': title, 'author': author, 'thumbnail': thumbnail, 'url': href, 'path': pyLib.crc32unsigned(href)})
                print '-------------------------'
                print title, author
                print thumbnail
                print '-------------------------'
            pageNode = tree.xpath("//div[@class='catepagination']//b/span[@class='splitactive']/../following-sibling::*[1]")
            if len(pageNode) == 0: break
            href = pageNode[0].get('href')
            if href == '': break
            url = urljoin(catUrl, href)
        return bookies
    
    def detail(self, item, path):
        print path
        logging.info("detail {0}".format(item['name']))
        tree = pyLib.loadWeb(item['url']).build_tree()
        contentNode = tree.xpath("//div[@class='contentboxcontent']")[0]
        pyLib.Etree.clean(contentNode.xpath(".//b[contains(., 'Xem nhanh: ')]"))
        pyLib.Etree.clean(contentNode.xpath(".//select[contains(@class, 'selectchapter')]"))
        pyLib.Etree.clean(contentNode.xpath(".//table[@class='chaptercontrol']"))
        pyLib.Etree.clean(contentNode.xpath(".//table[@class='source']"))
        text = pyLib.cleanHTML(pyLib.Etree.tostring(contentNode), keep=['b', 'strong']).strip()
        print text
        text = re.sub(r"\n", "<br />", text)
        text = """<b>{0}</b><br /><hr /><br />{1}""".format(item['name'], text)
        pyLib.gzip(path + "/" + item['path'], json.dumps(text))
                   
    def getChapters(self, item):
        url = item['url']
        tree = pyLib.loadWeb(url).build_tree()
        data = []
        for node in tree.xpath("//select[@class='selectchapter']/option"):
            name = pyLib.stringify(node)
            name = re.sub(r": +:", "", name).strip()
            href = node.get('value', '')
            if href == '': continue
            name = pyLib.toUpper(name)
            href = urljoin(url, href)
            data.append({'name': name, 'path': pyLib.crc32unsigned(href), 'url': href})
        return data
        
    
    def processStory(self, item):
        logging.info("processStory({0}:{1})".format(item['name'], item['path']))
        path = self._config_path + "/" + item['path']
        pyLib.createIfNotExistsPath(path + "/files")
        chapters = self.getChapters(item)
        
        print '------------------------------------------'
        for chapter in chapters:
            print chapter['name']
        print '------------------------------------------'    
        
        if len(chapters) == 0: raise
        
        if len(chapters) > 0: 
            for chapter in chapters:
                print chapter
            
            pool = workerpool.WorkerPool(size=5)
            for chapter in chapters:
                pool.put(Job(self.detail, chapter, path + "/files"))
            pool.shutdown()
            pool.wait()
            
            for item in chapters:
                del item['url']
            pyLib.gz_file_put_content(path + "/data", pyLib.encryptCipher(json.dumps(chapters)))
            
            return True
        return False
        
    
    def processCat(self, catUrl):
        catId = pyLib.extractText(r"4phuong.net/[^/]+/\d+/([^/]+)\.html", catUrl, 1)
        if re.search(r"4phuong.net/[^/]+/\d+/([^/]+)/\d+\.html", catUrl):
            catId = pyLib.extractText(r"4phuong.net/[^/]+/\d+/([^/]+)/?", catUrl, 1)
        self._config_path = "{0}/{1}".format(self._config_path, catId)
        stories = self.getListStories(catUrl)
        data = []
        for story in stories:
            aa = self.processStory(story)
            print '----------------------------------------'
            print aa
            print '----------------------------------------'
            if aa:
                data.append(story)
        
        for s in data:
            del s['url']
        
        pyLib.gz_file_put_content(self._config_path + "/data", pyLib.encryptCipher(json.dumps(data)))
        
        
if __name__ == '__main__':
    
    # Entry point: crawl one hard-coded category end-to-end.
    c = Crawler()
#    c.processStory({'name': 'Qua tang', 'url': 'http://4phuong.net/ebook/14709692/qua-tang-cuoc-song.html', 'path': 'abcedf'})
    c.processCat('http://4phuong.net/subcate/14/suy-ngam-lam-nguoi.html')
    
    logging.info("finished")
    # NOTE(review): os._exit skips interpreter cleanup — presumably to kill
    # any lingering workerpool threads — but exit status 1 conventionally
    # signals failure even on a successful run; confirm 0 was not intended.
    os._exit(1)