# -*- coding: utf-8 -*-
'''
Created on Aug 19, 2013

@author: LONG HOANG GIANG
'''
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pyLib
import json
import re
import gzip
import logging
import workerpool
from urlparse import urljoin

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')


class MyJob(workerpool.Job):
    """Adapter turning a (callable, single argument) pair into a workerpool Job.

    The pool's worker thread calls run(), which applies the stored callable
    to the stored argument.
    """

    def __init__(self, func, arg):
        # Keep the deferred call as one immutable pair until run() fires.
        self._task = (func, arg)

    def run(self):
        func, arg = self._task
        func(arg)
        

class DocTruyen360():
    """Scraper for doctruyen360.com stories.

    Collects chapter links from one or more story index pages, downloads and
    cleans each chapter's HTML via the project-local ``pyLib`` helpers, and
    writes the result as gzipped files plus an encrypted chapter index.
    """

    # Root directory under which each scraped story gets its own subfolder.
    __output_folder = '/longhoanggiang/truyentext'

    def getDetail(self, item):
        """Download one chapter and persist its cleaned HTML content.

        item -- dict with 'name' (chapter title), 'url' (absolute chapter
                url) and 'path' (CRC32 of the url, used as the file name).
        """
        logging.info("Get detail of [{0}]".format(item['name']))
        html = pyLib.loadWeb(item['url']).gethtml()
        content = pyLib.getHTMLContent(html)
        tree = pyLib.buildTree(content)
        # Strip site chrome: everything after the "Đọc tiếp" (read more)
        # marker and after the trailing 'cleaner' element is not story text.
        pyLib.Etree.cleanNodeNextSibling(tree.xpath("//strong[contains(., 'Đọc tiếp')]/..".decode('utf-8')), True)
        pyLib.Etree.cleanNodeNextSibling(tree.xpath("//*[@class='cleaner']"), True)
        content = pyLib.Etree.tostring(tree.xpath("//body/*"))
        # Was a bare `print content`; route through logging so the chapter
        # body does not spam stdout alongside the configured log output.
        logging.debug(content)
        pyLib.gz_file_put_content(self.__output_folder + "/files/" + item['path'], json.dumps(content))

    def getLink(self, url):
        """Return the list of chapter dicts found on a story index page.

        Each entry is {'name': title, 'url': absolute link, 'path': crc32}.
        The site lists newest chapters first, so the result is reversed to
        chronological reading order.
        """
        data = []
        tree = pyLib.loadWeb(url).build_tree()
        for aNode in tree.xpath("//div[@class='entry']//li/a"):
            title = pyLib.stringify(aNode)
            link = aNode.get('href', '')
            if link == '':
                continue  # anchor without a target: nothing to fetch
            # Resolve relative hrefs against the index page url.
            link = urljoin(url, link)
            data.append({'name': title, 'url': link, 'path': pyLib.crc32unsigned(link)})
        data.reverse()
        return data

    def process(self, url):
        """Scrape every index page in *url* (a list of pages belonging to one
        story), download all chapters in parallel, then write the encrypted
        chapter index (without urls) next to the chapter files.
        """
        if not url:
            # Guard: url[0] below would raise IndexError on an empty list.
            return
        data = []
        # Derive the story's folder name from its slug in the first url.
        self.__output_folder = self.__output_folder + "/" + pyLib.extractText(r"/doc-truyen-([^/]+)", url[0], 1)
        pyLib.createIfNotExistsPath(self.__output_folder + "/files")
        # Was a bare `print`; keep the path visible via the configured logger.
        logging.info(self.__output_folder)
        for _url in url:
            data.extend(self.getLink(_url))

        # Fan the downloads out over 5 worker threads.
        pool = workerpool.WorkerPool(size=5)
        for chapter in data:
            pool.put(MyJob(self.getDetail, chapter))
        pool.shutdown()
        pool.wait()

        # The index only needs name + path; drop the urls before writing.
        for d in data:
            del d['url']
        pyLib.gz_file_put_content(self.__output_folder + "/data", pyLib.encryptCipher(json.dumps(data)))
        
            
if __name__ == '__main__':

    # Index page(s) of the story to scrape; all urls must share one slug.
    url = ['http://www.doctruyen360.com/doc-truyen-boss-qua-gian-xao-full/'
           ]
    t = DocTruyen360()
    t.process(url)

    logging.info("finished")
    # Exit with 0 on success (the original exited with 1, which reads as
    # failure to the shell). os._exit skips normal interpreter shutdown,
    # presumably to kill any lingering workerpool threads — TODO confirm
    # the pool's threads are non-daemon before relying on this.
    os._exit(0)
        