# -*- coding: utf-8 -*-
'''
Created on Nov 8, 2013

@author: LONG HOANG GIANG
'''
from urlparse import urljoin
from lxml import etree
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import json
import workerpool
import re

# Global logging setup: DEBUG level so every scraped URL / worker action is visible,
# with a day-first timestamp format (dd/mm/YYYY).
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class MyJob(workerpool.Job):
    """Adapter that lets the worker pool run an arbitrary callable.

    Wraps a one-argument callable (`func`) and the chapter dict it should
    receive; the pool calls run(), which simply forwards the chapter to
    the callable.
    """

    def __init__(self, func, chapter):
        # Stash the work function and its single argument for later.
        self.func = func
        self.chapter = chapter

    def run(self):
        """Entry point invoked by a worker thread."""
        self.func(self.chapter)

class SanTruyen():
    
    datapath = '/longhoanggiang/truyentext'
    
    def __init__(self):
        pass
            
    def getDetail(self, item):
        url = item['url']
        tree = lib.Web.load(url).build_tree()
        contentNode = tree.xpath("//div[@class='contents-comic'][2]")[0]
        html = lib.Etree.tostring(contentNode)
        html = re.sub(r"\<span class=\"copyright_san_truyen\"\>([^<]+)</span>", "", html)
        text = lib.html2text(html)
        text = '''<h4>{0}</h4><div id='content'>{1}</div>'''.format(item['name'], text)
        print text
        lib.gz_file_put_content(json.dumps(text), self.datapath + "/files/" + item['path'])
        
    def getChapters(self, url):
        tree = lib.Web.load(url).build_tree()
        data = []
        for node in tree.xpath("//div[@id='_pchapter']/ul//a"):
            name = lib.stringify(node)
            link = node.get('href')
            if link == '': continue
            link = urljoin(url, link)
            name = lib.toUpper(name)
            data.append({'name': name, 'url': link, 'path': lib.crc32unsigned(link)})
        return data
    
    def process(self, url):
        storyname = lib.extractText(r"santruyen\.com/(.+)\.html", url, 1)
        self.datapath = self.datapath + "/" + storyname
        print '>>>> data path: ' + self.datapath
        lib.makedir(self.datapath + "/files")
        chapters = self.getChapters(url)
        pool = workerpool.WorkerPool(size=5)
        for chapter in chapters:
            pool.put(MyJob(self.getDetail, chapter))
        pool.shutdown()
        pool.wait()
        for chapter in chapters:
            del chapter['url']
        lib.gz_file_put_content(lib.encryptCipher(json.dumps(chapters)), self.datapath + "/data")
        

if __name__ == '__main__':

    url = raw_input("Enter URL: ")
    # Accept both plain and TLS URLs; startswith() takes a tuple of
    # prefixes (the original rejected https:// links outright).
    if url.startswith(('http:', 'https:')):
        t = SanTruyen()
        t.process(url)
    else:
        logging.warning("invalid url")
    logging.info("finished")
    # os._exit() (not sys.exit) hard-terminates the process so any
    # lingering worker-pool threads cannot keep it alive.  Exit with
    # status 0: the original's os._exit(1) reported failure to the
    # shell even after a successful run.
    os._exit(0)