# -*- coding: utf-8 -*-
'''
Created on Oct 9, 2013

@author: LONG HOANG GIANG
'''
from urlparse import urljoin
import logging
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import json
import workerpool

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class MyJob(workerpool.Job):
    """Workerpool job that applies a callable to a single chapter dict."""

    def __init__(self, func, chapter):
        """Remember the worker callable and the chapter it will be given."""
        self.func = func
        self.chapter = chapter

    def run(self):
        """Invoke the stored callable with the stored chapter (pool entry point)."""
        self.func(self.chapter)
        
class Site():
    
    catFolder = ""
    
    def getDetail(self, chapter):
        print '> get detail of {0} with url {1}'.format(chapter['name'], chapter['url'])
        tree = lib.Web.load(chapter['url']).build_tree()
        contentNode = tree.xpath("//div[@class='contentboxcontent']/div/div[2]")[0]
        content = lib.html2text(contentNode)
        content = re.sub(r"\n", "<br />", content)
        print content
        content = '''<h4>{0}</h4><div id="content">{1}</div>'''.format(chapter['name'], content)
        path = self.storyPath + "/files/" + chapter['path']
        lib.gz_file_put_content(json.dumps(content), path)
    
    
    def getChapters(self, url):
        logging.info("getChapters - url: {0}".format(url))
        data = []
        tree = lib.Web.load(url).build_tree()
        nodes = tree.xpath("//select[@class='selectchapter']/option")
        if len(nodes) == 0:
            lib.Web.removeCache(url)
            tree = lib.Web.load(url).build_tree()
        for node in nodes:
            name = lib.stringify(node)
            link = node.get('value')
            if name == '' or link == '': continue
            name = lib.toUpper(name)
            link = urljoin(url, link)
            print name, link
            data.append({'name': name, 'url': link, 'path': lib.crc32unsigned(link)})
        #data.reverse()
        return data
    
    def processStory(self, url):
        path = lib.extractText(r"/([^/]+)\.html", url, 1).strip()
        self.storyPath = "/longhoanggiang/truyentext/{0}".format(path) if self.catFolder == '' else "/longhoanggiang/truyentext/{0}".format(self.catFolder).format(path)
        print self.storyPath
        lib.makedir(self.storyPath + "/files")
        chapters = self.getChapters(url)
        pool = workerpool.WorkerPool(size=5)
        for chapter in chapters:
            pool.put(MyJob(self.getDetail, chapter))
        pool.shutdown()
        pool.wait()
        for c in chapters:
            del c['url']
        if len(chapters) > 0:
            lib.gz_file_put_content(lib.encryptCipher(json.dumps(chapters)), self.storyPath + "/data")


if __name__ == '__main__':

    url = raw_input("Enter URL: ")
    if url.startswith("http"):
        t = Site()
        t.processStory(url)
    else:
        # warning() is the non-deprecated spelling of warn().
        logging.warning("Invalid url")
    logging.info("finished")
    # os._exit skips interpreter cleanup but guarantees termination even if
    # non-daemon pool threads linger.  BUG FIX: exit with status 0 -- the
    # original returned 1, which conventionally signals failure to the shell.
    os._exit(0)