# -*- coding: utf-8 -*-
'''
Created on Oct 4, 2013

@author: LONG HOANG GIANG
'''
from urlparse import urljoin
import logging
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import json
import workerpool

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class MyJob(workerpool.Job):
    """Workerpool job: when a worker picks it up, invoke ``func(chapter)``."""

    def __init__(self, func, chapter):
        # Keep the callback and its single argument until run() is called
        # on a worker thread.
        self.func, self.chapter = func, chapter

    def run(self):
        """Execute the stored callback with the stored chapter dict."""
        self.func(self.chapter)


class TungHoanh():
    
    catFolder = ""
    
    
    def getChapters(self, url):
        logging.info("getChapters - url: {0}".format(url))
        data = []
        while url != '':
            tree = lib.Web.load(url).build_tree()
            nodes = tree.xpath("//div[@id='table-cm']/li")
            if len(nodes) == 0:
                lib.Web.removeCache(url)
                tree = lib.Web.load(url).build_tree()
            for item in nodes:
                node = item.xpath("./div[1]/a")[0]
                name = lib.stringify(node)
                link = node.get('href')
                if name == '' or link == '': continue
                name = lib.toUpper(name)
                link = urljoin(url, link)
                print name, link
                data.append({'name': name, 'url': link, 'path': lib.crc32unsigned(link)})
            
            nextNode = tree.xpath("//div[@id='pagingControls']/span/following-sibling::a[1]")
            if len(nextNode) == 0: break
            link = nextNode[0].get('href')
            if link == '': break
            url = urljoin(url, link)
        data.reverse()
        return data
    
    
    def getDetail(self, chapter):
        print '> get detail of {0} with url {1}'.format(chapter['name'], chapter['url'])
        fd = lib.Web.load(chapter['url'])
        cookie = fd.get_cookie()
        chapterId = lib.extractText(r"/([^/]+)$", chapter['url'], 1)
        post = lib.Web.load("http://tunghoanh.com/chapter/{0}".format(chapterId), referer=chapter['url'], reqAjax=True, cookie=cookie)
        content = lib.extractText(r"<[^>]+>(.+)</body>", post.get_article_content(), 1)
        content = re.sub(r"<br />(\s*<br />\s*)+<br />", "<br /><br />", content)
        print content[:200]
        content = '''<h4>{0}</h4><div id="content">{1}</div>'''.format(chapter['name'], content)
        path = self.storyPath + "/files/" + chapter['path']
        lib.gz_file_put_content(json.dumps(content), path)
        
            
    def processStory(self, url):
        path = lib.extractText(r"/([^/]+)-[^-]+\.html", url, 1).strip()
        self.storyPath = "/longhoanggiang/truyentext/{0}".format(path) if self.catFolder == '' else "/longhoanggiang/truyentext/{0}".format(self.catFolder).format(path)
        print self.storyPath
        lib.makedir(self.storyPath + "/files")
        chapters = self.getChapters(url)
        pool = workerpool.WorkerPool(size=5)
        for chapter in chapters:
            pool.put(MyJob(self.getDetail, chapter))
        pool.shutdown()
        pool.wait()
        for c in chapters:
            del c['url']
        if len(chapters) > 0:
            lib.gz_file_put_content(lib.encryptCipher(json.dumps(chapters)), self.storyPath + "/data")
        


    def processCat(self, url):
        if not url.endswith("/"): url += "/"
        catPath = lib.extractText(r"/([^/]+)/$", url, 1)
        self.catFolder = "__tunghoanh/" + catPath + "/{0}"
        catPath = "__tunghoanh/" + catPath
        lib.makedir(catPath)
        stories = []
        while url != '':
            
            tree = lib.Web.load(url).build_tree()
            for item in tree.xpath("//ul[@class='table-cm']/li[position()>1]"):
                node = item.xpath("./div[contains(@class, 'name td')]//a")[0]
                name = lib.stringify(node)
                link = node.get('href')
                if name == '' or link == '': continue
                link = urljoin(url, link)
                author = lib.stringify(item.xpath("./div[@class='author td']"))
                path = lib.extractText(r"/([^/]+)-[^-]+\.html", link, 1).strip()
                print name, author, path, link
                stories.append({'name': name, 'url': link, 'author': author, 'path': path})
            nextNode = tree.xpath("//div[@class='paging']/a[@class='pagecurrent']/following-sibling::*[1]")
            if len(nextNode) == 0: break
            link = nextNode[0].get('href')
            if link == '': break
            url = urljoin(url, link)
            
        for story in stories:
            self.processStory(story['url'])
            del story['url']
        
        lib.gz_file_put_content(lib.encryptCipher(json.dumps(stories)), "/longhoanggiang/truyentext/{0}".format(catPath + "/data"))





if __name__ == '__main__':

    import argparse
    parser = argparse.ArgumentParser()
    # -c / --cat: treat the URL as a category listing rather than one story.
    parser.add_argument("-c", "--cat", action="store_true", default=False)
    args = parser.parse_args()
    url = raw_input("Enter URL: ")
    # Silently do nothing for non-http input (e.g. an empty line).
    if url.startswith("http"):
        t = TungHoanh()
        if args.cat:
            t.processCat(url)
        else:
            t.processStory(url)
    logging.info("finished")
    # Hard exit so any lingering worker threads cannot keep the process
    # alive.  Bug fix: exit with status 0 — the original used 1, which
    # reports failure to the shell even after a successful run.
    os._exit(0)