# -*- coding: utf-8 -*-
'''
Created on Sep 7, 2013

@author: LONG HOANG GIANG
'''

from urlparse import urljoin
import logging
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import json
import workerpool

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')


class Thread(workerpool.Job):
    """workerpool job that calls ``func(chapter, path)`` on a worker thread."""

    def __init__(self, func, chapter, path):
        # Stash the callable and its two arguments until the pool
        # schedules this job.
        self.func, self.chapter, self.path = func, chapter, path

    def run(self):
        # Invoked by a pool worker; delegates straight to the callable.
        self.func(self.chapter, self.path)


class Alobooks():
    
    def validateReadingUrl(self, url):
        if re.search(r"/doc-sach-truc-tuyen/\d+/[^/]+.html", url):
            return True
        return False
    
    def getListChapter(self, url):
        #if not self.validateReadingUrl(url):
        #    logging.warn("Not invalid reading url")
        #    return
        tree = lib.Web.load(url).build_tree()
        chapters = [{'name': 'GIỚI THIỆU', 'url': url, 'path': 'gioithieu'}]
        for node in tree.xpath("//div[contains(@id, 'book-navigation')]//li/a"):
            name = lib.stringify(node)
            if type(name).__name__ == 'unicode':
                name = name.encode('utf-8')
#             name = re.sub(r"\: ", ": ", name)
            name = lib.toUpper(name)
            link = node.get('href', '')
            if link == '': continue
            link = urljoin(url, link)
            chapters.append({'name': name, 'url': link, 'path': lib.crc32unsigned(link)})
            print name, link
        return chapters
        
    def getDetail(self, item, path):
        logging.info("get detail chapter: {0} in url: {1}".format(item['name'], item['url']))
        url = item['url']
        tree = lib.Web.load(url).build_tree()
        if re.search(r"doc-sach-truc-tuyen/\d+/", url):
            contentNode = tree.xpath("//div[@class='content node-doc-truc-tuyen']/div[1]")[0]
        else:
            for xp in ["//div[@class='content node-book']", "//div[@class='content node-forum']"]:
                contentNode = tree.xpath(xp)
                if len(contentNode) > 0: 
                    contentNode = contentNode[0]
                    break
            lib.Etree.cleanNode(".//div[contains(@id, 'book-navigation-')]", contentNode)
        text = lib.html2text(contentNode)
        text = '''<h4 class='title'>{0}</h4><div id="content">{1}</div>'''.format(item['name'], text)
        print text[:200]
        
        lib.gz_file_put_content(json.dumps(text), path + "/files/" + item['path'])
        
        return text
    
    def crawlOneStory(self, url):
        chapters = self.getListChapter(url)
        folder = "/longhoanggiang/truyentext/{0}".format(lib.extractText(r"/\d+/([^/]+)\.html", url, 1))
        lib.makedir(folder + '/' + 'files')
        
        pool = workerpool.WorkerPool(size=5)
         
        for chapter in chapters:
            pool.put(Thread(self.getDetail, chapter, folder))
        pool.shutdown()
        pool.wait()
             
        for chapter in chapters:
            del chapter['url']
        lib.gz_file_put_content(lib.encryptCipher(json.dumps(chapters)), folder + "/data")


if __name__ == '__main__':

    # Entry point: prompt for a story URL and crawl it.
    url = raw_input("Enter URL: ")
    if url.startswith("http://"):
        alobooks = Alobooks()
        alobooks.crawlOneStory(url)

        logging.info("finished")
        # Hard-exit so lingering worker threads cannot keep the process
        # alive. 0 signals success (the original exited 1 on every path,
        # reporting failure even after a successful crawl).
        os._exit(0)
    else:
        logging.warning("Invalid URL")
        os._exit(1)