# -*- coding: utf-8 -*-
'''
Created on Apr 22, 2013

@author: LONG HOANG GIANG
'''
import os, sys
from urlparse import urljoin
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pyLib
import logging
import workerpool
import json
import re
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

##############################################
class Job(workerpool.Job):
    """A workerpool job wrapping an arbitrary callable and its arguments."""

    def __init__(self, function, *args, **kwargs):
        # Remember the callable and its full call signature; it is invoked
        # later on a worker thread via run().
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Invoke the stored callable (called by the worker thread)."""
        self.function(*self.args, **self.kwargs)
##############################################        

def detail(item, path):
    """Fetch one chapter page, strip navigation chrome, and save it gzipped.

    item -- dict with 'name' (chapter title), 'url' (chapter page) and
            'path' (output file name) keys.
    path -- output directory for the gzipped JSON chapter text.
    """
    logging.info("detail {0}".format(item['name']))
    tree = pyLib.loadWeb(item['url']).build_tree()
    content = tree.xpath("//div[@class='contentboxcontent']")[0]
    # Strip the in-page navigation widgets before extracting the story text.
    for selector in (".//b[contains(., 'Xem nhanh: ')]",
                     ".//select[contains(@class, 'selectchapter')]",
                     ".//table[@class='chaptercontrol']",
                     ".//table[@class='source']"):
        pyLib.Etree.clean(content.xpath(selector))
    text = pyLib.cleanHTML(pyLib.Etree.tostring(content), keep=['b', 'strong']).strip()
    text = re.sub(r"\n", "<br />", text)
    text = """<b>{0}</b><br /><hr /><br />{1}""".format(item['name'], text)
    pyLib.gzip(path + "/" + item['path'], json.dumps(text))

def chapters(url):
    """Scrape the chapter selector of a story page.

    Returns a list of dicts with 'name' (upper-cased title), 'path'
    (unsigned CRC32 of the absolute chapter URL) and 'url' keys.
    """
    logging.info("start story({0})".format(url))
    tree = pyLib.loadWeb(url).build_tree()
    result = []
    for option in tree.xpath("//select[@class='selectchapter']/option"):
        href = option.get('value', '')
        if not href:
            # Placeholder <option> entries carry no link; skip them.
            continue
        title = re.sub(r": +:", "", pyLib.stringify(option)).strip()
        title = pyLib.toUpper(title)
        absolute = urljoin(url, href)
        result.append({'name': title,
                       'path': pyLib.crc32unsigned(absolute),
                       'url': absolute})
    return result
        
def story(url):
    """Crawl a story page: enqueue every chapter for download and write an
    encrypted index file.

    url -- a single story URL, or a list of story URLs (each is crawled
           in turn by recursing).

    Side effects: creates the output directory tree, writes one gzipped
    chapter file per chapter (via detail() on 5 worker threads), then
    writes the encrypted chapter index to <path>/data.
    """
    # Accept either a single URL string or a list of URLs.
    # (isinstance replaces the fragile type(url).__name__ == 'list' check.)
    if isinstance(url, list):
        for single_url in url:
            story(single_url)
        return
    logging.info("start story({0})".format(url))
    path = "/longhoanggiang/truyentext/{0}".format(pyLib.extractText(r"/([^/]+)\.html", url, 1))
    subpath = path + "/files"
    pyLib.createIfNotExistsPath(subpath)
    data = []
    tree = pyLib.loadWeb(url).build_tree()
    pool = workerpool.WorkerPool(size=5)
    for node in tree.xpath("//select[@class='selectchapter']/option"):
        name = pyLib.stringify(node)
        name = re.sub(r": +:", "", name).strip()
        href = node.get('value', '')
        if href == '':
            continue
        # Entries containing a dash are treated as the introduction page.
        # NOTE(review): this matches ANY name with a '-' in it — looks
        # intentionally broad for this site, but confirm.
        if re.search(r"[\-]+", name):
            name = "GIỚI THIỆU"
        name = pyLib.toUpper(name)
        href = urljoin(url, href)
        # Compute the chapter's file id once (was computed twice before).
        file_id = pyLib.crc32unsigned(href)
        data.append({'name': name, 'path': file_id})
        pool.put(Job(detail, {'name': name, 'url': href, 'path': file_id}, subpath))
    pool.shutdown()
    pool.wait()
    pyLib.gz_file_put_content(path + "/data", pyLib.encryptCipher(json.dumps(data)))


def cat(url):
    # TODO: unimplemented stub — builds the category output path and then
    # discards it; crawling of category/listing pages was never written.
    path = "/longhoanggiang/truyentext/__{0}".format(pyLib.extractText(r"/([^/]+)\.html", url, 1))


if __name__ == '__main__':
    
    import argparse
    parser = argparse.ArgumentParser()
    try:
        parser.add_argument('-u', '--url', help='crawl list of story', action='append')
        args = parser.parse_args()
        if len(args.url) > 0:
            story(args.url)
        logging.info("finished")
    except:
        print sys.exc_info()[1]
        parser.print_help()
    os._exit(1)