# -*- coding: utf-8 -*-
'''
Created on Mar 25, 2013

@author: LONG HOANG GIANG
'''

import os, sys
# Make the parent directory importable so the shared pyLib helper module resolves.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from urlparse import urljoin
import pyLib
import re
import workerpool
import json
import logging

# Log everything from DEBUG up with a dd/mm/yyyy timestamp prefix.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')


def get(item):
    """Fetch one chapter page, extract its text and store it gzipped on disk.

    item -- dict with keys:
        'name': chapter title, prepended as a bold header to the stored HTML
        'url' : absolute URL of the chapter page
        'path': output file name (crc32 of the url) under OUTPUT/files/

    Relies on the module-level OUTPUT directory set by process().
    Raises Exception when the extracted content is empty.
    """
    url = item['url']
    logging.info('> get detail from {0}'.format(url))
    tree = pyLib.loadWeb(url, cached=True).build_tree()
    # The chapter body lives in the second table inside the #content div.
    contentNode = tree.xpath("//div[@id='content']//table[2]")[0]
    html = pyLib.cleanHTML(pyLib.Etree.tostring(contentNode)).strip()
    if html == '':
        # Fail loudly so broken pages surface instead of writing empty files.
        raise Exception("CONTENT NULL - URL: {0}".format(url))
    html = re.sub(r"\n", "<br />", html)
    html = "<b>{0}</b><br /><hr /><br />{1}".format(item['name'], html)
    pyLib.gzip(OUTPUT + 'files' + pyLib.PATH_SEPARATE + item['path'], json.dumps(html))

def process(url):
    
    global OUTPUT
    OUTPUT = "/longhoanggiang/truyentext/{0}/".format(pyLib.extractText(r"/([^/]+)\.html", url, 1))
    
    data = []
    while True:
        print '> process URL: {0}'.format(url)
        tree = pyLib.loadWeb(url).build_tree()
        xpathList = ["//div[@class='bt_pagination'][1]/following-sibling::div[@class='danh_sach']/a", "//div[@id='truyen_tranh_chi_tiet']//div[@class='danh_sach']/a"]
        for xpathStr in xpathList:
            nodes = tree.xpath(xpathStr)
            for node in nodes:
                title = re.sub(r"--\d+--", "", pyLib.stringify(node)).replace(' : ', ': ')
                nodeLink = node.get('href', '')
                if nodeLink == '': continue
                nodeLink = urljoin(url, nodeLink)
                data.append({'name': title, 'url': nodeLink, 'path': pyLib.crc32unsigned(nodeLink)})
            if len(nodes) > 0: break
        nextPageNode = tree.xpath("//div[@class='bt_pagination'][1]/div[@class='active']/following-sibling::*[1]/a")
        if len(nextPageNode) == 0: break
        nextPageNode = nextPageNode[0]
        href = nextPageNode.get('href', '').strip()
        if href == '': break
        url = urljoin(url, href)
        
    pool = workerpool.WorkerPool(size=2)
    pool.map(get, data)
    pool.wait()
    pool.shutdown()
    
    for item in data:
        del item['url']
    jsData = pyLib.encryptCipher(json.dumps(data))
    pyLib.gzip(OUTPUT + 'data', jsData)
    

if __name__ == '__main__':

    url = raw_input("Enter url: ")
    if url.startswith("http"):
        # Bug fix: process() used to be called a second time unconditionally,
        # crawling the same story twice (and even for non-http input).
        process(url)
    else:
        logging.error('> Invalid url (must start with "http"): {0}'.format(url))
    logging.info('> Finished')
    # os._exit (not sys.exit) force-kills any worker threads still lingering;
    # exit status 0 because reaching this point means the run completed.
    os._exit(0)
    

                