# -*- coding: utf-8 -*-
'''
Created on Mar 25, 2013

@author: LONG HOANG GIANG
'''
from urlparse import urljoin
import re
import workerpool
import datetime
import os
import pyLib
import json

# Category listing URL templates for truyencuoivova.net.
# {0} is the 1-based listing page number (filled in by process()).
URL_CUOI_DAN_GIAN = 'http://www.truyencuoivova.net/category/truy%E1%BB%87n-c%C6%B0%E1%BB%9Di-dan-gian/page/{0}'  # folk jokes
URL_CUOI_VOVA = 'http://www.truyencuoivova.net/category/truy%E1%BB%87n-c%C6%B0%E1%BB%9Di-vova-full/page/{0}/'  # Vova jokes
URL_TIEU_LAM = 'http://www.truyencuoivova.net/category/truyen-cuoi-tieu-lam/page/{0}/'  # "tieu lam" jokes

def get(item):
    print '> get {0}'.format(item['url'])
    tree = pyLib.loadWeb(item['url'], cached=True).build_tree()
    entryNode = tree.xpath("//div[@class='entry']")[0]
    pyLib.Etree.cleanNodeNextSibling(entryNode.xpath("./p/span[contains(., 'Tags : ')]/.."))
    pyLib.Etree.cleanNodeNextSibling(entryNode.xpath("./div[contains(., 'VN:F [') and @style='display: none']"))
    html = pyLib.cleanHTML(pyLib.Etree.tostring(entryNode).encode('utf-8'), ['b', 'strong'])
    print html
    html = re.sub(r"\n", "<br />", html)
    pyLib.gzip(OUTPUT + 'files' + pyLib.PATH_SEPARATE + item['id'], json.dumps(html))

def process(fm, minPage, maxPage):
    data = []
    idx = 0
    for page in range(minPage, maxPage+1):
        url = fm.format(page)
        tree = pyLib.loadWeb(url).build_tree()
        print '-== PAGE: {0} ==-'.format(page)
        for item in tree.xpath("//div[@id='contentwrap']//div[@id='content']//div[contains(@class, 'post-')]/h2/a"):
            try:
                title = pyLib.stringify(item)
                href = item.get('href', '').strip()
                if href == '': continue
                href = urljoin(url, href)
                print title, href
                iitem = {'title': title, 'url': href, 'id': pyLib.crc32unsigned(href)}
                get(iitem)
                data.append(iitem)
                idx += 1
            except:
                pass
#    pool = workerpool.WorkerPool(size=3)
#    pool.map(get, data)
#    pool.shutdown()
#    pool.wait()
    for item in data:
        del item['url']
    pyLib.gzip(OUTPUT + 'data', json.dumps(data))
    
        

if __name__ == '__main__':
    
    OUTPUT = '/longhoanggiang/truyentext/tieulam/'
    pyLib.createIfNotExistsPath(OUTPUT + 'files' + pyLib.PATH_SEPARATE)
    process(URL_TIEU_LAM, 1, 21)
    print '> Finished at {0}'.format(datetime.datetime.now())
    os._exit(1)
    