# -*- encoding: utf-8 -*-
'''
Created on Feb 1, 2013

@author: LONG HOANG GIANG
'''

import datetime
import os
import sys
import re
from urlparse import urljoin
import workerpool
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib2 import commonlib, html2text
from workerpool import WorkerPool
from Model.storymodelv3 import StoryModel


# Session cookie string captured from a logged-in browser session on
# tunghoanh.com (PHPSESSID + Google Analytics cookies); sent with every
# request so the site serves the full chapter content.
# NOTE(review): this session will expire — refresh before re-running.
cookiestr = 'fontcolor=%23000000; backcolor=%23FFFFFF; ann_show=1; __utma=180264865.1128926995.1359687100.1359703416.1359800826.4; __utmz=180264865.1359687123.2.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); location.href=1; PHPSESSID=t0h4dj6aro9gsnt9p6iv3tpbl5; ad_play_index=73; __utmb=180264865.2.10.1359800826; __utmc=180264865'
# Landing page of the story being scraped; also used as the Referer header
# for chapter requests.
sUrl = 'http://tunghoanh.com/co-vo-tong-giam-doc-xinh-dep-cua-toi-lgaaaab.html'


def fetchChapter(chapid, referer):
    url = 'http://tunghoanh.com/chapter/{0}.html'.format(chapid)
    html = "<html>" + commonlib.loadweb(url, nocache=True, cookie=cookiestr, referer=referer, ajax=True).gethtml() + "</html>"
    tree = commonlib.buildTreeFromHtml(html)
    h = html2text.html2text(commonlib.Etree.tostring(tree.xpath("//body"))).encode('utf-8')
    html = h.strip()
    for match in re.finditer("(c|C)hương \d+", html):
        if match.start() > -1:
            html = html[match.start():]
    return html.strip()
    
def getChapter(item, referer):
    tree = commonlib.loadweb(item['url'], cookie=cookiestr, referer='http://tunghoanh.com/co-vo-tong-giam-doc-xinh-dep-cua-toi-lgaaaab.html').build_tree()
    contentNode = tree.xpath("//div[@id='chapter_content']")[0]
    content = commonlib.stringify(contentNode)
    chapid = commonlib.extractText("FetchChapter\(\"(.+)\"\)", content, 1)
    html = fetchChapter(chapid, item['url'])
    html = re.sub("( *\<br /\> *){3,}", "<br /><br />", re.sub(r"\n", "<br />", html))
    return html

def getAllChapter():
    url = 'http://tunghoanh.com/co-vo-tong-giam-doc-xinh-dep-cua-toi-lgaaaab.html'
    tree = commonlib.loadweb(url, cookie=cookiestr).build_tree()
    data = []
    for item in tree.xpath("//div[@class='info_bullet']/following-sibling::div[@class='story_chapter']/div[@class='chapter']//a"):
        title = commonlib.stringify(item)
        href = urljoin(url, commonlib.normalize_str(item.get('href', '')))
        if href == '': continue
        data.append({'title': title, 'url': href})
    
#    pool = WorkerPool(size=1)
#    for item in data:
#        pool.put(WorkerJob(item))
#    pool.shutdown()
#    pool.wait()
    
#    for i in range(1064, 1131):
#        item = data[i]
#        detail = getChapter(item, sUrl)
#        print detail[:200]
    
    smodel = StoryModel(DATABASE)
    smodel.open()
    for i in range(1064, 1131):
        item = data[i]
        detail = getChapter(item, sUrl)
        print detail[:200]
        if detail.strip() == '': raise Exception, 'NULL CONTENT'
        smodel.add_story(commonlib.toUpper(item['title']), detail, 0)
    smodel.close()
        
class WorkerJob(workerpool.Job):     
    
    def __init__(self, item):
        self.item = item
    
    def run(self):
        print getChapter(self.item, sUrl)
        

if __name__ == '__main__':

    # Database path for this story ('cvtgdxd' = story slug).  getAllChapter
    # reads this module-level global, so it must be assigned before the call.
    # NOTE(review): backing store format is defined by StoryModel — confirm
    # in Model.storymodelv3.
    DATABASE = '/longhoanggiang/database/{0}'.format('cvtgdxd')
    getAllChapter()

    print '> Finished at {0}'.format(datetime.datetime.now())