# -*- coding: utf-8 -*-
'''
Created on 13-11-2012

@author: LONG HOANG GIANG
'''

import sys
import os
from urlparse import urljoin
import workerpool
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib import commonlib, html2text
from Model.storymodelv3 import StoryModel
import datetime
import re

class TruyenHixx():
    """Crawler for stories hosted on truyen.hixx.info / truyen2.hixx.info.

    Scrapes a story's chapter list, downloads each chapter, normalises the
    body to text-with-<br /> markup, and stores it through StoryModel.
    """

    def __init__(self, url):
        # Landing page of one story, e.g.
        # http://truyen2.hixx.info/truyen/truyen-kiem-hiep/121376/Linh-La-Gioi.html
        self.url = url

    def getListChapter(self):
        """Return [{'title': ..., 'url': ...}, ...] for every chapter.

        Walks every pagination page of the chapter index and tries two
        XPath variants per page (the site has two markup layouts); the
        first variant that matches anything wins for that page.
        """
        maxPage = self.detectPages()
        data = []
        for page in range(1, maxPage + 1):
            url = self.buildPageURL(page)
            tree = commonlib.loadweb(url).build_tree()
            # First XPath anchors on the "Danh sách chương" (chapter list)
            # heading; the second is a looser fallback for the older layout.
            listXpath = [u"//div[contains(., 'Danh sách chương')]/following-sibling::div[@class='danh_sach']/a",
                         u"//div[@class='danh_sach']/a"]
            for xpathStr in listXpath:
                items = tree.xpath(xpathStr)
                for item in items:
                    href = item.get('href', '').strip()
                    if not href:
                        continue
                    data.append({'title': commonlib.stringify(item),
                                 'url': urljoin(url, href)})
                if items:
                    break  # this layout matched; skip the fallback XPath
        return data

    def getDetail(self, url):
        """Fetch one chapter page and return its body as <br />-joined HTML.

        The site mirrors content on truyen.hixx.info and truyen2.hixx.info;
        when the first fetch fails, the hostname is swapped and the fetch is
        retried once.  Returns '' when neither host yields usable content.
        """
        tree = commonlib.loadweb(url).build_tree()
        if tree is None:
            # First fetch failed: retry once on the mirror host.
            if 'truyen2.hixx.info' in url:
                url = url.replace('truyen2', 'truyen')
            elif 'truyen.hixx.info' in url:
                url = url.replace('truyen', 'truyen2')
            tree = commonlib.loadweb(url).build_tree()
        if tree is None:
            # Both hosts failed; nothing to extract (the original crashed here).
            return ''

        nodes = tree.xpath("//td[@id='ar-content-html' and @class='chi_tiet']")
        if not nodes:
            return ''
        # NOTE(review): xpath() returns a node-set; the original passed the
        # whole list to Etree.tostring(), which (lxml-style) wants a single
        # element -- taking the first match.  Confirm against commonlib.Etree.
        contentNode = nodes[0]
        commonlib.Etree.clean_elements([contentNode], ['embed', 'script', 'noscript'])
        html = html2text.html2text(commonlib.Etree.tostring(contentNode)).strip()
        # Normalise html2text output: newlines -> <br />, collapse the
        # double-blank-paragraph artefact, drop stray comment terminators.
        html = html.replace(u"\n", u"<br />")
        html = html.replace(u"<br /><br /> <br /><br />", u"<br /><br />")
        html = re.sub(u"--> *<br /><br />", u"", html)
        return html

    def buildPageURL(self, page):
        """Return the URL of pagination page *page* of the chapter index.

        Page 1 is the story landing page itself; later pages live under
        .../<story>/indexN.html.
        """
        url = self.url
        if page == 1:
            return url
        if re.search(r"index\d+\.html", url):
            # BUG FIX: the original re.sub() call was missing the `url`
            # string argument and raised TypeError on this branch.
            return re.sub(r"index\d+\.html", "index{0}.html".format(page), url)
        return url.replace('.html', '/index{0}.html'.format(page))

    def detectPages(self):
        """Return the number of pagination pages (>= 1) of the chapter index.

        Reads the anchor right after the 'next' button, whose href carries
        the last page number as .../indexN.html.
        """
        tree = commonlib.loadweb(self.url).build_tree()
        nodes = tree.xpath("//div[@class='bt_pagination']/div[@class='next']/following-sibling::*[1]/a")
        if not nodes:
            return 1  # no pagination widget: single-page index
        return int(float(commonlib.extractText(r"index(\d+)\.html", nodes[0].get('href'), 1, 0)))

    def getStoriesOfCat(self):
        """List stories of the 'truyen-tinh-cam' category (first 2 pages).

        NOTE(review): the final loop only prints each title -- the actual
        download/store step was apparently never finished (TODO).
        """
        maxPage = 2  # hard-coded: only the first two category pages
        data = []
        for page in range(1, maxPage + 1):
            url = 'http://truyen2.hixx.info/truyen/truyen-tinh-cam/page/{_page}'.format(_page=page)
            tree = commonlib.loadweb(url).build_tree()
            for node in tree.xpath("//ul[@class='top']/li/a"):
                title = commonlib.stringify(node)
                href = node.get('href', '').strip()
                if not href:
                    continue
                href = urljoin(url, href)
                storyId = commonlib.crc32unsigned(href)
                print('{0} {1} {2}'.format(storyId, title, href))
                data.append({'title': title, 'url': href, 'id': storyId})

        smodel = StoryModel('/longhoanggiang/database/truyentinhyeu')
        smodel.open(True)
        for story in data:
            print('>>> loading {0}'.format(story['title']))

    def process(self):
        """Crawl every chapter of self.url and store it via StoryModel.

        Relies on the module-level globals DATABASE and APPEND_TITLE that
        the __main__ section sets before calling this.
        """
        smodel = StoryModel(DATABASE)
        smodel.open(True)

        data = self.getListChapter()

        # Warm-up pass: presumably loadweb() caches each page so the
        # getDetail() pass below is served from cache -- TODO confirm.
        for item in data:
            commonlib.loadweb(item['url'])

        for item in data:
            detail = self.getDetail(item['url'])
            # Strip a leading blank paragraph left over by html2text.
            detail = re.sub(u"^<br[^>]*> *<br[^>]*>", u"", detail)
            # Drop "--NN--" page markers and normalise ' : ' -> ': '.
            title = re.sub(r"--\d+--", "", item['title'].replace(' : ', ': '))
            title = commonlib.toUpper(title)
            if APPEND_TITLE:
                detail = '''<b>{0}</b><br /><hr /><br />{1}'''.format(title, detail)
            print("\t\t\tTITLE: {0}".format(title))
            print("\t\t\tDETAIL: {0}".format(detail[:150]))
            smodel.add_story(title, detail, 0)
        smodel.close()

if __name__ == '__main__':

    # Module-level globals read by TruyenHixx.process().
    DATABASE = ''
    APPEND_TITLE = True

    # Typical batch usage (kept for reference):
    #   jobs = {'linhlagioi': 'http://truyen2.hixx.info/truyen/truyen-kiem-hiep/121376/Linh-La-Gioi.html'}
    #   for database, url in jobs.items():
    #       DATABASE = "/longhoanggiang/database/{0}".format(database)
    #       TruyenHixx(url).process()

    a = TruyenHixx('')
    # a.getStoriesOfCat()
    a.getDetail('http://truyen2.hixx.info/truyen/truyen-kiem-hiep/174643/Quan-Lam-Thien-Ha/Chuong-146No-phat-nhu-cuong.html')
    print('> Finished at {0}'.format(datetime.datetime.now()))
    # os._exit() skips interpreter cleanup -- kept (presumably to kill any
    # lingering workerpool threads -- TODO confirm), but BUG FIX: exit with
    # status 0, since reaching this point is success, not failure.
    os._exit(0)