# -*- coding: utf-8 -*-
'''
Created on 20-12-2012

@author: LONG HOANG GIANG
'''
import os
import sys
import re
import workerpool
from CrawlerLib2 import commonlib, html2text
from Model.storymodelv2 import StoryModel

class Crawler():
    """Scrape story chapters from a vBulletin-style forum thread and save
    them through StoryModel.

    NOTE(review): this class reads the module-level globals ``URL`` and
    ``DATABASE`` (assigned only in the ``__main__`` block below), so it is
    only usable when this file is run as a script — confirm before reuse.
    """

    # Inclusive page range to crawl. lastPage is a placeholder here;
    # __init__ overwrites it with the detected value.
    startPage = 1
    lastPage = 1

    def __init__(self):
        # Normalized URL template containing a "{0}" page-number placeholder.
        self.urlfm = self.getURLFormat()
        # Last page number parsed from the thread's pagination widget.
        self.lastPage = self.detectLastPage()

    def detectLastPage(self):
        """Return the thread's last page number (int).

        Loads the global URL, finds the "Cuối" ("Last") link inside the top
        pagination bar, and extracts the trailing ``/pageNNN`` number from
        its href.  Raises a plain Exception if the link has no href.
        """

        tree = commonlib.loadweb(URL).build_tree()
        # .decode('utf-8') converts the utf-8 byte string (see the coding
        # declaration at the top of the file) into a unicode XPath so the
        # accented 'Cuối' matches correctly under Python 2 / lxml.
        aLastNode = tree.xpath("//div[@id='pagination_top']//span[@class='first_last']/a[contains(., 'Cuối')]".decode('utf-8'))[0]
        href = aLastNode.get('href', '').strip()
        if href == '': raise Exception, 'Detect Lastpage Error'
        # int(float(...)) tolerates values like "101.0"; extractText pulls
        # the first capture group of /page(\d+) out of the href.
        page = int(float(commonlib.extractText("/page(\d+)", href, 1)))
        return page

    def getURLFormat(self):
        """Build a page-URL template from the global URL.

        If URL already ends in ``.html/pageN``, strip that page suffix
        first; then append ``/page{0}`` so callers can ``.format(page)``.
        """
        url = URL
        if re.search("\.html/page\d+", URL):
            url = commonlib.extractText("(.+)/page\d+", URL, 1)
        urlfm = url + "/page{0}"
        return urlfm

    def preloadHtml(self):
        """Fetch every page of the thread concurrently (10 workers).

        Presumably commonlib.loadweb caches responses so the later
        sequential getDetail() calls hit the cache — TODO confirm.
        """

        urls = []
        for page in range(self.startPage, self.lastPage + 1):
            urls.append(self.urlfm.format(page))

        pool = workerpool.WorkerPool(size=10)
        pool.map(commonlib.loadweb, urls)
        pool.shutdown()
        pool.wait()


    def normalizeChapter(self, chapter):
        """Normalize a chapter title byte string and return it.

        - Encodes unicode input to utf-8 (lossy, 'ignore').
        - Rewrites "Chương <number>" so the number loses leading zeros /
          decimal artifacts (via int(float(...))).
        - Strips a trailing translation-group credit ("Nhóm dịch...").
        """
        # Python 2 type check: unicode input must become a utf-8 str so the
        # byte-string regexes below match consistently.
        if type(chapter).__name__ == 'unicode':
            chapter = chapter.encode('utf-8', 'ignore')
        if re.search(r"Chương", chapter):
            chapter = re.sub(r"(Chương *)(\d+)(.+)", lambda m: "{0}{1}{2}".format(m.group(1), int(float(m.group(2))), m.group(3)), chapter)
        chapter = re.sub(r" *Nhóm dịch.+", "", chapter)
        return chapter

    def postthank(self, url):
        # Intentionally a no-op placeholder (e.g. for "thank" POST-backs).
        pass

    def getDetail(self, url):
        """Scrape one thread page and return a list of chapter dicts.

        Each item is ``{'title': <str>, 'content': <html str>}``.  Posts
        without a username or with empty content are skipped.
        """

        tree = commonlib.loadweb(url).build_tree()

        html = commonlib.Etree.tostring(tree)
        print html
        # securityToken is extracted but currently unused beyond the debug
        # print — NOTE(review): likely intended for postthank(); confirm.
        securityToken = commonlib.extractText(ur'''var SECURITYTOKEN = "([0-9-]+)";''', html, 1)
        print securityToken
        data = []
        # One <li id="post_..."> per forum post.
        for node in tree.xpath("//div[@id='postlist']//li[contains(@id, 'post_')]"):
            unode = node.xpath(".//div[@class='userinfo']//div[@class='username_container']/div/a")
            username = commonlib.stringify(unode).strip()
            # Skip deleted/anonymous posts (no username rendered).
            if username == '': continue
            nodecontent = node.xpath(".//div[contains(@id, 'post_message')]")[0]
#            print html2text.html2text(commonlib.Etree.tostring(nodecontent))

            # The first <div> inside the quoted post body holds the chapter
            # heading; clean decorative separators and unify the spelling
            # variants of "Chương" before extracting the title.
            metaNode = nodecontent.xpath("./blockquote[@class='postcontent restore']/div")[0]
            t = commonlib.stringify(metaNode)
            t = re.sub(r"-----oo0oo----- *", "", t)
            t = re.sub(r"(Chuơng)|(Chuong)|(chương)|(Chương)", "Chương", t)
#            print t

            title = commonlib.extractText("(Chương.+)\n?", t, 1).strip()
            if title == '':
                # Fall back to the introduction post ("Giới Thiệu").
                title = commonlib.extractText("(Giới Thiệu)", t, 1).strip()
            title = self.normalizeChapter(title)
            # Last-resort sentinel title ("UNKNOWN") so the chapter is not lost.
            if title == '': title = '----------CHUA BIET------------'
            print '______________________________________________'
            print title
            print '______________________________________________'
            # The actual chapter text sits in a collapsed spoiler div.
            contentNode = nodecontent.xpath(".//div[@class='alt2' and @style='display: none;']")[0]
            content = html2text.html2text(commonlib.Etree.tostring(contentNode)).encode('utf-8', 'ignore')
            # Re-HTML-ify: markdown newlines -> <br />, then prepend the
            # title as a bold header with a separator rule.
            content = re.sub(r"\n", "<br />", content)
            content = "<b>{0}</b><br /><hr /><br />{1}".format(title, content)
            print content[:200]
            print len(content)
            if content.strip() != '':
                data.append({'title': title, 'content': content})
        return data

    def process(self):
        """Crawl every page and persist each non-empty chapter to the DB.

        Opens StoryModel on the global DATABASE path (True presumably means
        create-if-missing — TODO confirm against StoryModel), stores titles
        upper-cased, and always closes the DB afterwards.
        """

        db = StoryModel(DATABASE)
        db.open(True)
        for page in range(self.startPage, self.lastPage + 1):
            url = self.urlfm.format(page)
            data = self.getDetail(url)

            for item in data:
                if item['title'] != '' and item['content'] != '':
                    # Third argument 0: meaning not visible here —
                    # NOTE(review): check StoryModel.add_story's signature.
                    db.add_story(commonlib.toUpper(item['title']), item['content'], 0)
        db.close()
            
        
if __name__ == '__main__':
    
    DATABASE = "/longhoanggiang/database/{0}".format("covotonggdxinhdep")
    URL = 'http://mangaclub.vn/forum/threads/5480-Do-Thi-Co-Vo-Tong-Giam-Doc-Xinh-Dep-Cua-Toi-Chuong-1064.html'
    
    c = Crawler()
    c.getDetail('http://mangaclub.vn/forum/threads/5480-Do-Thi-Co-Vo-Tong-Giam-Doc-Xinh-Dep-Cua-Toi-Chuong-1064.html/page101')
#    c.process()
    
    print '> Finished'
    os._exit(1)
    