# -*- coding: utf-8 -*-
'''
Created on Mar 1, 2013

@author: LONG HOANG GIANG
'''

from CrawlerLib2 import commonlib, html2text
from Model.storymodelv3 import StoryModel
from urlparse import urljoin
import mechanize
import os
import re


# Shared module-level mechanize browser with an LWP cookie jar attached,
# so cookies persist across all requests made through loadWeb().
COOKIE = mechanize.LWPCookieJar()
BROWSER = mechanize.Browser()
BROWSER.set_cookiejar(COOKIE)

def loadWeb(url):
    respond = BROWSER.open(url)
    print respond.read()
    
    


class TungHoanh():
    
    __url = ''
    __cookie = ''
    
    def __init__(self, url):
        self.__url = url
        self.loadWebGetCookie()
        
    def loadWebGetCookie(self):
        self.__cookie = commonlib.loadweb('http://tunghoanh.com/').getcookie()
        
    def getChapters(self):
        res = commonlib.loadweb(self.__url)
        tree = res.build_tree()
        self.__cookie = res.getcookie()
        data = []
        for node in tree.xpath("//div[@class='story_chapter']//div[@class='chapter']/a"):
            url = node.get('href', '')
            if url == '': continue
            url = urljoin(self.__url, url)
            tree = commonlib.loadweb(url).build_tree()
            for inode in tree.xpath("//div[@class='list_chapters'][1]//select/option"):
                href = inode.get('value', '')
                if href == '': continue
                href = urljoin(self.__url, href)
                title = commonlib.stringify(inode)
                print title, href
                data.append({'title': title, 'url': href})
            return data
    
    def fetchChapter(self, chapid, referer):
        url = 'http://tunghoanh.com/chapter/{0}.html'.format(chapid)
        html = "<html>" + commonlib.loadweb(url, nocache=True, cookie=self.__cookie, referer=referer, ajax=True).gethtml() + "</html>"
        tree = commonlib.buildTreeFromHtml(html)
        html = html2text.html2text(commonlib.Etree.tostring(tree.xpath("//body"))).encode('utf-8')
##        print '=================================================='
##        print html
##        print '=================================================='
#        for match in re.finditer("(c|C)hương \d+", html):
#            print '??????????????FIND ITER'
#            if match.start() > -1:
#                html = html[match.start():]
        return html.strip()
    
    def getDetail(self, item):
        respond = commonlib.loadweb(item['url'], referer=self.__url)
        tree = respond.build_tree()
        self.__cookie = respond.getcookie()
        contentNode = tree.xpath("//div[@id='chapter_content']")[0]
        content = commonlib.stringify(contentNode)
        chapid = commonlib.extractText("FetchChapter\(\"(.+)\"\)", content, 1)
        print '________________________________'
        print chapid
        print '________________________________'
        html = self.fetchChapter(chapid, item['url'])
        html = re.sub("( *\<br /\> *){3,}", "<br /><br />", re.sub(r"\n", "<br />", html))
        return html
    
    def process(self, database, prefix=''):
        database = '/longhoanggiang/database/{0}'.format(database)
        smodel = StoryModel(database)
        smodel.open(True)
        data = self.getChapters()
        for chapter in data:
            title = "[{0}] - {1}".format(prefix, chapter['title']) if prefix.strip() != '' else chapter['title']
            title = commonlib.toUpper(title)
            detail = self.getDetail(chapter)
            print '_______________'
            print chapter['url']
            print '----------------'
            print detail[:200]
            print '_______________'
            if detail.strip() == '': raise Exception, 'Error: Null Content !!! at {0}'.format(chapter['url'])
            detail = '''<b>{0}</b><br /><hr /><br />{1}'''.format(title, detail)
            smodel.add_story(title, detail, 0)
        smodel.close()
            
        
if __name__ == '__main__':
    
    list_story = [
        ['http://tunghoanh.com/co-vo-tong-giam-doc-xinh-dep-cua-toi-lgaaaab.html', 'covotgdxd', ''],
    ]
    
    for story in list_story:
        print '======================== CRAWL STORY ========================='
        prefix = ''
        if len(story) == 3:
            prefix = story[2]
        a = TungHoanh(story[0])
        a.process(story[1], prefix)
    
    
    print '> Finished'
    os._exit(1)