# -*- coding: utf-8 -*-
'''
Created on 18-11-2012

@author: LONG HOANG GIANG
'''
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from urlparse import urljoin
import datetime
import json
import pyLib
import re
import workerpool



class DocHanh():
    
    baseURL = 'http://dochanh.net/'
    chapterPrefix = "CHAPTER {0}"
    cookiestr = ''
    output = "/longhoanggiang/truyentext/_dochanh/"
    
    def __init__(self):
        if not self.output.endswith("/"):
            self.output += "/"
    
    def detectTypeURL(self, url):
        if re.search(r"/truyen-\d+/", url):
            res = pyLib.loadWeb(url, cookiestr=self.cookiestr)
            html = res.gethtml()
            startReading = pyLib.extractText(r",start_reading='(.+)',share_title", html, 1)
            if startReading == '': raise Exception, 'Error occurred when detect start chapter URL' 
            return urljoin(self.baseURL, startReading)
        return url
    
    def post(self, chapterId, cookie):
        url = 'http://dochanh.net/singleee/{0}'.format(chapterId)
        print '> get content from {0}'.format(url)
        res = pyLib.loadWeb(url, cookiestr=self.cookiestr)
        html = res.gethtml()
        self.cookiestr = res.getcookie()
        return html
    
    def process(self, storyUrl, storyId=''):
        print '>>> process story {0}'.format(storyUrl)
        if storyId == '':
            output_path = self.output + pyLib.extractText(r"/([^/]+)\.html", storyUrl, 1)
        else:
            output_path = self.output + storyId
        if not output_path.endswith("/"): output_path += "/"
        files_output_path = output_path + "files"
        pyLib.createIfNotExistsPath(files_output_path)
        url = self.detectTypeURL(storyUrl)
        
        data = []
        while url != '':
            res = pyLib.loadWeb(url, cookiestr=self.cookiestr)
            html = res.gethtml()
            self.cookiestr = res.getcookie()
            chapterId = pyLib.extractText(r"/chap-(\d+)/", url, 1)
            currentChap = pyLib.extractText(r"total_chap=\d+,curent_chap=(\d+),next_chap=\d+", html, 1)
            if currentChap == '': raise Exception, 'Error occurred while get current chapter number'
            currentChap = self.chapterPrefix.format(currentChap)
            content = self.post(chapterId, self.cookiestr)
            content = pyLib.cleanHTML(content, ['b', 'strong'])
            content = re.sub(ur"\n", "<br />", content)
            url = pyLib.extractText(r"str_next='([^']+)',", html, 1)
            content = "<b>{0}</b><br /><hr /><br />{1}".format(currentChap, content)
            detailFile = pyLib.crc32unsigned(url)
            filePath = files_output_path + "/" + detailFile
            pyLib.gzip(filePath, json.dumps(content))
            data.append({'name': currentChap, 'path': detailFile})
            print "TITLE: {0}".format(currentChap)
            print "CONTENT: {0}".format(content)
        
        pyLib.gzip(output_path + 'data', pyLib.encryptCipher(json.dumps(data)))
        return data
        
    def processCategory(self, catUrl, minPage, maxPage):
        data = []
        for page in range(minPage, maxPage+1):
            url = catUrl + '&p={0}'.format(page)
            fp = pyLib.loadWeb(url, cookiestr=self.cookiestr)
            if (fp.getcookie() != ''):
                self.cookiestr = fp.getcookie()
            tree = fp.build_tree()
            for item in tree.xpath("//div[@id='thread_list']/div[contains(@class, 'thread-list-padding')]//div[@class='thread-list-title']/a"):
                storyName = pyLib.stringify(item)
                link = item.get('href', '')
                if link == '': continue
                link = urljoin(catUrl, link)
                data.append({'name': storyName, 'url': link, 'path': pyLib.crc32unsigned(link)})
        
        print '-------------------------------------------------'
        print '------------ DONE GET LINK CHAPTER --------------'
        print '-------------------------------------------------'
        pyLib.gzip(self.output + 'data', pyLib.encryptCipher(json.dumps(data)))
        pool = workerpool.WorkerPool(size=1)
        for item in data:
            pool.put(WorkerJob(self.process, item['url'], item['id']))
        pool.shutdown()
        pool.wait()
        

class WorkerJob(workerpool.Job):
    """A workerpool job that invokes a stored two-argument callable."""

    _method = None
    _arg1 = None
    _arg2 = None

    def __init__(self, method, arg1, arg2):
        # Remember the callable together with its two positional arguments.
        self._method, self._arg1, self._arg2 = method, arg1, arg2

    def run(self):
        # Executed on a pool worker thread.
        self._method(*(self._arg1, self._arg2))


if __name__ == '__main__':
    dochanh = DocHanh()
    
    listStories = [
        'http://dochanh.net/truyen-12762/hoa-bao-thien-vuong.html'
        ]
    
    for s in listStories:
        dochanh.process(s)
    
#    dochanh.processCategory('http://dochanh.net/actions/thread&cid=100', 1, 2)
    print '> Finished at {0}'.format(datetime.datetime.now())
    os._exit(1)
