# -*- coding: utf-8 -*-
'''
Created on Apr 22, 2012

@author: LONG HOANG GIANG
'''

import sys, os
import time
import workerpool
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import re
import simplejson as json
from CrawlerLib import Http, commonlib
from urlparse import urljoin

# Page template for each saved chapter; #TITLE# and #CONTENT# are
# placeholders filled in by VNThuQuan.getHtml().
# Fix: the original template emitted a duplicated closing </html> tag,
# producing invalid HTML in every generated file.
html_cfg = '''<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="content-type" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="format-detection" content="telephone=no" />
<title>#TITLE#</title>
<style type="text/css">
body {
    text-align: justify;
    text-justify: newspaper;
}
</style>
</head>
<body>
#CONTENT#
</body>
</html>'''

class VNThuQuan():
    """Crawler for vnthuquan.net stories.

    process() reads a story's table-of-contents page, resolves every
    chapter link, and downloads each chapter concurrently, writing one
    standalone HTML file (built from the module-level ``html_cfg``
    template) per chapter into ``self.output``.
    """

    # Default directory the chapter .html files are written to;
    # override per instance with set_output().
    output = '/wattpad/'

    def __init__(self, title):
        # Story title, substituted into the #TITLE# placeholder of html_cfg.
        self.title = title

    def getHtml(self, url, pos):
        """Fetch one chapter page and save it as '<output><pos>.html'.

        url -- absolute URL of the chapter page
        pos -- 1-based chapter index, used as the output file name
        """
        print('getHtml({0}, {1})'.format(pos, url))
        html = Http.getHtml(url)
        content_html = commonlib.get_html_content(commonlib.convertHTMLEntitiesToUnicode(html))
        # Use str.replace, not re.sub: re.sub interprets backslashes and
        # group references (e.g. \1) in the replacement string, so a title
        # or chapter body containing them would be corrupted or raise.
        page = html_cfg.replace('#TITLE#', self.title)
        page = page.replace('#CONTENT#', content_html)
        file_path = '{0}{1}.html'.format(self.output, pos)
        out_dir = os.path.dirname(file_path)
        if not os.path.exists(out_dir):
            # 0o777 spelling works on Python 2.6+ and 3.x (0777 is 2-only).
            os.makedirs(out_dir, 0o777)
        commonlib.file_put_contents(file_path, page)

    def set_output(self, output):
        """Set the output directory, ensuring it ends with a slash."""
        if not output.endswith('/'):
            output += '/'
        self.output = output

    def process(self, url):
        """Download every chapter of the story whose TOC page is *url*."""
        html = Http.getHtml(url)
        tree = commonlib.build_tree_from_html(html)
        if tree is None:  # identity check, not '== None'
            print('Build tree from html error')
            sys.exit(1)

        data = {'link': [], 'pos': []}
        page_count = 0
        for item in tree.xpath("//table[@class='toolbar text-toolbar']//acronym/a"):
            link = item.get('href', '')
            if link == '':
                continue  # anchor without an href: not a chapter link
            page_count += 1
            # TOC hrefs may be relative; resolve against the TOC URL.
            data['link'].append(urljoin(url, link))
            data['pos'].append(page_count)
        # 5 workers fetch chapters in parallel; shutdown stops accepting
        # jobs, wait blocks until the queued downloads finish.
        pool = workerpool.WorkerPool(size=5)
        pool.map(self.getHtml, data['link'], data['pos'])
        pool.shutdown()
        pool.wait()
        
#url = 'http://vnthuquan.net/truyen/truyen.aspx?tid=2qtqv3m3237nvnmn3n1n31n343tq83a3q3m3237nvn'
#vntq = VNThuQuan("Đêm Của Ma Mèo")
#vntq.set_output('/demcuamameo/')
#vntq.process(url)
