# -*- coding: utf-8 -*-
'''
Created on May 3, 2013

@author: LONG HOANG GIANG
'''
import sys, os
from urlparse import urljoin
from Cheetah.Template import Template
import workerpool
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pyLib
import logging
import json
import re

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class VNSharing():
    
    _output = "/longhoanggiang/comic"
    
    def __init__(self):
        pass
    
    
    def getImageDetail(self, item):
        logging.debug("getImageDetail {0}".format(item['url']))
        tree = pyLib.loadWeb(item['url']).build_tree()
        images = []
        for scriptNode in tree.xpath("//script[contains(., 'lstImages.push(\"http')]"):
            text = pyLib.stringify(scriptNode)
            for p in re.finditer(r"lstImages\.push\(\"(http://[^)]+)\"\)", text):
                url = p.group(1)
                if url == '': continue
                url = urljoin(item['url'], url)
                url = re.sub(r"\?imgmax=\d+", "?imgmax=900", url)
                print url
                images.append(url)
        pyLib.gz_file_put_content(self._output + "/files/" + item['chapid'], pyLib.encryptCipher(json.dumps(images)))
                
    
    def getListChapter(self, url):
        
        data = []
        tree = pyLib.loadWeb(url).build_tree()
        for node in tree.xpath("//table[@class='listing']/tr[position()>1]/td[1]/a"):
            name = pyLib.stringify(node)
            href = node.get('href', '')
            if href == '': continue
            href = urljoin(url, href)
            print name, href
            data.append({'name': name, 'url': href, 'chapid': pyLib.crc32unsigned(href)})
        
        data.reverse()
        
        metaNode = tree.xpath("//div[@id='leftside']//div[@class='bigBarContainer']")[0]
        pyLib.Etree.clean(metaNode.xpath(".//span[@id='spanBookmark']"))
        pyLib.Etree.clean(metaNode.xpath(".//img[contains(@src, 'images/tlicon.gif')]/.."))
        metaText = pyLib.cleanHTML(pyLib.Etree.tostring(metaNode)).strip()
        metaText = re.sub(r"\n", "<br />", metaText)
        html = str(Template(file='intro.tpl', searchList=[{'content': metaText}]))
        return data, html
        
    
    def process(self, url):
        folderName = pyLib.extractText(r"Truyen/([^?]+)", url, 1).strip().strip('-')
        self._output = "{0}/{1}".format(self._output, folderName)
        pyLib.createIfNotExistsPath(self._output + "/files")
        chapters, introHtml = self.getListChapter(url)
        pyLib.file_put_content(self._output + "/intro.html", introHtml)
        
#        for chapter in chapters:
#            self.getImageDetail(chapter)
#            break
        pool = workerpool.WorkerPool(size=5)
        pool.map(self.getImageDetail, chapters)
        pool.shutdown()
        pool.wait()
        
        for chapter in chapters:
            del chapter['url']
        
        pyLib.gz_file_put_content(self._output + "/data", pyLib.encryptCipher(json.dumps(chapters)))
        
        
        
        
if __name__ == '__main__':

    w = VNSharing()
    w.process('http://truyen.vnsharing.net/Truyen/Dragon-Ball')

    logging.info("finished")
    # Hard-exit so lingering worker-pool threads cannot keep the process
    # alive. Status 0 signals success — the original exited with 1, which
    # conventionally reports failure to the shell / calling scripts.
    os._exit(0)