# -*- coding: utf-8 -*-
'''
Created on Mar 9, 2013

@author: LONG HOANG GIANG
'''
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pyLib
import re, json
from urlparse import urljoin
from Cheetah.Template import Template
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class Manga24():
    
    _cookie_str = ''
    _url = ''
    _rootUrl = 'http://manga24h.com/'
    _output = ''
    _prefix_chapter = ''
    
    def __init__(self, url):
        self._url = url
        self._output = "{0}-{1}".format(pyLib.extractText("/(\d+)/", url, 1), pyLib.extractText("/([^/]+)\.html", url, 1))
        self.load2Cookie()
    
    def load2Cookie(self):
        fp = pyLib.loadWeb(self._rootUrl)
        self._cookie_str = fp.getcookie()
    
    def getDetail(self, item):
        tree = pyLib.loadWeb(item['url']).build_tree()
        data = []
        for img in tree.xpath("//div[@class='row']/div/img"):
            src = img.get('src', '').strip()
            if src == '': continue
            print src
            data.append(src)
        return data
    
    def getIntro(self):
        try:
            tree = pyLib.loadWeb(self._url, cookiestr=self._cookie_str).build_tree()
            mangaInfoNode = tree.xpath("//ul[@class='mangainfo']")[0]
            pyLib.Etree.clean(mangaInfoNode.xpath(".//li[last()]"))
            html = pyLib.Etree.tostring(mangaInfoNode) + "\n<p>&nbsp;</p>\n"
            descriptionNode = tree.xpath("/html/body/div[4]/div/div/div[contains(@style, 'overflow: hidden; color: #000000;')]/div")[0]
            descriptionText = pyLib.cleanHTML(pyLib.Etree.tostring(descriptionNode))
            descriptionText = re.sub(r"\n", "<br />", descriptionText)
            html += descriptionText
            html = Template(file='intro.tpl', searchList=[{'content': html}])
            return str(html)
        except:
            return None
    
    def getChapters(self):
        tree = pyLib.loadWeb(self._url, cookiestr=self._cookie_str).build_tree()
        data = []
        for node in tree.xpath("//table[contains(@class, 'tablesorter')]/tbody/tr/td[1]/a"):
            title = pyLib.stringify(node)
            href = node.get('href', '')
            if href == '': continue
            href = urljoin(self._rootUrl, href)
            if self._prefix_chapter.strip() != '':
                title = "{0}{1}".format(self._prefix_chapter, title)
            data.append({'name': title, 'url': href})
            print title, href
            intro = self.getIntro()
        return data, intro
    
    def setPrefixChapter(self, val):
        self._prefix_chapter = val    
    
    def process(self):
        chapters, intro = self.getChapters()
        output = "/longhoanggiang/comic/{0}/".format(self._output)
        output_files = output + "files"
        pyLib.createIfNotExistsPath(output_files)
        if intro is not None:
            pyLib.file_put_content(output + 'intro.html', intro)
        data = []
        for chapter in chapters:
            chapterId = pyLib.crc32unsigned(chapter['url'])
            imgArr = self.getDetail(chapter)
            pyLib.gzip(output_files + "/" + chapterId, pyLib.encryptCipher(json.dumps(imgArr)))
            data.append({'name': chapter['name'], 'chapid': chapterId})
        data.reverse()
        pyLib.gzip(output + 'data', pyLib.encryptCipher(json.dumps(data)))


def join(source, destination):
    """Append the chapter index of `source` onto `destination`.

    Reads the encrypted/gzipped "data" index from both directories and
    writes the merged list to <destination>/data-new.  The original
    <destination>/data file is left untouched so the merge can be
    inspected before being swapped in.

    :param source:      directory holding the index to append (trailing
                        slash optional)
    :param destination: directory holding the base index (trailing slash
                        optional)
    """
    if not source.endswith("/"):
        source += "/"
    if not destination.endswith("/"):
        destination += "/"
    data1 = json.loads(pyLib.decryptCipher(pyLib.gz_file_get_content(source + "data")))
    data2 = json.loads(pyLib.decryptCipher(pyLib.gz_file_get_content(destination + "data")))
    # list.extend replaces the original manual append loop.
    data2.extend(data1)
    pyLib.gz_file_put_content(destination + "data-new", pyLib.encryptCipher(json.dumps(data2)))
    
    
    
    

if __name__ == '__main__':

    logging.info("start crawler")
    url = raw_input("URL: ")
    # Only plain http:// manga24h URLs are supported (the site is http-only).
    ok = url.startswith("http://")
    if ok:
        worker = Manga24(url)
    #    worker.setPrefixChapter("")
    #    worker.getIntro()
        worker.process()
    else:
        logging.warn("invalid url")
    logging.info("finished")
    # Bug fix: the script always exited with status 1, signalling failure to
    # the shell even after a successful crawl.  Now 0 on success, 1 on bad
    # input.  os._exit (not sys.exit) is kept from the original -- presumably
    # to terminate any lingering pyLib network threads; TODO confirm.
    os._exit(0 if ok else 1)
    
    