# -*- coding: utf-8 -*-
'''
Created on Aug 30, 2014

@author: TRAM ANH
'''
import os
import sys
from urlparse import urljoin
from workerpool import jobs
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json
import workerpool
from lxml import etree
import copy

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class CrawlerJob(workerpool.Job):
    """Workerpool job wrapping a deferred callback invocation.

    Stores a callable together with its positional arguments so a
    worker thread can execute it later through run().
    """

    def __init__(self, callback, *args):
        # Remember the callable and its arguments for later execution.
        self.callback, self.args = callback, args

    def run(self):
        # Invoked by a pool worker: replay the stored call.
        target = self.callback
        target(*self.args)



class Crawler():
    
    def getDetails(self, chapter, path):
        print path
#         res = lib.Web.load(chapter['url'])
#         cookie = res.get_cookie()
        chapId = lib.extractText(r"manga24h.com/([^/]+)/", chapter['url'], 1)
        url = "http://manga24h.com/index.php?module=anime&act=ajax&resource=1&id={_id}".format(_id=chapId)
        imagesStr = lib.Web.load(url, cached=False, reqAjax=True, referer=chapter['url']).get_html().strip()
        imagesList = imagesStr.split("|")
        print len(imagesList)
        print imagesList
        if len(imagesList) < 5: raise Exception("Get images wrong way")
        output = path + "/files/{0}".format(chapter['chapid'])
        if not os.path.exists(os.path.dirname(output)):
            os.makedirs(os.path.dirname(output), 0777)
        lib.gz_file_put_content(lib.encryptCipher(json.dumps(imagesList)), output)
    
    def getChapters(self, url):
        tree = lib.Web.load(url, cached=True).build_tree()
        data = []
        for node in tree.xpath("//div[@class='lchap']//table[contains(@class, 'table_manga')]//tr/td[1]/a"):
            name = lib.stringify(node)
            link = node.get('href')
            if link == '': continue
            link = urljoin(url, link)
            data.append({'name': name, 'url': link, 'chapid': lib.md5(link)})
            print name, link
        data.reverse()
        description = ""
        try:
            description = lib.stringify(tree.xpath("//div[@class='item_truyen_detail']//div[@itemprop='description']")[0])
            if type(description).__name__ == 'unicode':
                description = description.encode('utf-8')
            description = description.strip()
        except:
            logging.warn("Description NULL at url {0}".format(url))
            
        return data, description
    
    
    def crawl(self, url):
        output = lib.extractText(r"/([^/]+)\.html", url, 1)
        path = "/longhoanggiang/comic/{0}".format(output)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        
        chapters, description = self.getChapters(url)
        saveChapters = copy.deepcopy(chapters)
        for chapter in saveChapters:
            del chapter['url']
        
        data = lib.encryptCipher(json.dumps(saveChapters))
        lib.gz_file_put_content(data, path + "/data")
        lib.file_put_content(description, path + "/description")
        
        pool = workerpool.WorkerPool(size=2)
        for chapter in chapters:
            pool.put(CrawlerJob(self.getDetails, chapter, path))
        pool.shutdown()
        pool.wait()
        
    def crawlMultiples(self, urls, output):
        if len(urls) == 0: return
        path = "/longhoanggiang/comic/{0}".format(output)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        chapters = []
        description = ""
        for urlItem in urls:
            url = urlItem['url']
            prefix = urlItem['prefix']
            ichapters, idescription = self.getChapters(url)
            for c in ichapters:
                if prefix != "":
                    c['name'] = "{0} - {1}".format(prefix, c['name'])
                chapters.append(c)
            if description == "": description = idescription
        saveChapters = copy.deepcopy(chapters)
        for chapter in saveChapters:
            del chapter['url']
        data = lib.encryptCipher(json.dumps(saveChapters))
        lib.gz_file_put_content(data, path + "/data")
        lib.file_put_content(description, path + "/description")
        pool = workerpool.WorkerPool(size=2)
        for chapter in chapters:
            pool.put(CrawlerJob(self.getDetails, chapter, path))
        pool.shutdown()
        pool.wait()
        
if __name__ == '__main__':

    # Example multi-part comic (two volumes merged into one output dir),
    # consumed only by the commented-out crawlMultiples call below.
    sites = []
    item = {'prefix': 'Ô Long Viện 1', 'url': 'http://manga24h.com/29/O-Long-Vien.html'}
    sites.append(item)
    item = {'prefix': 'Ô Long Viện 2', 'url': 'http://manga24h.com/848/O-Long-Vien-2.html'}
    sites.append(item)

    c = Crawler()
#     c.crawlMultiples(sites, "O-Long-Vien")
    c.crawl('http://manga24h.com/1174/Cau-Be-Gioi-Vo.html')

    logging.info("Finished")
    # os._exit skips normal interpreter shutdown so lingering pool worker
    # threads cannot keep the process alive.  Exit with 0 on success —
    # the original used os._exit(1), which falsely signalled failure to
    # the shell / any supervising job.
    os._exit(0)