# -*- coding: utf-8 -*-
'''
Created on Sep 10, 2014

@author: TRAM ANH
'''
import os
import sys
from urlparse import urljoin
from workerpool import jobs
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json
import workerpool
from lxml import etree
import copy

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

class CrawlerJob(workerpool.Job):
    """A workerpool job that defers a callback: stores a callable plus its
    positional arguments and invokes them when a worker runs the job."""

    def __init__(self, callback, *args):
        # Remember the callable and the arguments to replay later.
        self.callback, self.args = callback, args

    def run(self):
        """Called by a worker thread: forward the saved args to the callback."""
        self.callback(*self.args)
        
        
class Crawler():
    
    def getDetails(self, chapter, path):
        imagesList = []
        tree = lib.Web.load(chapter['url']).build_tree()
        for node in tree.xpath("//article[@id='content']//img"):
            link = node.get('src')
            if link == '': continue
            link = urljoin(chapter['url'], link)
            link = link.replace("imgmax=2048", "imgmax=1024")
            imagesList.append(link)
        print imagesList
        output = path + "/files/{0}".format(chapter['chapid'])
        if not os.path.exists(os.path.dirname(output)):
            os.makedirs(os.path.dirname(output), 0777)
        lib.gz_file_put_content(lib.encryptCipher(json.dumps(imagesList)), output)
    
    def getChapters(self, url):
        tree = lib.Web.load(url, cached=True).build_tree()
        data = []
        for node in tree.xpath("//div[@id='list-chapters']//span[@class='title']/a"):
            name = lib.stringify(node)
            link = node.get('href')
            if link == '': continue
            link = urljoin(url, link)
            data.append({'name': name, 'url': link, 'chapid': lib.md5(link)})
            print name, link
#         data.reverse()
        return data
    
    
    def crawl(self, url):
        output = lib.extractText(r"/truyen/(.+)", url, 1)
        path = "/longhoanggiang/comic/{0}".format(output)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        
        chapters = self.getChapters(url)
        saveChapters = copy.deepcopy(chapters)
        for chapter in saveChapters:
            del chapter['url']
        
        data = lib.encryptCipher(json.dumps(saveChapters))
        lib.gz_file_put_content(data, path + "/data")
        lib.file_put_content("", path + "/description")
        
        pool = workerpool.WorkerPool(size=2)
        for chapter in chapters:
            pool.put(CrawlerJob(self.getDetails, chapter, path))
        pool.shutdown()
        pool.wait()
        
    def crawlMultiples(self, urls, output):
        if len(urls) == 0: return
        path = "/longhoanggiang/comic/{0}".format(output)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        chapters = []
        for urlItem in urls:
            url = urlItem['url']
            prefix = urlItem['prefix']
            ichapters = self.getChapters(url)
            for c in ichapters:
                if prefix != "":
                    c['name'] = "{0} - {1}".format(prefix, c['name'])
                chapters.append(c)
        saveChapters = copy.deepcopy(chapters)
        for chapter in saveChapters:
            del chapter['url']
        data = lib.encryptCipher(json.dumps(saveChapters))
        lib.gz_file_put_content(data, path + "/data")
        lib.file_put_content("", path + "/description")
        pool = workerpool.WorkerPool(size=2)
        for chapter in chapters:
            pool.put(CrawlerJob(self.getDetails, chapter, path))
        pool.shutdown()
        pool.wait()
        
if __name__ == '__main__':
    # Entry point: crawl one comic and force-terminate when done.
    crawler = Crawler()
    crawler.crawl('http://blogtruyen.com/truyen/hiep-khach-giang-ho')

    logging.info("Finished")
    # BUG FIX: os._exit(1) reported a failure status to the OS even on the
    # success path; exit with 0. os._exit (rather than sys.exit) is kept so
    # any lingering worker-pool threads cannot block process shutdown.
    os._exit(0)
    
    
    