# -*- coding: utf-8 -*-
'''
Created on Apr 15, 2012

@author: LONG HOANG GIANG
'''
import sys, os
import traceback
import workerpool
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib import Http, commonlib
from urlparse import urljoin
import datetime
import re
import json
import gzip
import threading

class Manga24():
    """Crawler for manga24h.com.

    Fetches a series' chapter list, downloads each chapter's image URLs in a
    5-worker thread pool, and writes the result to an output file as
    gzip-compressed JSON.
    """

    # Chapter records ({'chapter', 'images', 'idx'}) collected by worker
    # threads; reset at the start of every process() run.
    ssData = []
    # Attempt counter for process(); the run hard-exits after 3 retries.
    execution_times = 1
    # Single shared lock guarding ssData appends from worker threads.
    # BUGFIX: the original created a fresh RLock per getChapter() call,
    # which synchronized nothing.
    _ssData_lock = threading.RLock()

    def __init__(self):
        pass

    def get_cookie_str(self):
        """Return the cookie string the site expects, with an expiry ~1000h ahead."""
        expires = commonlib.toUTCTimeString(
            datetime.datetime.now() + datetime.timedelta(seconds=3600000))
        return "location.href;expires={0}".format(expires)

    def build_url_from_id(self, sId):
        """Return sId unchanged if it is already an http URL, else build the
        chapter-list URL for that series id."""
        if sId.startswith('http://'):
            return sId
        return 'http://manga24h.com/{0}/a.html'.format(sId)

    def validate_output(self, output):
        """Normalize `output` to have an explicit directory and ensure that
        directory exists. Returns the (possibly prefixed) path."""
        if os.path.dirname(output) == '':
            output = './' + output
        if not os.path.isdir(os.path.dirname(output)):
            os.makedirs(os.path.dirname(output), 0o777)
        return output

    def getChapter(self, url, idx):
        """Fetch one chapter page; append {'chapter', 'images', 'idx'} to ssData.

        Runs inside worker-pool threads; `idx` preserves the chapter's position
        in the original listing so results can be re-sorted afterwards.
        Returns {'chapter': name, 'images': [...]}. On any error, prints the
        traceback and raises SystemExit (ends this worker thread).
        """
        print('getChapter url = {0}'.format(url))
        try:
            tree = Http.getXMLTree(url, cookie=self.get_cookie_str())
            name = commonlib.stringify(tree.xpath("//span[@class='manga_name']"))
            images = []
            for item in tree.xpath("//ul[@id='portfolio']/li/img"):
                src = commonlib.normalize_str(item.get('src', ''))
                if src != '':
                    # BUGFIX: resolve relative srcs against the page URL, not
                    # the literal string 'url'.
                    images.append(urljoin(url, src))
            if images:
                with self._ssData_lock:
                    self.ssData.append({'chapter': name, 'images': images, 'idx': idx})
            return {'chapter': name, 'images': images}
        except Exception:
            traceback.print_exc()
            sys.exit(1)

    def retry(self, url, output):
        """Re-run process(); the number of attempts is bounded by execution_times."""
        self.process(url, output)

    def process(self, url_or_id, output):
        """Crawl every chapter of a series and write them to `output` as gzip JSON.

        `url_or_id` may be a full chapter-list URL or a bare series id.
        Retries up to 3 times on failure, then terminates the process.
        """
        output = self.validate_output(output)
        if self.execution_times > 3:
            print('>> Max retry times > 3')
            os._exit(1)
        self.execution_times += 1
        # BUGFIX: build_url_from_id already passes full URLs through unchanged,
        # so apply it unconditionally. The old regex test left bare numeric ids
        # ("65") unconverted, producing an invalid URL.
        url = self.build_url_from_id(url_or_id)
        print(url)
        try:
            tree = Http.getXMLTree(url, cookie=self.get_cookie_str())
            if tree is None:
                # BUGFIX: the old code formatted sys.exc_info() when no
                # exception had occurred, yielding a meaningless message.
                raise Exception('Failed to fetch chapter list from {0}'.format(url))
            urls = []
            for item in tree.xpath("//table[@class='mytable']//tr/th/a"):
                link = commonlib.normalize_str(item.get('href', ''))
                if link != '':
                    urls.append(urljoin('http://manga24h.com', link))
            # One positional index per accepted link (identical to the old
            # manually-incremented counter).
            idx = list(range(len(urls)))
            self.ssData = []
            pool = workerpool.WorkerPool(size=5)
            pool.map(self.getChapter, urls, idx)
            pool.shutdown()
            pool.wait()
            # Workers finish in arbitrary order; restore listing order.
            self.ssData = sorted(self.ssData, key=lambda k: k['idx'])
            for item in self.ssData:
                print(item['chapter'])
            fp = gzip.open(output, 'wb')
            try:
                # BUGFIX: close the gzip file even if serialization/write fails.
                fp.write(json.dumps(self.ssData))
            finally:
                fp.close()
        except Exception:
            traceback.print_exc()
            self.retry(url_or_id, output)
        return
    

#if __name__ == '__main__':
#    
#    a = Manga24()
#    a.process("http://manga24h.com/65/BLEACH.html", "/home5/vietcntt/public_html/site-api-vietcntt/res/truyen/bleach.data")
#    print '>> Finished at {0}'.format(commonlib.getGMT7Time())
#    os._exit(1)