# -*- coding: utf-8 -*-
'''
Created on Jan 4, 2013

@author: LONG HOANG GIANG
'''

from CrawlerLib2 import commonlib
from urlparse import urljoin
import datetime
import os
import json
import workerpool

# Base output directory for all scraped series.
# NOTE(review): apparently unused in this file — the class below defines
# its own `pathfm` template with the same prefix; confirm before removing.
PATH = '/longhoanggiang/truyentranh/'

class TruyenTranhTuan(object):
    """Scraper for truyentranhtuan.com.

    Downloads the chapter index of a manga series and, for each chapter,
    the list of page-image URLs, storing each as a JSON file under a
    per-series output directory.
    """

    # Template for the per-series output directory; {0} is the series slug.
    pathfm = "/longhoanggiang/truyentranh/{0}/"

    def __init__(self, url):
        """Prepare the output directory for the series at *url*.

        The series slug (first path segment after the domain) names the
        directory; falls back to "untitled" when it cannot be extracted.
        """
        self.url = url
        sub = commonlib.extractText("truyentranhtuan.com/([^/]+)/", self.url, 1)
        if not sub:
            sub = "untitled"
        self.path = self.pathfm.format(sub)
        if not os.path.isdir(self.path):
            # 0o777 is the same mode as the original 0777 literal and is
            # valid syntax on both Python 2.6+ and Python 3.
            os.makedirs(self.path, 0o777)

    def getListChapter(self, url):
        """Return the chapter list as [{'title': ..., 'url': ...}, ...].

        Parses the series page at *url* and resolves each chapter link
        against it; anchors without an href are skipped.
        """
        tree = commonlib.loadweb(url).build_tree()
        data = []
        for node in tree.xpath("//div[@id='content-main']//td[contains(@class, 'tbl_body')]//a"):
            title = commonlib.stringify(node)
            link = node.get('href', '')
            if not link:
                continue
            data.append({'title': title, 'url': urljoin(url, link)})
        return data

    def getChapterImageLink(self, item):
        """Return the sorted, absolute page-image URLs for one chapter.

        The chapter page embeds its image list as a JS array literal
        (var slides2=["...","..."]); extract it, resolve each entry
        against the chapter URL, and sort lexicographically.
        """
        url = item['url']
        html = commonlib.loadweb(url).gethtml()
        images = commonlib.extractText("var slides2=\[\"([^\]]+)\"\]", html, 1).split('","')
        images = [urljoin(url, img) for img in images]
        # list.sort() produces the same ascending lexicographic order as
        # the original hand-rolled O(n^2) exchange sort, in O(n log n).
        images.sort()
        return images

    def workerjob(self, chapter):
        """Fetch one chapter's image list and dump it as JSON to
        <series dir>/<chapter number>."""
        images = self.getChapterImageLink(chapter)
        chapterNum = commonlib.extractText("/(\d+)/", chapter['url'], 1)
        data = {'title': chapter['title'], 'images': images}
        # 'with' guarantees the file is closed even if json.dump raises.
        with open(self.path + chapterNum, 'w') as fp:
            json.dump(data, fp)
        print(chapter['title'])

    def process(self):
        """Crawl the whole series: write the chapter index as JSON, then
        fetch every chapter's image list on a pool of 5 worker threads."""
        chapters = self.getListChapter(self.url)
        with open(self.path + "index", 'w') as fp:
            json.dump(chapters, fp)
        pool = workerpool.WorkerPool(size=5)
        pool.map(self.workerjob, chapters)
        pool.shutdown()
        pool.wait()
            
    
if __name__ == '__main__':

    # Crawl one hard-coded series; the chapter index and per-chapter
    # image lists are written under the class's output directory.
    c = TruyenTranhTuan('http://truyentranhtuan.com/dai-duong-song-long-truyen/')
    c.process()

    print('> Finished at {0}'.format(datetime.datetime.now()))
    # os._exit skips normal interpreter shutdown so any lingering pool
    # threads cannot keep the process alive. Exit status 0 signals
    # success — the original passed 1, which shells read as failure.
    os._exit(0)