# -*- coding: utf-8 -*-
'''
Created on Apr 27, 2012

@author: LONG HOANG GIANG
'''
import sys, os
import traceback
import workerpool
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib import Http, commonlib
from urlparse import urljoin
import datetime
import re
import simplejson as json
import gzip
import threading

class Truyen18():
    """Crawler for truyen18.org comic chapters.

    `process(url)` reads a chapter listing page, fetches every chapter
    concurrently with a workerpool, collects the image URLs of each chapter,
    and saves the result as gzip-compressed JSON.
    """

    # Shared result buffer; appended to concurrently by process_chapter
    # workers and flushed/sorted by process().
    ssData = []
    # One lock shared by all worker threads guarding ssData.
    # NOTE: the original code built a fresh RLock inside process_chapter on
    # every call, which synchronized nothing.
    _lock = threading.RLock()

    def __init__(self):
        pass

    def standardImgTag(self, img):
        """Rewrite an image URL so it points at the full-size original.

        Applies an ordered list of (pattern, replacement) regex rewrites:
        strips Google proxy prefixes, upgrades thumbnail size segments
        (s250/s160/... -> s1600), normalizes percent-encoding and the
        jpeg/jpg extension, and appends ?imgmax=1600. Order matters, so the
        pairs are applied sequentially.
        """
        pattern = [r"s\.(jpg|JPG|png|PNG|gif|GIF|bmp|BMP)", r"s250/", r"%20", r"%2f", r"%3a", r"http://images0-focus-opensocial\.googleusercontent",
                   r"https://images0-focus-opensocial\.googleusercontent", r"http://images-focus-opensocial\.googleusercontent", r"https://images-focus-opensocial\.googleusercontent", r"http://images2-focus-opensocial\.googleusercontent",
                   r"https://images2-focus-opensocial\.googleusercontent", r"\.com/gadgets/proxy\?container=focus&gadget=a&no_expand=1&resize_h=0&rewriteMime=image%2F\*&url=",
                   r"\.com/gadgets/proxy\?container=focus&gadget=vn&no_expand=1&refresh=31536000&resize_w=1600&rewriteMime=image%2F\*&url=",
                   r"lh3", r"lh4", r"lh5", r"lh6", r"ggpht", r"googleusercontent", r"w642h", r"/s160/", r"/s72/", r"/s94/", r"/s110/", r"/s128/", r"/s288/", r"/s320/", r"/s800/",
                   r"thumb/", r"jpeg", r"jpeeg", r"\.(jpg|JPG|png|PNG|gif|GIF|bmp)"]
        repl = [".\\1", "s1600/", "%2520", "/", ":", '', '', '', '', '', '', '', '', '1', '2', '3', '4', 'bp.blogspot', 'bp.blogspot', '1024x768', "/s1600/", "/s1600/", "/s1600/", "/s1600/", "/s1600/", "/s1600/", "/s1600/", "/s1600/",
                "big/", "jpg", "jpeg", ".\\1?imgmax=1600"]

        for pat, rep in zip(pattern, repl):
            img = re.sub(pat, rep, img)
        return img

    def blockImg(self, img):
        """Return True when the image URL should be skipped.

        Blocks empty URLs and known junk images (recruiting banners, the
        site watermark/frame, "hoantat" end markers, placeholder x images).
        """
        if img == '':
            return True
        blacklist = [r"recruiting\d\.[a-z]+", r"coppy-truyen18\.org\.[a-z]",
                     r"x+\.(jpg|png|gif|bmp)", r"khung\.(jpg|png|gif|bmp)",
                     r"hoantat\..+"]
        for pat in blacklist:
            if re.search(pat, img):
                return True
        return False

    def process_chapter(self, chapter):
        """Fetch one chapter page and append its image list to ssData.

        `chapter` is a dict with keys 'url', 'name' and 'idx' (listing
        order, used later for sorting). Runs on workerpool threads; all
        errors are logged and swallowed so one bad chapter cannot kill the
        crawl.
        """
        url = chapter['url']
        name = chapter['name']
        print('>> process_chapter {0} : {1}'.format(name, url))
        try:
            html = Http.getHtml(url)
            # Convert BBCode [img]...[/img] into HTML so the tree parser
            # exposes them as <img> nodes.
            html = re.sub(r"\[img\]|\[IMG\]", "<img src='", html)
            html = re.sub(r"\[/img\]|\[/IMG\]", "' />", html)
            tree = commonlib.build_tree_from_html(html)
            if tree is None:
                return
            idata = {'chapter': name, 'images': [], 'idx': chapter['idx']}
            for item in tree.xpath("//textarea[@id='truyen18-eedit']/p//img"):
                img = commonlib.normalize_str(item.get('src', ''))
                img = urljoin(url, img)
                img = self.standardImgTag(img)
                if self.blockImg(img):
                    continue
                print(img)
                idata['images'].append(img)
            # BUGFIX: the original compared the list itself to 0 (always
            # False); skip chapters that yielded no usable images.
            if not idata['images']:
                return
            # Guard the shared buffer with the class-level lock.
            with self._lock:
                self.ssData.append(idata)
        except Exception:
            traceback.print_exc()
        return

    def process(self, url, output='/truyen18.out'):
        """Crawl the chapter listing at `url` and save results to `output`.

        Extracts chapter links from the listing table, fetches them with 4
        worker threads, sorts the collected chapters back into listing
        order, and writes gzip-compressed JSON to `output`.
        """
        tree = Http.getXMLTree(url)
        if tree is None:
            return
        items = []
        for item in tree.xpath("//table[@class='listing']/tbody/tr[position()>1]/td[1]/a"):
            name = commonlib.stringify(item)
            link = commonlib.normalize_str(item.get('href', ''))
            if link == '':
                continue
            # idx == position among kept links, preserved via len(items).
            items.append({'name': name, 'url': urljoin(url, link), 'idx': len(items)})
        pool = workerpool.WorkerPool(size=4)
        pool.map(self.process_chapter, items)
        pool.shutdown()
        pool.wait()

        # Restore listing order, then drop the bookkeeping 'idx' key.
        self.ssData = sorted(self.ssData, key=lambda k: k['idx'])
        for item in self.ssData:
            del item['idx']
            print(item['chapter'])

        fp = gzip.open(output, 'wb')
        try:
            fp.write(json.dumps(self.ssData))
        finally:
            fp.close()
        print('(!) saved data to {0}'.format(output))
        return
        
    
if __name__ == '__main__':

    crawler = Truyen18()
    crawler.process('http://www.truyen18.org/truyen/truyen-nhan--atula-ii/2464.html', '/truyennhanatula2.data')
    print('>> Finished at {0}'.format(commonlib.getGMT7Time()))
    # BUGFIX: exit status was 1, signalling failure on the success path.
    # os._exit (rather than sys.exit) is kept deliberately so any lingering
    # workerpool threads cannot block interpreter shutdown.
    os._exit(0)