# -*- coding: utf-8 -*-
'''
Created on Apr 24, 2012

@author: LONG HOANG GIANG
'''
import sys, os
import traceback
import workerpool
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib import Http, commonlib
from urlparse import urljoin
import re
import simplejson as json
import gzip
import threading


class Vechai():
    
    ssData = []
    execution_times = 1
    
    def __init__(self):
        pass
    
    def validate_output(self, output):
        if os.path.dirname(output) == '': output = './' + output
        if not os.path.isdir(os.path.dirname(output)): os.makedirs(os.path.dirname(output), 0777)
        return output
    
    def standard_image_tag(self, html):
        ss = [("http://(ca|cz|va|vb|cb)[0-4]\.upanh", "http://ca2.upanh"),
                   ("http://(ca|cz|va|vb|cb)[5-9]\.upanh", "http://cb0.upanh"),
                   ("img-photo\.apps\.zing\.vn", "d.f1.photo.zdn.vn"),
                   ("img2-photo\.apps\.zing\.vn", "d.f4.photo.zdn.vn"),
                   ("img\.photo\.zing\.vn", "d.f2.photo.zdn.vn"),
                   ("t\.f6\.photo\.zdn\.vn", "d.f6.photo.zdn.vn"),
                   ("(s160|s72|s94|s110|s128|s228|s800)/", "s1600/"),
                   ("smalls|w160h|w240|bigs500x320|w642|sources", "1024x768"),
                   ("gguct", ""), ("http(s)?://lh", "http://"),
                   ("http(s)?://images\d*-focus-opensocial\.googleusercontent", ""),
                   ("\.com/gadgets/proxy\?container=focus&gadget=a&no_expand=1&resize_h=0&rewriteMime=image%2F\*&url=", ""),
                   ("\.com/gadgets/proxy\?container=focus&gadget=a&no_expand=1\&refresh=31536000&resize_w=1600&rewriteMime=image%2F\*&url=", ""),
                   ("prefocus", "images-focus-opensocial.googleusercontent"),
                   ("googleusercontent|ggpht", "bp.blogspot"),
                   ("\.(jpg|JPG|png|PNG|gif|GIF|bmp|BMP)", ".\\1?imgmax=1600"),
                   ("%252520", ""), ("%20", "%2520"), ("%3a", ":"), ("%2f", "/"),
                   ("s\.(jpg|JPG|png|PNG|gif|GIF|bmp|BMP)", ".\\1")
                   ]
        result = html
        for pat, rpl in ss:
            result = re.sub(r"{0}".format(pat), rpl, result)
        return result
    
    def is_block_image(self, img):
        block = ["credit.*(copy)?\.(jpg|JPG|png|PNG|gif|GIF)"]
        for p in block:
            if re.search(p, img): return True
        return False 

    def getChapter(self, url, name, idx):
        print 'getChapter url = {0}'.format(url)
        try:
            html = self.standard_image_tag(Http.getHtml(url))
            tree = commonlib.build_tree_from_html(html)
            images = []
            list_of_xpath = ["//*[@id='vcfix']//img", "//div[@class='entry']/div[@class='entry1']//img"]
            nodeList = []
            for xp in list_of_xpath:
                try:
                    nodeList = tree.xpath(xp)
                    if len(nodeList) > 5: break
                except:
                    continue
            lpNode = None
            lppNode = None
            for item in nodeList:
                src = commonlib.normalize_str(item.get('src', ''))
                # loại bỏ một số image quảng cáo phía cuối, do không xác định được xpath cố định nên check parent node và parent of parent node
                pNode = item.xpath("./..")
                ppNode = item.xpath("./../..")
                if len(pNode) > 0: pNode = pNode[0]
                if len(ppNode) > 0: ppNode = ppNode[0]
                if lpNode == None: lpNode = pNode
                if lppNode == None: lppNode = ppNode
                if lpNode != None and lppNode != None and ppNode != None and ppNode != None:
                    if lpNode.tag != pNode.tag or lppNode.tag != ppNode.tag: continue
                if src != '':
                    src = urljoin('url', src)
                    if self.is_block_image(src): continue
                    print src
                    images.append(src)
            if len(images)>0:
                lock = threading.RLock()
                lock.acquire()
                try:
                    self.ssData.append({'chapter': name, 'images': images, 'idx': idx})
                finally:
                    lock.release()
        except:
            traceback.print_exc()
            sys.exit(1)
        return
            
    def retry(self, url, output):
        self.process(url, output)
    
    def process(self, url, output):
        output = self.validate_output(output)
        if self.execution_times > 3:
            print '>> Max retry times > 3' 
            os._exit(1)
        self.execution_times += 1
        try:
            tree = Http.getXMLTree(url)
            if tree == None: raise Exception, traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
            items = {'url': [], 'idx': [], 'name': []}
            pos = 0
            for xp in ["//div[@class='baitonghop']/a[contains(@href, 'vechai')]",
                       "//strong[.='READ ONLINE']/../../following-sibling::*[name()='a']"]:
                nodeList = tree.xpath(xp)
                if len(nodeList) == 0: continue
                for item in nodeList:
                    link = commonlib.normalize_str(item.get('href', '')).strip()
                    if 'adf.ly' in link: continue
                    if link != '':
                        link = urljoin(url, link)
                        items['url'].append(link)
                        items['idx'].append(pos)
                        items['name'].append(commonlib.normalize_str(commonlib.stringify(item)))
                        pos += 1
                self.ssData = []
                pool = workerpool.WorkerPool(size=3)
                pool.map(self.getChapter, items['url'], items['name'], items['idx'])
                pool.shutdown()
                pool.wait()
                self.ssData = sorted(self.ssData, key=lambda k: k['idx'])
    #            if len(self.ssData) != len(items['url']): raise 
                for item in self.ssData: print item['chapter']
                fp = gzip.open(output, 'wb')
                jsondata = json.dumps(self.ssData)
                fp.write(jsondata)
                fp.close()
        except:
            traceback.print_exc()
            self.retry(url, output)
        return

if __name__ == '__main__':
    
    # Crawl the "Phong Van" comic and dump its chapter data as gzipped JSON.
    a = Vechai()
    a.process('http://vechai.info/phong-van/', '/home5/vietcntt/public_html/site-api-vietcntt/res/truyen/phongvan.data')
    # NOTE(review): os._exit skips all cleanup -- presumably to kill any
    # lingering workerpool threads -- but status 1 signals failure to the
    # shell even on success; confirm whether callers depend on this code.
    os._exit(1)
