# -*- coding: utf-8 -*-
'''
Created on Apr 17, 2012

@author: LONG HOANG GIANG
'''

import sys, os
import workerpool
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import datetime
import re
import gzip
import traceback
import MySQLdb
import simplejson as json
from CrawlerLib import Http, commonlib, Log
from urlparse import urljoin
from MySQLdb import escape_string

DBNAME = 'vietcntt_api_vietcntt'
DBUSER = 'vietcntt_api'
DBPASS = 'n6&S01?PXDwM'

def getConnection(dbName, userName, password, hostName='localhost'):
    """Open a utf8 MySQL connection; return it, or None after printing the error."""
    params = dict(host=hostName, user=userName, passwd=password,
                  db=dbName, charset='utf8', use_unicode=True)
    try:
        return MySQLdb.connect(**params)
    except MySQLdb.Error, e:
        print 'Error %d: %s' % (e.args[0], e.args[1])
        return None

def error_log(msg):
    """Append msg (plus a newline) to ./error.log.

    Fixes the original's leak-prone pattern: open() never returns None (it
    raises on failure), so the `if f != None` guard was dead code, and the
    file handle was not closed on a write error. `with` closes it always.
    """
    with open('error.log', 'a') as f:
        f.write("{0}\n".format(msg))

def createTable():
    """Create the gq_ecchi storage table (id, metadata, file pointer) if absent.

    BUGFIX: the original called conn.comit() — a typo that raised
    AttributeError every run; it must be conn.commit(). (MyISAM autocommits
    DDL anyway, but the typo still crashed the function.)
    """
    conn = getConnection(DBNAME, DBUSER, DBPASS, hostName='localhost')
    if conn is None:
        return
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS gq_ecchi(
            `id` INT UNSIGNED AUTO_INCREMENT,
            `name` VARCHAR(200),
            `author` VARCHAR(150),
            `description` TEXT,
            `tag` VARCHAR(200),
            `url` VARCHAR(200),
            `hashUrl` VARCHAR(100),
            `file` VARCHAR(255),
            PRIMARY KEY (`id`),
            INDEX(name),
            INDEX(hashUrl)
        ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
    """)
    cursor.close()
    conn.commit()
    conn.close()
    return

def standardImgTag(img):
    """Rewrite a scraped image URL to its full-size form.

    Applies an ordered list of regex substitutions that strip Google proxy
    wrappers, undo percent-encoding artifacts, and upgrade thumbnail path
    segments (s250/, /s160/, thumb/, ...) to their s1600 / big originals.
    Order matters: each substitution sees the output of the previous one.
    """
    rules = [
        ("s\.(jpg|JPG|png|PNG|gif|GIF|bmp|BMP)", ".\\1"),
        ("s250/", "s1600/"),
        ("%20", "%2520"),
        ("%2f", "/"),
        ("%3a", ":"),
        ("http://images0-focus-opensocial\.googleusercontent", ''),
        ("https://images0-focus-opensocial\.googleusercontent", ''),
        ("http://images-focus-opensocial\.googleusercontent", ''),
        ("https://images-focus-opensocial\.googleusercontent", ''),
        ("http://images2-focus-opensocial\.googleusercontent", ''),
        ("https://images2-focus-opensocial\.googleusercontent", ''),
        ("\.com/gadgets/proxy\?container=focus&gadget=a&no_expand=1&resize_h=0&rewriteMime=image%2F\*&url=", ''),
        ("\.com/gadgets/proxy\?container=focus&gadget=vn&no_expand=1&refresh=31536000&resize_w=1600&rewriteMime=image%2F\*&url=", ''),
        ("lh3", '1'),
        ("lh4", '2'),
        ("lh5", '3'),
        ("lh6", '4'),
        ("ggpht", 'bp.blogspot'),
        ("googleusercontent", 'bp.blogspot'),
        ("w642h", '1024x768'),
        ("/s160/", "/s1600/"),
        ("/s72/", "/s1600/"),
        ("/s94/", "/s1600/"),
        ("/s110/", "/s1600/"),
        ("/s128/", "/s1600/"),
        ("/s288/", "/s1600/"),
        ("/s320/", "/s1600/"),
        ("/s800/", "/s1600/"),
        ("thumb/", "big/"),
        ("jpeg", "jpg"),
        ("jpeeg", "jpeg"),
        ("\.(jpg|JPG|png|PNG|gif|GIF|bmp)", ".\\1?imgmax=1600"),
    ]
    for pat, rep in rules:
        img = re.sub(pat, rep, img)
    return img

def blockImg(img):
    """Return True when the URL is empty or matches a known junk/banner image."""
    if img == '':
        return True
    blocked = ["recruiting\d\.[a-z]+", "coppy-truyen18\.org\.[a-z]",
               "x+\.(jpg|png|gif|bmp)", "khung\.(jpg|png|gif|bmp)"]
    return any(re.search(pat, img) for pat in blocked)

def getListImagesOfChapter(chapterName, url):
    """Fetch a chapter page and collect its (normalized) image URLs.

    Returns {'chapter': chapterName, 'images': [...]} on success, or None
    when the page cannot be fetched/parsed or yields no usable images.

    BUGFIX: the original checked `data['images'] == 0`, which compares a
    list to an int (always False), so image-less chapters were never
    filtered out. Also narrowed the bare `except:` (which even swallowed
    KeyboardInterrupt) to `except Exception` and logged the traceback
    instead of failing silently.
    """
    print('>>> process chapter {0} : {1}'.format(chapterName, url))
    data = {'chapter': chapterName, 'images': []}
    try:
        html = Http.getHtml(url)
        # Forum-style [img]...[/img] markers -> real <img> tags so the HTML
        # parser exposes them to the xpath below.
        html = re.sub("\[img\]|\[IMG\]", "<img src='", html)
        html = re.sub("\[/img\]|\[/IMG\]", "' />", html)
        tree = commonlib.build_tree_from_html(html)
        if tree is None:
            return None
        for item in tree.xpath("//textarea[@id='truyen18-eedit']/p//img"):
            img = commonlib.normalize_str(item.get('src', ''))
            img = urljoin(url, img)
            img = standardImgTag(img)
            if blockImg(img):
                continue
            print(img)
            data['images'].append(img)
        if not data['images']:
            return None
    except Exception:
        traceback.print_exc()
        return None
    return data

def escape_string(string):
    """Backslash-escape \\, ' and " so the value can sit inside a quoted SQL literal.

    BUGFIX: the original used re.sub("'", "\'", ...) — but "\'" is just "'"
    (and '\"' is just '"'), so the function replaced each quote with itself:
    a complete no-op that left the string-built SQL broken/injectable.
    Backslashes are escaped first so previously-added escapes are not doubled.

    NOTE(review): this def shadows MySQLdb.escape_string imported above;
    presumably intentional, but worth confirming.
    """
    string = string.replace("\\", "\\\\")
    string = string.replace("'", "\\'")
    string = string.replace('"', '\\"')
    return string

def getStory(name, url):
    """Crawl one story page: scrape metadata, collect every chapter's image
    list, dump the result as gzipped JSON under output_folder, and upsert a
    row in gq_ecchi keyed by md5(url).

    Returns the stored file's basename on success, False on failure.
    Relies on module globals output_folder and logger (set in __main__).

    SECURITY FIX: the SQL is now parameterized via cursor.execute(sql, args)
    instead of string formatting — name/author/description/tag come straight
    from crawled (untrusted) pages, and the home-grown escape_string was a
    no-op, so the old code was open to SQL injection / malformed statements.
    """
    print('>> process story {0} : {1}'.format(name, url))
    try:
        tree = Http.getXMLTree(url)
        if tree is None:
            logger.warning("build tree failed")
            return False
        # Thumbnail is scraped for parity with the page layout but not stored below.
        thumbnail = ''
        thumbNode = tree.xpath("//div[@class='barContent']/div[2]//tr[1]/td[1]/img")
        if len(thumbNode) > 0:
            thumbnail = commonlib.normalize_str(thumbNode[0].get('src', ''))
            if thumbnail != '':
                thumbnail = urljoin(url, thumbnail)
        # Vietnamese labels; .decode('utf-8') turns the py2 byte literal into
        # unicode for lxml's xpath.
        author = 'Đang cập nhật'
        authorNode = tree.xpath("//span[contains(., 'Tác giả')]/following-sibling::*[1]".decode('utf-8'))
        if len(authorNode) > 0:
            author = commonlib.stringify(authorNode)
        descriptionNode = tree.xpath("//span[contains(., 'Sơ lược')]/following-sibling::*[1]".decode('utf-8'))
        description = commonlib.stringify(descriptionNode)
        theloaiNode = tree.xpath("//span[contains(., 'Thể loại')]/..".decode('utf-8'))
        tag = commonlib.stringify(theloaiNode)
        tag = re.sub('Thể loại: ', '', tag)
        data = []
        for item in tree.xpath("//div[@class='barContent chapterList']//table[@class='listing']//tr[position()>1]/td[1]/a"):
            link = commonlib.normalize_str(item.get('href', ''))
            if link == '':
                continue
            link = urljoin(url, link)
            chapterName = commonlib.stringify(item)
            chapter = getListImagesOfChapter(chapterName, link)
            if chapter is not None:
                data.append(chapter)
        filePath = '{0}{1}'.format(output_folder, commonlib.crc32unsigned('{0}{1}'.format(name, url)))
        f = gzip.open(filePath, 'wb')
        try:
            f.write(json.dumps(data))
        finally:
            f.close()
        hashUrl = commonlib.md5(url)
        try:
            conn = getConnection(DBNAME, DBUSER, DBPASS, hostName='localhost')
            if conn is not None:
                cursor = conn.cursor()
                cursor.execute("SELECT id FROM gq_ecchi WHERE hashUrl=%s", (hashUrl,))
                row = cursor.fetchone()
                fields = (name, author, description, tag, url, os.path.basename(filePath), hashUrl)
                if row is not None:
                    cursor.execute(
                        "UPDATE gq_ecchi SET name=%s, author=%s, description=%s, tag=%s, url=%s, file=%s, hashUrl=%s WHERE id=%s",
                        fields + (row[0],))
                else:
                    cursor.execute(
                        "INSERT INTO gq_ecchi(name, author, description, tag, url, file, hashUrl) VALUES(%s, %s, %s, %s, %s, %s, %s)",
                        fields)
                cursor.close()
                conn.commit()
                conn.close()
        except Exception:
            # Best effort: keep the data file even if the DB upsert fails.
            traceback.print_exc()
        return os.path.basename(filePath)
    except Exception:
        exceptionMsg = traceback.format_exc()
        error_log("{0} - {1} - {2}".format(datetime.datetime.now(), exceptionMsg, url))
    return False
            
def process(url):
    """Walk the paginated story listing starting at url, gather every story's
    name and link, then fan getStory out over a two-worker pool.

    On any exception the current page url is appended to error.log.
    """
    try:
        data = {'name': [], 'link': []}
        while url != '':
            logger.debug('process url {0}'.format(url))
            tree = Http.getXMLTree(url)
            if tree is None:
                logger.warning("Can't build tree from html")
                break
            anchors = tree.xpath("//div[@class='barContent']//table[@class='listing']//tr[position()>1]/td[1]/a")
            for anchor in anchors:
                href = commonlib.normalize_str(anchor.get('href', ''))
                if href == '':
                    continue
                href = urljoin(url, href)
                # Listing links point at /truyen/; the story pages live under /hentai/.
                href = re.sub(r"/truyen/", "/hentai/", href)
                data['name'].append(commonlib.stringify(anchor))
                data['link'].append(href)
            # Pager: the <a> right after the "current" page, if any.
            nextUrlNode = tree.xpath("//ul[@class='pager']/li[@class='current']/following-sibling::*[1]/a")
            if not nextUrlNode:
                break
            nUrl = nextUrlNode[0].get('href', '')
            url = urljoin(url, nUrl) if nUrl != '' else ''
        print('>> OK! Done, load item success full')
        pool = workerpool.WorkerPool(size=2)
        pool.map(getStory, data['name'], data['link'])
        pool.shutdown()
        pool.wait()
    except:
        traceback.print_exc()
        error_log(url)
    return
        
if __name__ == '__main__':

    #createTable()           # create table gq_ecchi if not exists
    # Output folder for the gzipped chapter-image JSON blobs written by getStory().
    output_folder = '/home5/vietcntt/public_html/site-api-vietcntt/res/truyen/ecchi-truyen-18/'
    # 0o777 octal literal works on both Python 2.6+ and 3 (0777 is py2-only syntax).
    if not os.path.exists(output_folder): os.makedirs(output_folder, 0o777)
    if not output_folder.endswith('/'): output_folder += '/'
    logger = Log.getLogger("ecchi_tuyen18")

    url = 'http://www.truyen18.org/TheLoai-XemNhieu/ecchi-18/page/1.html'
    process(url)
#    getStory('Bitch Trap', 'http://www.truyen18.org/hentai/bitch-trap-18/3185.html')
    logger.debug("Finished at {0}".format(datetime.datetime.now()))
    # BUGFIX: exit status 0 — the original os._exit(1) told the shell every
    # successful run had failed. os._exit (not sys.exit) is kept, presumably
    # so lingering workerpool threads cannot block shutdown — TODO confirm.
    os._exit(0)
