# -*- coding: utf-8 -*-
'''
Created on Apr 10, 2012

@author: LONG HOANG GIANG
'''
from BeautifulSoup import BeautifulSoup
from difflib import SequenceMatcher
from lxml import etree
from readability.readability import Document
from urllib import urlencode
import cStringIO as StringIO
import datetime
import gzip
import hashlib
import mechanize
import mimetypes
import os
import pickle
import re
import sys
import time
import traceback
import urllib
import zlib
import copy
import html2text as _html2text

# Default User-Agent header sent by loadweb (Firefox 16 on Windows 8)
USERAGENT = 'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0'
# Sentinel returned by download helpers on failure
STRING_EMPTY = ''

def pickle_dump(path, data, base=__file__):
    '''
    Pickle ``data`` into the file at ``path``.

    @param path: path of file to save data in (joined with ``base``'s directory)
    @param data: data to save in file
    @param base: base path whose directory is joined with ``path``
    '''
    base = os.path.dirname(base)
    path = os.path.join(base, path)
    # BUG FIX: pickle streams are binary; the original opened in text mode
    # 'w', which corrupts the stream on platforms translating line endings
    f = open(path, 'wb')
    try:
        pickle.dump(data, f)
    finally:
        f.close()
        
def pickle_load(path, base=__file__):
    '''
    Unpickle and return the data stored at ``path``.

    @param path: path of the pickle file (joined with ``base``'s directory)
    @param base: base path whose directory is joined with ``path``
    @return: the unpickled object, or None when the file does not exist
    '''
    base = os.path.dirname(base)
    path = os.path.join(base, path)
    if not os.path.isfile(path): return None
    # BUG FIX: 'rb' to match the binary mode the pickle was written in
    f = open(path, 'rb')
    try:
        return pickle.load(f)
    finally:
        f.close()

def html2text(node, **kw):
    '''
    Render an lxml node (or list of nodes) to plain text.

    @param node: element(s) accepted by Etree.tostring
    @param kw: keyword arguments forwarded to the html2text library
    @return: stripped plain-text rendering of the node's html
    '''
    html = Etree.tostring(node)
    return _html2text.html2text(html, **kw).strip()

def loadweb(url, **kw):
    '''
    Fetch ``url`` and return an HttpResponse; results are cached per-url
    via CacheManager, so a second call never hits the network.

    @param url: address to fetch
    @param data: post data as dictionary (request becomes a POST when set)
    @param user_agent: user agent string, default is firefox user agent
    @param cookie: cookie string
    @param referer: referer url to this page, some websites check referer
    @param load_ajax: True to add the X-Requested-With: XMLHttpRequest header
    '''
    print '::load webpage {0}'.format(url)
    # cookie jar + redirect following + automatic Referer on redirects
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor, mechanize.HTTPRedirectHandler, mechanize.HTTPRefererProcessor)
    opener.addheaders = [("User-Agent", kw.get('user_agent', USERAGENT)), ('Accept-Encoding', 'gzip,deflate,sdch')]
    if kw.get('load_ajax', False):
        opener.addheaders.append(("X-Requested-With", "XMLHttpRequest"))
    # optional pass-through headers supplied by the caller
    for iheader in [['referer', 'Referer'], ['cookie', 'Cookie']]:
        if kw.get(iheader[0], '') != '':
            opener.addheaders.append((iheader[1], kw.get(iheader[0])))
    fpd = None
    try:
        response = CacheManager.get(url)
        if response == None:
            mechanize.install_opener(opener)
            data = kw.get('data', None)
            try:
                fpd = mechanize.urlopen(url, urlencode(data)) if data != None else mechanize.urlopen(url)
            except:
                # fall back to plain urllib when mechanize fails
                # NOTE(review): this fallback silently drops all the custom
                # headers configured above -- confirm that is acceptable
                fpd = urllib.urlopen(url, urlencode(data)) if data != None else urllib.urlopen(url)
            html = fpd.read()
            info = fpd.info()
            cookie = info.get('Set-Cookie', '')
            encoding = info.get('Content-Encoding', '')
            if encoding == 'gzip':
                # transparently decompress gzip-encoded bodies
                gzipper = gzip.GzipFile(fileobj=StringIO.StringIO(html))
                html = gzipper.read()
            if len(html) > 0:
                # strip leading non-printable / high bytes (e.g. a BOM)
                while(ord(html[0])>128 or ord(html[0])<32): html = html[1:]
            response = HttpResponse(html=html, cookie=cookie, info=info)
            CacheManager.put(url, response)
    except:
        # any failure yields an empty HttpResponse rather than raising
        traceback.print_exc()
        response = HttpResponse()
    finally:
        if fpd != None: fpd.close()
    return response

def extractText(pat, matchStr, matchIdx, defaultVal = ''):
    '''
    Case-insensitively search ``matchStr`` with regex ``pat`` and return
    capture group ``matchIdx``.

    @param pat: regular expression with at least ``matchIdx`` groups
    @param matchStr: text to search
    @param matchIdx: index of the capture group to return
    @param defaultVal: returned when there is no match or on any error
    '''
    try:
        rexp = re.compile(pat, re.IGNORECASE)
        m = rexp.search(matchStr)
        # BUG FIX: the original tested ``m != ''`` which is true even for a
        # failed match (None); the resulting AttributeError was then hidden
        # by a bare except.  Test for None explicitly instead.
        if m is not None:
            return m.group(matchIdx)
        return defaultVal
    except (re.error, IndexError, TypeError):
        # bad pattern, bad group index, or non-string input
        return defaultVal

def getGMT7Time():
    '''
    Return the current time shifted from UTC by the machine's utc offset.

    NOTE(review): despite the name, the shift uses time.timezone (the
    local offset), not a fixed GMT+7 -- correct only on GMT+7 hosts.
    '''
    local_offset = datetime.timedelta(seconds=time.timezone)
    return datetime.datetime.utcnow() - local_offset

def toUTCTimeString(obj):
    '''
    Format a date/datetime as an RFC-1123-style GMT string.

    @param obj: a date or datetime instance
    @return: e.g. "Thu, 02 Jan 2020 03:04:05 GMT", or '' for other types
    '''
    if type(obj).__name__ not in ('date', 'datetime'):
        return ''
    return obj.strftime("%a, %d %b %Y %H:%M:%S GMT")

def normalize_str(string):
    '''
    Collapse every whitespace run in ``string`` to a single space and strip.

    @param string: str or unicode (unicode is utf-8 encoded first)
    @return: normalized utf-8 byte string; '' when ``string`` is None
    '''
    if string is None: return ''
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    # raw string for the regex; collapses tabs/newlines/multiple spaces
    result = re.sub(r"\s+", ' ', string)
    return result.strip()

def crc32unsigned(textToHash=None):
    '''Return the unsigned CRC-32 checksum of ``textToHash`` as a decimal string.'''
    # mask to 32 bits so the (possibly negative) crc32 result is unsigned
    unsigned = zlib.crc32(textToHash) & 0xffffffff
    return str(unsigned)

def md5(textToHash=None):
    '''Return the hexadecimal MD5 digest of ``textToHash``.'''
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def downloadImage(url, path, prefix='', **kw):
    ''' Download an image to ``path`` and return its prefixed file name.
        @param: 
        - url: url of file
        - path: the absolute path of file in save folder, ex: /output/
        - prefix: the prefix of return path
        - cookie: cookie string
        - referer: referer url
        - hash: get file name by hash url, default value = False
        @return: prefix + saved file name, or '' on any failure
    '''
    print '::start downloadImage: {0}'.format(url)
    try:
        if path[-1] != '/': path += '/'
        path = os.path.dirname(path)
        if not os.path.exists(path): os.makedirs(path, 0777)
        opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
        opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0")]
        opener.addheaders.append(('Cookie', kw.get('cookie', '')))
        opener.addheaders.append(('Referer', kw.get('referer', '')))
        mechanize.install_opener(opener)
        # use the url basename when it already looks like an image name,
        # otherwise (or when hash=True is requested) fall back to md5(url)
        filename = os.path.basename(url) if (re.search(r"jpg|jpe|gif|png|ico|bmp|JPG|JPE|GIF|PNG|ICO|BMP", url) and (kw.get('hash', False) == False)) else md5(url)
        # strip any existing image extension; the real one is derived from
        # the response Content-Type header below
        filename = extractText(r"(^.+)\.(jpg|jpe|JPG|JPE|JPEG|jpeg|png|PNG|gif|GIF|ico|ICO|bmp|BMP)", filename, 1, filename)
        req = mechanize.Request(url)
        location = "{0}/{1}{2}" if path != '/' else "{0}{1}{2}"
        fp = mechanize.urlopen(req)
        file_extension = ''
        try:
            headers = fp.info()
            size = -1
            if 'content-length' in headers:
                size = long(headers.get('content-length', 0))
            if 'content-type' in headers:
                # an html response means an error page, not an image
                if 'text/html' in headers.get('content-type', ''): 
                    raise Exception, 'server response html instead of an image'
                file_extension = getFileExt(headers.get('content-type', 'image/jpeg'))
            filepath = location.format(path, filename, file_extension)
            # skip the download when a same-sized file is already on disk
            isExists = False
            if os.path.isfile(filepath):
                if os.path.getsize(filepath) == size: isExists = True
            if not isExists:
                tfp = open(filepath, 'wb')
                try:
                    # stream the body to disk in 8 KiB blocks
                    bs = 1024*8
                    read = 0
                    blocknum = 0
                    while 1:
                        block = fp.read(bs)
                        if block == "": break
                        read += len(block)
                        tfp.write(block)
                        blocknum += 1
                finally:
                    tfp.close()
                if os.path.getsize(filepath) == size: print '::finished {0} --> {1}'.format(url, filepath)
            else:
                print '::<exists>'
        except:
            print '::<error>: {0}'.format(sys.exc_info()[1])
            return STRING_EMPTY
        finally:
            fp.close()
        if not prefix.endswith('/') and (prefix != ''): prefix += '/'
        return '{0}{1}'.format(prefix, os.path.basename(filepath))
    except:
        # any failure (bad path, network error, ...) yields ''
        return STRING_EMPTY

def download(url, path):
    '''
    Download ``url`` into directory ``path``.  The file name is taken from
    the url, then overridden by the Content-Disposition header when
    present, or rebuilt from md5(url) + the Content-Type extension.  The
    file is deleted again when the downloaded size does not match the
    Content-Length header.

    @param url: address of the file to download
    @param path: destination directory (created if missing)
    '''
    opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
    opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0")]
    mechanize.install_opener(opener)
    # file name candidate from the url
    # NOTE(review): `\?*` matches zero '?' characters, so the query string
    # is in fact NOT stripped here -- confirm intent
    filename = substr_with_regex("(.+)\?*", os.path.basename(url), 1)
    if filename == '': filename = str(time.time())
    if not path.endswith("/"): path += "/"
    filepath = "{0}{1}".format(path, filename)
    if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath), 0777)
    response = mechanize.urlopen(url)
    header = response.info()
    if header.get('Content-Disposition', '') != '':
        # prefer the server-provided file name when one is sent
        filename = substr_with_regex("filename=\"(.+)\"", header.get('Content-Disposition', ''), 1)
        if filename != '': filepath = "{0}{1}".format(path, filename)
    else:
        # derive the extension from the Content-Type header
        content_type = header.get('Content-Type', '')
        file_extension = getFileExt(content_type)
        if (re.search("\.[a-zA-Z]{2,4}$", filepath)):
            filepath = re.sub("(\.[a-zA-Z]{2,4})$", file_extension, filepath)
        else:
            filepath = "{0}{1}{2}".format(path, md5(url), file_extension)
    f = open(filepath, 'wb')
    downloaded_size = 0
    print '>> start download {0} to {1}'.format(url, filepath)
    if f != None:
        # stream the body to disk in 1 KiB blocks
        block_size = 1024
        while 1:
            block = response.read(block_size)
            if block == '' or block == None: break;
            f.write(block)
            downloaded_size += len(block)
        f.close()
        del f
        del response
    # verify against Content-Length; remove partial downloads
    file_download_size = long(header.get('Content-Length', 0))
    if (file_download_size != downloaded_size):
        if os.path.isfile(filepath): os.unlink(filepath)
        print 'Downloaded failed with url: {0}'.format(url)
    else:
        print 'Downloaded success: {0} to {1}'.format(url, filepath)
    return

def move_file(source, destination):
    print '::move_file from {0} to {1}'.format(source, destination)
    if not os.path.exists(source): return
    f1 = open(source, 'rb')
    if f1 != None:
        f2 = None
        try:
            if not os.path.exists(os.path.dirname(destination)): os.makedirs(os.path.dirname(destination), 0777)
            f2 = open(destination, 'wb')
            block_size = 1024
            while 1:
                block = f1.read(block_size)
                if block == '' or block == None: break;
                f2.write(block)
        finally:
            if f2 != None: f2.close()
        f1.close()
        if os.path.isfile(destination) and os.path.isfile(source) and os.path.getsize(source) == os.path.getsize(destination):
            os.unlink(source)
    print os.path.isfile(destination)

def file_get_contents(filepath):
    '''
    Read and return the entire binary content of ``filepath``.

    @param filepath: path of the file to read
    @return: file content string, or None when ``filepath`` is not a
             regular file (kept for backward compatibility with callers)
    '''
    if not os.path.isfile(filepath): return None
    # single read() instead of the original 1 KiB `text += block` loop,
    # which was quadratic in file size; `with` guarantees the close
    with open(filepath, 'rb') as f:
        return f.read()

def file_put_contents(filepath, input):
    '''
    Write ``input`` to ``filepath``, creating parent directories first.

    @param filepath: destination file path
    @param input: content to write (parameter name kept for backward
                  compatibility although it shadows the builtin)
    '''
    parent = os.path.dirname(filepath)
    if not os.path.exists(parent):
        # mode argument dropped: 0777 is already the makedirs default
        os.makedirs(parent)
    # BUG FIX: `with` closes the handle even when write() raises; the
    # original leaked it on error
    with open(filepath, 'w') as f:
        f.write(input)
    return

def stringify(node):
    '''
    Extract the text content of an lxml node via the XPath string()
    function, best-effort convert combining Vietnamese codepoints to the
    precomposed form, then whitespace-normalize the result.

    @param node: an lxml element or a list (only the first item is used)
    @return: normalized text, '' for None or an empty list
    '''
    if node is None: return ''
    _stringify = etree.XPath("string()")
    if type(node).__name__ == 'list':
        if not node:
            return ''
        node = node[0]
    text = _stringify(node)
    try:
        text = toUnicodeDungSan(text)
    except:
        pass
    return normalize_str(text)

def build_tree_from_html(html, **args):
    '''
    Parse an html/xml string into an lxml tree.

    @param html: markup to parse (assumed utf-8)
    @param _type: 'xml' to use the XML parser; anything else means html
    @return: the parsed tree, or None when parsing fails
    '''
    tree = None
    try:
        if args.get('_type', 'html') == 'xml':
            parser = etree.XMLParser(encoding='utf-8')
        else:
            parser = etree.HTMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser=parser)
    except:
        traceback.print_exc()
    return tree

def getHtmlContent(html):
    ''' Use Readability to extract the main article content from a page,
        stripping the wrapping <html>/<body> tags it adds. '''
    summary = Document(html).summary()
    article = encode_utf8_str(summary)
    return re.sub(r"\</?(html|body)/?\>", '', article)

def encode_utf8_str(string):
    '''
    Return ``string`` as a utf-8 byte string.

    @param string: str, unicode or None
    @return: utf-8 encoded bytes; '' when ``string`` is None
    '''
    if string is None:
        return ''
    if isinstance(string, unicode):
        return string.encode('utf-8')
    return string

def substr_with_regex(pattern, string, index):
    '''
    Return capture group ``index`` of the first ``pattern`` match in
    ``string``.

    @return: the matched substring, or '' when there is no match, the
             group did not participate, or on any regex error.  (The
             original could return None for a non-participating group,
             breaking callers that compare the result against '')
    '''
    try:
        m = re.search(pattern, string)
        if m is not None:
            group = m.group(index)
            if group is not None:
                return group
    except (re.error, IndexError, TypeError):
        # narrowed from the original bare except: invalid pattern, bad
        # group index, or non-string input
        pass
    return ''

# Parallel lookup tables mapping Vietnamese characters between two Unicode
# encodings: `unicodeToHop` holds the combining-diacritic ("to hop") forms
# and `unicodeDungSan` the precomposed ("dung san") forms used by
# toUnicodeDungSan().  The two literals render identically in most editors;
# the difference is in the underlying codepoint sequences.
# NOTE(review): assumes the file still carries the original byte sequences
# -- re-saving this file with Unicode normalization would silently break
# the conversion.
unicodeDungSan = 'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
unicodeToHop =   'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
# split into aligned per-character lists consumed by toUnicodeDungSan()
aDungSan = unicodeDungSan.split(' ')
aToHop = unicodeToHop.split(' ')
def toUnicodeDungSan(string):
    '''
    Replace combining-form ("to hop") Vietnamese characters in ``string``
    with their precomposed ("dung san") equivalents, using the module
    level aToHop/aDungSan lookup tables.
    '''
    res = string
    for combined, precomposed in zip(aToHop, aDungSan):
        try:
            res = res.replace(combined, precomposed)
        except:
            pass
    return res

def toUpper(string):
    '''
    Uppercase ``string``, first mapping Vietnamese accented lowercase
    letters to their uppercase forms (plain ``str.upper`` does not handle
    them for utf-8 byte strings).
    '''
    patterns = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]

    replaces = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]

    # substitute each accented lowercase letter for its uppercase twin,
    # then let upper() handle the plain ascii letters
    for lower_ch, upper_ch in zip(patterns, replaces):
        string = re.sub(lower_ch, upper_ch, string)
    return string.upper()

def toAscii(string):
    ''' Strip Vietnamese diacritics from a utf-8 string, returning the
        plain-ascii equivalent (e.g. "đa" -> "da"). '''
    if string == '': return ''
    string = encode_utf8_str(string)
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    # each alternation pattern collapses one accented letter family to its
    # bare ascii counterpart
    for pattern, plain in zip(listPattern, rep):
        string = re.sub(pattern, plain, string)
    return string

def unescapeHTML(inputStr, **args):
    '''
    Convert HTML entities (numeric ``&#65;`` and named ``&nbsp;`` style)
    in ``inputStr`` to the corresponding characters; any entity left over
    after the two substitution passes is removed.

    @param inputStr: str or unicode (unicode is utf-8 encoded first)
    @return: the unescaped string (partially processed on error)
    '''
    from htmlentitydefs import name2codepoint
    # BUG FIX: the original encoded the *builtin* ``input`` instead of
    # ``inputStr``, raising AttributeError for every unicode argument
    if isinstance(inputStr, unicode): inputStr = inputStr.encode('utf-8')
    result = inputStr
    try:
        # numeric entities first, then named ones, then drop any leftovers
        result = re.sub(r'&#(\d+);', lambda m: unichr(int(m.group(1))), result)
        result = re.sub(r'&(\w+);', lambda m: unichr(name2codepoint[m.group(1)]), result)
        result = re.sub(r'&#(\d+);|&(\w+);', '', result)
    except:
        traceback.print_exc()
    return result

def convertHTMLEntitiesToUnicode(html):
    '''
    Decode all HTML entities in ``html`` via BeautifulSoup and return the
    result as a utf-8 byte string (body contents when present, otherwise
    the whole document).
    '''
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    node = soup.body if soup.body else soup
    return unicode(node).encode('utf-8')

def compareString(a, b):
    '''Return the difflib similarity ratio (0.0 - 1.0) of ``a`` and ``b``.'''
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()

def sameOfTwoString(a, b):
    '''
    Return the longest common substring of ``a`` and ``b``.

    @return: the shared substring, or '' when either input is None/empty
    '''
    if a is None: a = ''
    if b is None: b = ''
    if a == '' or b == '':
        return ''
    matcher = SequenceMatcher()
    matcher.set_seqs(a, b)
    start, _, size = matcher.find_longest_match(0, len(a), 0, len(b))
    return a[start:start + size]

def getFileExt(mime):
    '''
    Map a MIME type to a file extension (with leading dot).

    @param mime: MIME type string, e.g. 'image/png'
    @return: extension like '.png'; jpeg variants are normalized to
             '.jpg'; '' for unknown MIME types (the original returned
             None there, which crashed callers concatenating the result)
    '''
    mimetypes.init()
    try:
        ext = mimetypes.guess_extension(mime)
    except Exception:
        ext = ''
    # BUG FIX: guess_extension returns None (not an exception) for an
    # unknown type; normalize to '' to keep the string contract
    if ext is None:
        ext = ''
    if ext in ['.jpe', '.JPE', '.jpeg', '.JPEG']: ext = '.jpg'
    return ext


class Etree():
    ''' Static helper collection for pruning and serializing lxml trees. '''

    @staticmethod
    def clean_elements(node, tag='*', attrs={}):
        '''
        Remove every descendant of ``node`` matching ``tag`` whose
        attributes contain all the key/value substrings in ``attrs``.

        @param node: an lxml element or a list (only the first item used)
        @param tag: tag name, list of tag names, or '*' for any tag
        @param attrs: attribute-name -> substring pairs that must all match
                      (mutable default is never mutated here)
        '''
        if node is None: return
        if type(node).__name__ == 'list':
            if len(node) == 0: return
            node = node[0]
        if type(tag).__name__ == 'list':
            # apply the same cleanup once per tag name
            for itag in tag:
                Etree.clean_elements(node, itag, attrs)
            return
        if tag == '' or tag is None: tag = '*'
        # refuse '*' with no attribute filter: it would wipe the tree
        if tag == '*' and len(attrs) == 0: return
        xp = ".//{0}".format(tag)
        if len(attrs) > 0:
            predicates = ["contains(@{0}, '{1}')".format(k, attrs[k]) for k in attrs.keys()]
            xp += "[{0}]".format(" and ".join(predicates))
        for ichild in node.xpath(xp):
            ichild.getparent().remove(ichild)
        return

    @staticmethod
    def clean(node):
        '''Detach an element -- or every element of a list -- from its parent.'''
        if node is None: return
        if type(node).__name__ == '_Element':
            node.getparent().remove(node)
        elif type(node).__name__ == 'list':
            # BUG FIX: the original recursed with a *generator expression*,
            # whose type name matches neither branch, so list input was
            # silently ignored; recurse per element instead
            for inode in node:
                Etree.clean(inode)
        return

    @staticmethod
    def clean_following_sibling(node, clean_self=False):
        '''
        Remove every sibling that follows ``node`` in document order.
        @param clean_self: also remove ``node`` itself afterwards
        '''
        if node is None: return
        if type(node).__name__ == 'list':
            for inode in node: Etree.clean_following_sibling(inode, clean_self)
        else:
            for item in node.xpath("./following-sibling::*"):
                item.getparent().remove(item)
            if clean_self: node.getparent().remove(node)
        return

    @staticmethod
    def clean_preceding_sibling(node, clean_self=False):
        '''
        Remove every sibling that precedes ``node`` in document order.
        @param clean_self: also remove ``node`` itself afterwards
        '''
        if node is None: return
        if type(node).__name__ == 'list':
            # BUG FIX: the original delegated list input to
            # clean_following_sibling, deleting the wrong siblings
            for inode in node: Etree.clean_preceding_sibling(inode, clean_self)
        else:
            for item in node.xpath("./preceding-sibling::*"):
                item.getparent().remove(item)
            if clean_self: node.getparent().remove(node)
        return

    @staticmethod
    def clean_comment_tag(html):
        '''
        Strip HTML comments from ``html``.
        BUG FIX: the original greedy ``(.+)`` removed everything between
        the first ``<!--`` and the last ``-->`` on a line, destroying any
        real content sitting between two comments; non-greedy now.
        '''
        return re.sub(r"<!--(.+?)-->", "", html)

    @staticmethod
    def tostring(node):
        '''
        Serialize an element (or list of elements) to utf-8 html with
        entities decoded and comments stripped; list items are separated
        by an empty paragraph.
        '''
        if node is None: return STRING_EMPTY
        html = ''
        if '_Element' in type(node).__name__:
            return Etree.clean_comment_tag(convertHTMLEntitiesToUnicode(etree.tounicode(node, pretty_print=True).encode('utf-8')))
        if type(node).__name__ == 'list':
            for idx, inode in enumerate(node):
                html += Etree.tostring(inode)
                if len(node)-1 > idx: html += '\n<p>&nbsp;</p>\n'
        return html

class HttpResponse():
    
    def __init__(self, **kw):
        '''
        @param html: code html
        @param cookie: cookie string
        @param info: headers from server res
        '''
        self.html = kw.get('html', '')
        self.cookie = kw.get('cookie', '')
        self.ifo = kw.get('info', None)
        
    def get_html(self):
        return self.html
    
    def info(self):
        return self.ifo
    
    def get_cookie(self):
        return self.cookie
    
    def build_tree(self, **kw):
        '''
        @param scheme: html or xml for parser, default is html
        '''
        tree = None
        try:
            parser = etree.XMLParser(encoding='utf-8') if kw.get('scheme', 'html') == 'xml' else etree.HTMLParser(encoding='utf-8')
            tree = etree.parse(StringIO.StringIO(self.html), parser)
        except:
            print sys.exc_info()[1]
        return tree

class CacheManager():
    ''' File-based cache of fetched responses, keyed by md5(url). '''

    # cache directory; resolved relative to this module by pickle_dump/load
    __cache_folder = '/longhoanggiang/cache/'

    @staticmethod
    def get(url):
        '''Return the cached object for ``url``, or None on any failure.'''
        cachefile = CacheManager.__cache_folder + md5(url)
        try:
            return pickle_load(cachefile)
        except:
            return None

    @staticmethod
    def put(url, data):
        '''Persist ``data`` for ``url`` in the cache folder.'''
        cachefile = CacheManager.__cache_folder + md5(url)
        pickle_dump(cachefile, data)
    
