# -*- coding: utf-8 -*-
'''
Created on Mar 24, 2013

@author: LONG HOANG GIANG
'''
from AESCipher import AESCipher
from BeautifulSoup import BeautifulSoup
from HttpResponse import HttpResponse
from cache import CacheManager
from lxml import etree
from pattern.web import plaintext
from readability.readability import Document
from urllib import urlencode
import cPickle
import cStringIO
import gzip as _gzip
import hashlib
import json
import logging
import mechanize
import mimetypes
import os
import re
import sys
import time as _time
import traceback
import urllib
import zlib

USERAGENT_FIREFOX = 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'
XML_PARSER = etree.XMLParser(encoding='utf-8')
HTML_PARSER = etree.HTMLParser(encoding='utf-8')
PATH_SEPARATE = '/'


logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

def raw(text):
    escape_dict={
        '\a':r'\a',
        '\b':r'\b',
        '\c':r'\c',
        '\f':r'\f',
        '\n':r'\n',
        '\r':r'\r',
        '\t':r'\t',
        '\v':r'\v',
        '\'':r'\'',
        '\"':r'\"',
        '\0':r'\0',
        '\1':r'\1',
        '\2':r'\2',
        '\3':r'\3',
        '\4':r'\4',
        '\5':r'\5',
        '\6':r'\6',
        '\7':r'\7',
        '\8':r'\8',
        '\9':r'\9'
    }
    return "".join([escape_dict.get(char, char) for char in text])
    

def decryptCipher(cipherText):
    ''' Decrypt *cipherText* with the project AES cipher and return the
        plain data. '''
    return AESCipher().decrypt(cipherText)

def encryptCipher(data):
    ''' Encrypt *data* with the project AES cipher and return the
        cipher text. '''
    return AESCipher().encrypt(data)

def encryptCipherFile(path):
    ''' Read the gzip file at *path*; if its content parses as (truthy)
        JSON, overwrite the file with the AES-encrypted content, still
        gzip-compressed.

        A ValueError from json.loads propagates when the file does not
        contain valid JSON (same as the original).  The file handles are
        now closed in finally blocks -- the original leaked them whenever
        read() or write() raised.
    '''
    fp = _gzip.open(path, "rb")
    try:
        data = fp.read()
    finally:
        fp.close()
    if json.loads(data):
        cipherData = AESCipher().encrypt(data)
        out = _gzip.open(path, "wb")
        try:
            out.write(cipherData)
        finally:
            out.close()

def getCacheDir():
    ''' Return the (hard-coded) on-disk cache directory, creating it on
        first use. '''
    cacheDir = "/longhoanggiang/cache/"
    createIfNotExistsPath(cacheDir)
    return cacheDir
    
def gzip(filename, data):
    ''' Write *data* to *filename* as a gzip-compressed file, creating the
        parent directory when necessary.

        The handle is closed in a finally block; the original leaked it
        when write() raised.
    '''
    createIfNotExistsPath(os.path.dirname(filename))
    out = _gzip.open(filename, 'wb')
    try:
        out.write(data)
    finally:
        out.close()

def createIfNotExistsPath(path):
    if path == '' or path == '.': return
    if not path.endswith('/'): path += '/'
    if not os.path.isdir(path): os.makedirs(path, 0777)

def loadWeb(url, data={}, cached=True, referer='', cookiestr='', useragent='', sendajax=False):
    print '> start loadWeb from url {0}'.format(url)
    firefoxUserAgent = "Mozilla/5.0 (Windows NT 6.2; rv:20.0) Gecko/20100101 Firefox/20.0"
    opener = mechanize.build_opener(
                mechanize.HTTPCookieProcessor, 
                mechanize.HTTPRefererProcessor, 
                mechanize.HTTPRedirectHandler)
    opener.addheaders = [("User-Agent", useragent if useragent != '' else firefoxUserAgent), 
                         ("Referer", referer),
                         ("Cookie", cookiestr)]
    if (sendajax):
        opener.addheaders.append(("X-Requested-With", "XMLHttpRequest"))
    cachedFile = getCacheDir() + md5(url + str(data))
    if not cached and os.path.isfile(cachedFile): os.unlink(cachedFile)
    httpResponse = CacheManager.get(cachedFile) if cached else None
    if httpResponse != None: return httpResponse
    mechanize.install_opener(opener)
    itime = 0
    httpResponse = HttpResponse(None)
    hasError = False
    while itime < 3:
        try:
            try:
                fp = mechanize.urlopen(url, urlencode(data)) if len(data.items()) == 0 else mechanize.urlopen(url)
            except:
                fp = urllib.urlopen(url, urlencode(data)) if len(data.items()) == 0 else urllib.urlopen(data)
            info = fp.info()
            content = fp.read()
            responseUrl = fp.geturl()
            if ('gzip' in info.get('Content-Encoding', '')):
                gzipper = _gzip.GzipFile(fileobj=cStringIO.StringIO(content))
                data = gzipper.read()
            httpResponse = HttpResponse(content)
            httpResponse.set(info, fp.code, responseUrl, info.get('Set-Cookie', ''))
            fp.close()
            CacheManager.put(cachedFile, httpResponse, 864000)
            hasError = False
            break
        except:
            hasError = True
            itime += 1
            _time.sleep(5)
            print '> [e]:{0}:{1}:{2}'.format(itime, sys.exc_info()[1], url)
    if hasError: CacheManager.remove(cachedFile)
    return httpResponse
        
def downloadImage(url, path, prefix='', **kw):
    ''' Download the image at *url* into folder *path* and return its file
        name prefixed with *prefix*; returns '' on any failure.
        @param: 
        - url: url of file
        - path: the absolute path of file in save folder, ex: /output/
        - prefix: the prefix of return path
        - cookie: cookie string
        - referer: referer url
        - hash: get file name by hash url, default value = False
    '''
    print '> start downloadImage: {0}'.format(url)
    try:
        # Normalise the destination folder and make sure it exists.
        if path[-1] != '/': path += '/'
        path = os.path.dirname(path)
        if not os.path.exists(path): os.makedirs(path, 0777)
        # Redirect-following opener with a browser User-Agent, plus
        # optional Cookie / Referer headers supplied through **kw.
        opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
        opener.addheaders = [("User-Agent", USERAGENT_FIREFOX)]
        for item in [['cookie', 'Cookie'], ['referer', 'Referer']]:
            v = kw.get(item[0], '')
            if v != '': opener.addheaders.append((item[1], v))
        mechanize.install_opener(opener)
        # File name: the URL basename when the URL looks like an image and
        # hashing was not requested, otherwise the md5 of the URL.
        filename = os.path.basename(url) if (re.search(r"jpg|jpe|gif|png|ico|bmp|JPG|JPE|GIF|PNG|ICO|BMP", url) and (kw.get('hash', False) == False)) else md5(url)
        # Strip any trailing image extension; the real one is re-appended
        # below from the response Content-Type header.
        filename = extractText(r"(^.+)\.(jpg|jpe|JPG|JPE|JPEG|jpeg|png|PNG|gif|GIF|ico|ICO|bmp|BMP)", filename, 1, filename)
        req = mechanize.Request(url)
        # Avoid a double slash when saving into the filesystem root.
        location = "{0}/{1}{2}" if path != '/' else "{0}{1}{2}"
        fp = mechanize.urlopen(req)
        file_extension = ''
        try:
            headers = fp.info()
            size = -1
            if 'content-length' in headers:
                size = long(headers.get('content-length', 0))
            if 'content-type' in headers:
                # An html response means an error page, not an image.
                if 'text/html' in headers.get('content-type', ''): 
                    raise Exception, 'server response html instead of an image'
                file_extension = getFileExt(headers.get('content-type', 'image/jpeg'))
            filepath = location.format(path, filename, file_extension)
            # Skip the download when a file of the exact same size exists.
            isExists = False
            if os.path.isfile(filepath):
                if os.path.getsize(filepath) == size: isExists = True
            if not isExists:
                tfp = open(filepath, 'wb')
                try:
                    # Stream the response body to disk in 8 KiB blocks.
                    bs = 1024*8
                    read = 0
                    blocknum = 0
                    while 1:
                        block = fp.read(bs)
                        if block == "": break
                        read += len(block)
                        tfp.write(block)
                        blocknum += 1
                finally:
                    tfp.close()
                if os.path.getsize(filepath) == size: print '> finished {0} --> {1}'.format(url, filepath)
            else:
                print '> image already exists'
        except:
            traceback.print_exc()
            return ""
        finally:
            fp.close()
        if not prefix.endswith('/') and (prefix != ''): prefix += '/'
        return '{0}{1}'.format(prefix, os.path.basename(filepath))
    except:
        return ""

def getFileExt(mime):
    ''' Map a MIME type to a file extension, e.g. 'image/png' -> '.png'.

        Returns '' for unknown types.  mimetypes.guess_extension() returns
        None on a miss, and the original let that None through -- callers
        formatting it into a file name got a literal 'None' suffix.  JPEG
        variants are normalised to '.jpg'.
    '''
    ext = ''
    try:
        mimetypes.init()
        ext = mimetypes.guess_extension(mime) or ''
        if ext in ['.jpe', '.JPE', '.jpeg', '.JPEG']: ext = '.jpg'
    except:
        pass
    return ext
    
def stringify(node):
    ''' Return the text content of an lxml element -- or of the first
        element of a list -- normalised via normalizeStr(); '' for
        anything else. '''
    _stringify = etree.XPath("string()")   # XPath string(): all descendant text
    if '_Element' in type(node).__name__:
        return normalizeStr(_stringify(node))
    elif type(node).__name__ == 'list':
        for inode in node:
            # NOTE(review): returns after the FIRST list element only --
            # looks like a join over all elements was intended; confirm.
            return normalizeStr(_stringify(inode))
    return ''

def normalizeStr(string):
    if type(string).__name__ in ['unicode', '_ElementUnicodeResult']:
        string = string.encode('utf-8')
    return string.strip()

def cleanHTMLEntities(html):
    ''' Parse *html* with HTML entities converted to their characters and
        return the <body> -- or the whole document when there is no body --
        as a unicode string. '''
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    node = soup.body or soup
    return unicode(node)

def crc32unsigned(textToHash=None):
    return str(zlib.crc32(textToHash) & 0xffffffffL)

def md5(textToHash=None):
    return hashlib.md5(textToHash).hexdigest()

def extractText(pattern, match, idx, default=''):
    ''' Search *match* with regex *pattern* and return capture group *idx*;
        fall back to *default* when there is no match or no such group. '''
    found = re.search(pattern, match)
    if found is None:
        return default
    try:
        return found.group(idx)
    except IndexError:
        return default

def toUTCTimeString(obj):
    ''' Format a date/datetime as an RFC-1123-style GMT string; any other
        type yields "". '''
    if type(obj).__name__ not in ['date', 'datetime']:
        return ""
    return obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
    
def buildTree(html, parser=HTML_PARSER, base_url=None):
    ''' Parse an html/xml string into an lxml ElementTree using the given
        parser (HTML by default). '''
    return etree.parse(cStringIO.StringIO(html), parser=parser, base_url=base_url)

unicodeDungSan = 'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
unicodeToHop =   'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
# Parallel per-character tables: aToHop[i] (combining/"to hop" form) maps
# to aDungSan[i] (precomposed/"dung san" form); consumed by toUnicodeDungSan().
aDungSan = unicodeDungSan.split(' ')
aToHop = unicodeToHop.split(' ')

def toUnicodeDungSan(string):
    ''' Convert a Vietnamese string from combining ("to hop") Unicode form
        to precomposed ("dung san") form via the module-level aToHop /
        aDungSan parallel tables.

        (The original Vietnamese docstring said "to accentless ASCII" --
        that text was copied from toAscii(); the code replaces combining
        sequences with their precomposed equivalents.)
    '''
    res = string
    for i in range(len(aToHop)):
        try:
            # best-effort: swallow errors from odd input types (e.g. None)
            res = res.replace(aToHop[i], aDungSan[i])
        except:
            pass
    return res

def toLower(string):
    replaces = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]
    
    patterns = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]
    
    for i in range(0,len(patterns)):
        pattern = patterns[i]
        replace = replaces[i]
        string = re.sub(r"{0}".format(pattern), replace, string)
    return string.lower()
    

def toUpper(string):
    patterns = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]
    
    replaces = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]
    
    for i in range(0,len(patterns)):
        pattern = patterns[i]
        replace = replaces[i]
        string = re.sub(r"{0}".format(pattern), replace, string)
    return string.upper()

def toAscii(string):
    ''' Convert a Vietnamese Unicode string to accentless ASCII, e.g.
        precomposed accented vowels collapse to their base letter and
        d-with-stroke to 'd'/'D'.  Unicode input is utf-8 encoded first;
        the return value is a byte string. '''
    if string == '': return ''
    if isinstance(string, unicode): string = string.encode('utf-8')
    # Each pattern is an alternation of accented forms of one base letter;
    # rep[idx] is the corresponding ASCII replacement.
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    for idx in range(len(listPattern)):
        string = re.sub(listPattern[idx], rep[idx], string)
    return string    

def cleanHTML(html, keep=[]):
    ''' Strip *html* down to plain text via pattern.web.plaintext, keeping
        only the tags listed in *keep*; returns utf-8 bytes. '''
    return plaintext(html, keep).encode('utf-8')

def getHTMLContent(html):
    ''' Extract the readable article body from *html* (readability's
        summary), with the wrapping <html>/<body> tags stripped out. '''
    article = Document(html).summary()
    return re.sub(r"\</?(html|body)/?\>", '', article)

def pickleDumps(path, obj):
    p = os.path.dirname(path)
    try:
        if p != '' and not os.path.exists(p):
            os.makedirs(p, 0777)
        fp = open(path, 'wb')
        cPickle.dump(obj, fp)
        fp.close()
    except:
        return False
    return True

def pickleLoads(path):
    if os.path.exists(path):
        try:
            fp = open(path, 'rb')
            obj = cPickle.load(path)
            fp.close()
            return obj
        except:
            traceback.print_exc()
    return None
            
        