# -*- coding: utf-8 -*-
'''
Created on Sep 4, 2013

@author: LONG HOANG GIANG
'''
import gzip
import logging
import sys
import re
import hashlib
import zlib
from BeautifulSoup import BeautifulSoup
import useragent as _useragent
import mechanize
from urllib import urlencode
import urllib
import cStringIO
from lxml import etree
import const
import html2text as _html2text
import os
from cipher import AESCipher
from httpresponse import HttpResponse
from lib.cache import CacheManager
import traceback
import mimetypes
import glob
from readability.readability import Document

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

def getArticleContent(html):
    """Extract the main article body from *html* using readability,
    strip the wrapping <body>/</body> tags, decode HTML entities via
    BeautifulSoup and return the result as UTF-8 encoded bytes."""
    article_html = Document(html).content()
    # Drop the leading "<body>" (6 chars) and trailing "</body>" (7 chars).
    article_html = article_html[6:-7]
    soup = BeautifulSoup(article_html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    return unicode(soup.body or soup).encode('utf-8')

def buildTreeFromHtml(html):
    """Parse an HTML byte string into an lxml element tree (UTF-8)."""
    html_parser = etree.HTMLParser(encoding='utf-8')
    return etree.parse(cStringIO.StringIO(html), html_parser)
    
def getPNode(node, tag):
    """Walk upward from *node* and return the first element (self or
    ancestor) whose tag equals *tag*, or None if there is no match.

    *node* may also be a non-empty list (e.g. an XPath result), in which
    case its first element is used. Note: mirroring the original
    traversal, an element is only examined while it still has a parent,
    so the tree root itself is never matched.
    """
    if isinstance(node, list) and len(node) > 0:
        node = node[0]
    # 'is not None': lxml deprecates rich comparison of elements with !=,
    # and identity is what is actually meant here.
    while (node is not None) and (node.getparent() is not None):
        if node.tag == tag:
            return node
        node = node.getparent()
    return None

def cleanNextNodeOrSelf(node, clean_self=False):
    """Remove every following sibling of *node* from its tree; when
    *clean_self* is True also remove *node* itself.

    *node* may be a non-empty list (XPath result), in which case its
    first element is used; None or an empty list is a no-op.

    Bug fix: the original iterated ``for nextNode in node.getnext()`` —
    but lxml's getnext() returns a single element (or None), so the loop
    actually walked the *children of the next sibling* instead of the
    following siblings, and crashed when there was no next sibling.
    """
    if isinstance(node, list):
        if len(node) == 0:
            return
        node = node[0]
    if node is None:
        return
    # Repeatedly drop the immediate next sibling until none remain.
    sibling = node.getnext()
    while sibling is not None:
        sibling.getparent().remove(sibling)
        sibling = node.getnext()
    if clean_self:
        node.getparent().remove(node)
    
def getGlob(path):
    """Return the files matching glob pattern *path*, naturally sorted:
    runs of digits compare as integers, so 'a2' sorts before 'a10'."""
    digit_run = re.compile(r'(\d+)')
    def natural_key(value):
        key = digit_run.split(value)
        # Odd positions of re.split with one capture group are the digit runs.
        key[1::2] = [int(chunk) for chunk in key[1::2]]
        return key
    return sorted(glob.glob(path), key=natural_key)
    
# Extract readable plain text from an lxml DOM node (or list of nodes).
def getText(node, paragraph_break=1, breakline_html=False, callback=None):
    """Walk *node* depth-first and build a plain-text rendering.

    paragraph_break: number of break tokens emitted before a <p>'s text.
    breakline_html:  emit "<br />" instead of "\n" as the break token.
    callback:        optional hook invoked for <a>/<img> elements; its
                     non-None return value is appended to the output.
    """
    output = ""
    break_tag = "\n" if not breakline_html else "<br />"
    if type(node).__name__ == '_Element':
        breakLine = 0  # pending count of break tokens to emit before next text
        for inode in node.xpath("./descendant-or-self::*"):
            if inode.tag in ['style', 'script']: continue
            if (inode.tag in ['p', 'h1', 'h2', 'br', 'div', 'h3', 'h4', 'hr']):
                # Block-level tag: schedule break(s); only <p> gets paragraph_break.
                breakLine = paragraph_break if inode.tag in ['p'] else 1
                if inode.tag in ['div'] and len(output)>0:
                    output += break_tag
            if inode.tag in ['a', 'img'] and callback != None:
                r = callback(inode)
                if r != None:
                    output += r
            # Both the element's text and its tail contribute to the output.
            for itextNode in (inode.text, inode.tail):
                if (itextNode != None):
                    # Collapse internal whitespace runs / newlines to one space.
                    itext = re.sub(r"(\s{2,}|\n+)", ' ', itextNode.strip()).strip()
                    if (itext != '' and (len(itext) > 0)):
                        try:
                            # NOTE(review): indexes output[len(itext)-1] — probably
                            # meant output[-1] — and trims two chars for one leading
                            # punctuation mark. Left untouched; verify intent
                            # before changing.
                            if itext[0] in [',', '.'] and output[len(itext)-1] == ' ':
                                output = output[0:-2]
                        except:
                            pass
                        if breakLine > 0:
                            output += break_tag * breakLine
                            breakLine = 0
                        output += itext + " "
    elif(type(node).__name__ == 'list'):
        # A list (e.g. an XPath result): concatenate each node's text.
        for childNode in node:
            output += getText(childNode, paragraph_break, breakline_html, callback)
            if len(node) > 1:
                output += break_tag
    return output.strip()

def compressStr(inputStr):
    """Gzip-compress *inputStr* at best compression and return the bytes."""
    out_buffer = cStringIO.StringIO()
    writer = gzip.GzipFile(mode='wb', fileobj=out_buffer, compresslevel=9)
    writer.write(inputStr)
    writer.close()
    return out_buffer.getvalue()

def decompressStr(compressData):
    """Decompress gzip bytes (as produced by compressStr) and return them."""
    reader = gzip.GzipFile(mode='rb', compresslevel=9,
                           fileobj=cStringIO.StringIO(compressData))
    return reader.read()
    

def encryptCipher(data):
    """Encrypt *data* with the project's AES cipher and return the result."""
    return AESCipher().encrypt(data)

def decryptCipher(cipherText):
    """Decrypt *cipherText* with the project's AES cipher and return the result."""
    return AESCipher().decrypt(cipherText)

def cleanHTMLEntities(html):
    """Decode HTML entities in *html* and return UTF-8 bytes of the body
    (or of the whole document when there is no <body>)."""
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    best = soup.body or soup
    return unicode(best).encode('utf-8')

def crc32unsigned(textToHash=None):
    """Return the unsigned CRC-32 of *textToHash* as a decimal string.

    Python 2's zlib.crc32 can return a signed value; masking with
    0xffffffff (equivalent to the original 0xffffffffL literal) makes
    it unsigned on every platform."""
    checksum = zlib.crc32(textToHash) & 0xffffffff
    return str(checksum)

def md5(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    hasher = hashlib.md5(textToHash)
    return hasher.hexdigest()


def makedir(path):
    """Create directory *path* (and any missing parents) with mode 0o777
    unless it already exists; '' and '.' are treated as no-ops."""
    if path in ('', '.'):
        return
    if not path.endswith('/'):
        path += '/'
    if not os.path.exists(path):
        # 0o777 is the same literal the original spelled as 0777.
        os.makedirs(path, 0o777)

################################################################
#        TEXT 
################################################################

def html2text(node, baseurl='', encoding='utf-8', textmode=False, **kwargs):
    # Convert *node* (an HTML string or an element object) to simplified
    # HTML/plain text via the project's html2text helper.
    datatype = type(node).__name__
    if datatype == 'str':
        html = node
    else:
        # NOTE(review): imports `Etree` from a local `etree` module (which
        # shadows lxml.etree) and calls Etree.tostring() — confirm that
        # module and attribute exist before relying on this branch.
        from etree import Etree
        html = Etree.tostring(node)
    return _html2text.getsimplehtml(html, baseurl, encoding, textmode, **kwargs)

def node2Text(node):
    """Flatten *node* to text, preserving <table>/<tr>/<td> tags so the
    tabular structure survives; every other tag is dropped, keeping only
    its text and tail."""
    pieces = []
    if node.text:
        pieces.append(node.text)
    for child in node:
        keep_tag = child.tag in ("tr", "td", "table")
        if keep_tag:
            pieces.append("<%s>" % child.tag)
        pieces.append(node2Text(child))
        if keep_tag:
            pieces.append("</%s>" % child.tag)
        if child.tail:
            pieces.append(child.tail)
    return "".join(pieces).strip()
    
def stringify(node):
    """Return the normalized text content of an lxml element, or of each
    element in a list (space-joined); const.EMPTY for anything else."""
    kind = type(node).__name__
    if '_Element' in kind:
        text_of = etree.XPath("string()")
        return normalizeStr(text_of(node))
    if kind == 'list':
        joined = ""
        for item in node:
            joined += stringify(item) + " "
        return normalizeStr(joined)
    return const.EMPTY

def extractText(pattern, match, idx, default=''):
    """Search *match* for *pattern* and return capture group *idx*;
    return *default* when nothing matches or the group index is out
    of range."""
    found = re.search(pattern, match)
    if found is None:
        return default
    try:
        return found.group(idx)
    except IndexError:
        return default

def findIter(pattern, match, idx):
    """Return a list with capture group *idx* of every match of *pattern*
    in *match*. A bad group index is logged and that match skipped.

    The original used a bare ``except``, which silently absorbed any
    error; only IndexError (out-of-range group) is expected from
    ``m.group(idx)``, so the handler is narrowed to it.
    """
    result = []
    for m in re.finditer(pattern, match):
        try:
            result.append(m.group(idx))
        except IndexError:
            traceback.print_exc()
    return result

def normalizeStr(string):
    """Strip newlines, collapse runs of 3+ whitespace characters into a
    single space, UTF-8-encode unicode input (Python 2), and trim the
    ends."""
    if type(string).__name__ in ('unicode', '_ElementUnicodeResult'):
        string = string.encode('utf-8', 'ignore')
    without_newlines = string.replace("\n", '')
    collapsed = re.sub(r"\s{3,}", ' ', without_newlines)
    return collapsed.strip()

def toUpper(string):
    """Uppercase *string*, first mapping Vietnamese lowercase letters to
    their uppercase forms (plain .upper() does not handle these for
    UTF-8 byte strings on Python 2)."""
    patterns = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]

    replaces = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]

    # Each pattern is a literal character (no regex metacharacters), so
    # str.replace is equivalent to the original re.sub call.
    for lower_ch, upper_ch in zip(patterns, replaces):
        string = string.replace(lower_ch, upper_ch)
    return string.upper()

def toLower(string):
    """Lowercase *string*, first mapping Vietnamese uppercase letters to
    their lowercase forms (plain .lower() does not handle these for
    UTF-8 byte strings on Python 2)."""
    replaces = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]

    patterns = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]

    # Each pattern is a literal character (no regex metacharacters), so
    # str.replace is equivalent to the original re.sub call.
    for upper_ch, lower_ch in zip(patterns, replaces):
        string = string.replace(upper_ch, lower_ch)
    return string.lower()

def toKd(string):
    """Strip Vietnamese diacritics from *string*, returning the plain
    ASCII base letters (UTF-8 byte input/output on Python 2)."""
    if string == '':
        return ''
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    # Patterns are regex alternations, so re.sub (not str.replace) is required.
    for accented_group, plain in zip(listPattern, rep):
        string = re.sub(accented_group, plain, string)
    return string

# Parallel lookup tables for Vietnamese Unicode normalization: the first
# string holds the precomposed ("dung san") code points, the second the
# visually identical combining-diacritic ("to hop") sequences.
# NOTE(review): the two literals below render identically; whether
# unicodeToHop really contains combining sequences depends on the exact
# bytes stored in this file — verify with a hex dump before editing.
unicodeDungSan = 'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
unicodeToHop =   'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
# Split on spaces so index i in one list maps to index i in the other.
aDungSan = unicodeDungSan.split(' ')
aToHop = unicodeToHop.split(' ')

def toUnicodeDungSan(string):
    """Convert Vietnamese text written with combining diacritics
    ("to hop") into the precomposed ("dung san") Unicode forms, using
    the module-level aToHop/aDungSan tables."""
    res = string
    for composed, precomposed in zip(aToHop, aDungSan):
        try:
            res = res.replace(composed, precomposed)
        except:  # bare on purpose: mirrors the original best-effort behavior
            pass
    return res


################################################################
#        IO Function
################################################################

def file_get_content(path, mode='rb'):
    """Read and return the entire content of *path* (bytes by default);
    return None when the file cannot be opened or read.

    Improvements over the original: ``with`` guarantees the handle is
    closed even on a read error, and the bare ``except`` is narrowed to
    I/O errors so programming mistakes are no longer swallowed.
    """
    try:
        with open(path, mode) as fp:
            return fp.read()
    except (IOError, OSError):
        return None

def file_put_content(data, path, mode='wb'):
    """Write *data* to *path*; return True on success, False when the
    file cannot be opened or written.

    Improvements over the original: ``with`` guarantees the handle is
    closed (and flushed) even on a write error, and the bare ``except``
    is narrowed to I/O errors so programming mistakes are no longer
    swallowed.
    """
    try:
        with open(path, mode) as fp:
            fp.write(data)
    except (IOError, OSError):
        return False
    return True

def gz_file_get_content(path):
    """Read and return the decompressed content of gzip file *path*;
    log a warning and return None on any failure."""
    try:
        gz = gzip.open(path, "rb", 9)
        content = gz.read()
        gz.close()
        return content
    except:  # bare on purpose: mirrors the original log-and-continue behavior
        logging.warn(sys.exc_info()[1])
    return None

def gz_file_put_content(data, path):
    """Gzip-compress *data* into file *path* at best compression;
    return True on success, False on any error."""
    try:
        gz = gzip.open(path, "wb", 9)
        gz.write(data)
        gz.close()
    except:  # bare on purpose: any failure simply reports False
        return False
    return True

################################################################
#        WEB & CACHE
################################################################

def getCacheDir(path="/longhoanggiang/cache/"):
    """Ensure the cache directory exists and return its path (always
    with a trailing slash).

    The directory used to be hard-coded; it is now a parameter whose
    default is the original value, so existing callers are unaffected
    while tests and other deployments can redirect the cache.
    """
    if not path.endswith('/'):
        path += '/'
    makedir(path)
    return path

def getFileExtension(mime):
    """Guess a file extension (including the dot) for MIME type *mime*,
    normalizing all jpeg variants to '.jpg'."""
    mimetypes.init()
    guessed = mimetypes.guess_extension(mime)
    if guessed in ('.jpe', '.JPE', '.jpeg', '.JPEG'):
        return '.jpg'
    return guessed

class Web():
    """Web download / page-fetch helpers built on mechanize and urllib,
    with optional file-system response caching via CacheManager."""
    
    @staticmethod
    def simpleDownload(url, path, prefix='', **kw):
        '''Fetch *url* with urllib.urlretrieve and save it under *path*.

           @param ext: default extension of file if can't detect file extension
           @filename: manual set filename for saved file
           Returns "<prefix><filename>" on success, False on failure.
        '''
        if not path.endswith('/'): path += '/'
        fn = kw.get('filename', '')
        if fn == '':
            # Derive the filename from the URL; fall back to its MD5 when
            # the basename is empty or contains a query string.
            filename = os.path.basename(url)
            if filename == '' or '?' in filename: 
                filename = md5(url)
                ext = kw.get('ext', '')
                if ext != '':
                    if not '.' in ext: ext = '.' + ext
                    filename = filename + ext
        else:
            filename = fn
        filepath = path + filename
        urllib.urlretrieve(url, filepath)
        # urlretrieve raises on hard failures; the existence check guards
        # the success log and return value.
        if os.path.exists(filepath):
            logging.info("Downloaded successfully, saved to {0}".format(filepath))
            if not prefix.endswith('/') and prefix != '': prefix += '/'
            return '{0}{1}'.format(prefix, filename)
        logging.warn("Download failed with url: {0}".format(url))
        return False
    
    @staticmethod
    def download(url, path, prefix='', **kw):
        ''' Stream *url* to a file under *path* via mechanize.

            @param prefix
            @param filename
            @param ext: default file extension if can not detect file extension from url
            @param cookie
            @param referer
            @param useragent
            @param overwrite: force over write if file exists
            @param allow_mimetype: allow mimetype
            Returns "<prefix><filename>" on success, '' on error.
        ''' 
        logging.info("Start downloading {0}".format(url))
        fileName = kw.get('filename', '')
        fileExtension = kw.get('ext', '')
        allowMimeType = kw.get('allow_mimetype', None)
        useragent = kw.get('useragent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0')
        if not path.endswith('/'): path += '/'
        opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
        opener.addheaders = [("User-Agent", useragent)]
        for item in [('cookie', 'Cookie'), ('referer', 'Referer')]:
            # NOTE(review): kw.get(...) defaults to None and None != '' is
            # True, so a missing cookie/referer appends a (header, None)
            # pair — '' was probably the intended default; confirm that
            # mechanize tolerates None header values.
            v = kw.get(item[0])
            if v != '':
                opener.addheaders.append((item[1], v))
        mechanize.install_opener(opener)
        fp = mechanize.urlopen(url)
        # Filename from the last URL path segment; MD5 fallback when it
        # carries a query string or is suspiciously long.
        urlFileName = extractText(r"/([^/]+)(\..{2,4})?$", url, 1)
        if '?' in urlFileName or len(urlFileName) > 50:
            urlFileName = md5(url)
        hasError = False
        try:
            headers = fp.info()
            size = -1  # -1 = unknown content length
            if 'content-length' in headers:
                size = long(headers.get('content-length', 0))
            if 'content-type' in headers:
                mime = headers.get('content-type')
                if allowMimeType != None:
                    if not mime in allowMimeType: raise Exception, 'Not allow content type'
                # NOTE(review): the fallback argument passes the *extension*
                # where a MIME type is expected — verify getFileExtension's
                # contract before relying on the default branch.
                fileExtension = getFileExtension(headers.get('content-type', fileExtension))
            if fileName == '':
                fileName = '{0}{1}'.format(urlFileName, fileExtension)
            filepath = '{0}{1}'.format(path, fileName)
            # NOTE(review): 'overwrite' defaults to True, so an existing file
            # is replaced unless the caller passes overwrite=False — the
            # docstring wording suggests the opposite default was intended.
            if not os.path.exists(filepath) or kw.get('overwrite', True):
                tfp = open(filepath, 'wb')
                try:
                    # Stream the body in 8 KiB chunks to bound memory usage.
                    bs = 1024*8
                    read = 0
                    blocknum = 0
                    while 1:
                        block = fp.read(bs)
                        if block == "": break
                        read += len(block)
                        tfp.write(block)
                        blocknum += 1
                finally:
                    tfp.close()
                if os.path.getsize(filepath) == size: logging.info("Downloaded successfully url: {0} --> {1}".format(url, filepath))
            else:
                logging.warn("File already exist")
        except Exception, e:
            hasError = True
            logging.error('{0}:{1}'.format(e, url))
        finally:
            fp.close()
        if not prefix.endswith('/') and (prefix != ''): prefix += '/'
        return prefix + fileName if not hasError else ''
    
    @staticmethod
    def load(url, data={}, referer='', cookie='', useragent='', reqAjax=False, cached=False):
        """Fetch *url* (POST when *data* is non-empty) and return an
        HttpResponse; when *cached* is True, responses are stored and
        served from the cache directory for 5 days.

        NOTE(review): ``data={}`` is a shared mutable default — safe only
        as long as no code path mutates it; confirm before extending.
        """
        logging.info("> web.load url: {0}".format(url))
        cacheId = md5(url)
        cacheFile = getCacheDir() + cacheId
        if not cached:
            # Caller opted out of caching: drop any stale cache entry.
            if os.path.isfile(cacheFile): os.unlink(cacheFile)
        response = CacheManager.get(cacheFile) if cached else None
        if response == None:
            logging.info("load from network without cache")
            opener = mechanize.build_opener(
                    mechanize.HTTPCookieProcessor, 
                    mechanize.HTTPRefererProcessor, 
                    mechanize.HTTPRedirectHandler)
            opener.addheaders = [("User-Agent", useragent if useragent != '' else _useragent.FIREFOX), 
                                 ("Referer", referer),
                                 ("Cookie", cookie)]
            if (reqAjax):
                opener.addheaders.append(("X-Requested-With", "XMLHttpRequest"))
            mechanize.install_opener(opener)
            try:
                http = mechanize.urlopen(url, urlencode(data) if len(data.items())>0 else None)
            except:
                # Best-effort fallback to plain urllib when mechanize fails.
                http = urllib.urlopen(url, urlencode(data) if len(data.items())>0 else None)
            content = http.read()
            header = http.info()   
            # Transparently decompress gzip-encoded response bodies.
            if ('gzip' in header.get('Content-Encoding', '')):
                    gzipper = gzip.GzipFile(fileobj=cStringIO.StringIO(content))
                    content = gzipper.read()
                    
            response = HttpResponse(content, header)
            if cached: CacheManager.put(cacheFile, response, 5*86400)  # 5 days
        return response
        
    @staticmethod
    def removeCache(url):
        # Delete the cached response file for *url*, if one exists.
        cacheId = md5(url)
        cacheFile = getCacheDir() + cacheId
        if os.path.isfile(cacheFile):
            os.unlink(cacheFile)
        
        
        
        
        
        
        
        
        
        
        
    
