# -*- coding: utf-8 -*-
'''
Created on 28-11-2012

@author: LONG HOANG GIANG
'''
from BeautifulSoup import BeautifulSoup
from httpresponse import HttpResponse
from readability.readability import Document
from lxml import etree
import cPickle
import hashlib
import mechanize
import mimetypes
import os
import re
import json
import sys
import time
import traceback
import urllib
import zlib
import cStringIO
import gzip as _gzip



STRING_EMPTY = ''  # canonical empty-string sentinel returned on failure paths
USERAGENT = 'Mozilla/5.0 (Windows NT 6.2; rv:18.0) Gecko/18.0 Firefox/18.0'  # default User-Agent for all HTTP requests
CACHE_FOLDER = "/longhoanggiang/cache/"  # on-disk cache used by loadweb (trailing slash expected)
# Side effect at import time: ensure the cache folder exists (world-writable).
if not os.path.exists(CACHE_FOLDER): os.makedirs(CACHE_FOLDER, 0777)

def gzip(fileName, data):
    '''Write *data* into a gzip archive at path *fileName* (max compression).'''
    # GzipFile opens the path itself when no fileobj is given; the context
    # manager guarantees the stream is flushed and closed.
    with _gzip.GzipFile(filename=fileName, mode='wb', compresslevel=9) as archive:
        archive.write(data)

def buildTreeFromHtml(html):
    '''Parse an HTML byte string into an lxml ElementTree (UTF-8 input assumed).'''
    html_parser = etree.HTMLParser(encoding='utf-8')
    return etree.parse(cStringIO.StringIO(html), parser=html_parser)

def buildTreeFromXml(xml):
    '''Parse an XML byte string into an lxml ElementTree (UTF-8 input assumed).'''
    xml_parser = etree.XMLParser(encoding='utf-8')
    return etree.parse(cStringIO.StringIO(xml), parser=xml_parser)

def loadweb(url, **kw):
    '''
    @param url
    @param data: data for post method as dictionary type
    @param ajax: if type ajax, set True
    @param cookie
    @param referer
    @param useragent: default is firefox user agent
    @param nocache  
    '''
    
    data = kw.get('data', None)
    cachefile = '/longhoanggiang/cache/' + md5(url + str(data))
    nocache = kw.get('nocache', False)
    if nocache and os.path.isfile(cachefile):
        os.unlink(cachefile)
    response = pickle_load(cachefile)
    if response == None:
        print '> load web page url: {0}'.format(url)
        opener = mechanize.build_opener(mechanize.HTTPRefreshProcessor, mechanize.HTTPEquivProcessor)
        useragent = kw.get('useragent', USERAGENT)
        opener.addheaders = [('User-Agent', useragent), ('Accept-Encoding', 'gzip,deflate')]
        for item in [['cookie', 'Cookie'], ['referer', 'Referer']]:
            v = kw.get(item[0], '')
            if v != '': opener.addheaders.append((item[1], v))
        if kw.get('ajax', False): opener.addheaders.append(('X-Requested-With', 'XMLHttpRequest'))
        mechanize.install_opener(opener)
        fd = None
        try:
            try:
                fd = mechanize.urlopen(url) if not isinstance(data, dict) else mechanize.urlopen(url, urllib.urlencode(data))
            except:
                fd = urllib.urlopen(url) if not isinstance(data, dict) else urllib.urlopen(url, urllib.urlencode(data))
            response = HttpResponse(fd)
        except:
            traceback.print_exc()
            response = HttpResponse(None)
        finally:
            if fd != None: fd.close()
        pickle_dump(response, cachefile)
    else:
        print '> load web page from cache url: {0}'.format(url)
    return response

def downloadImage(url, path, prefix='', **kw):
    ''' Download an image from *url* into folder *path* and return its public name.
        @param:
        - url: url of file
        - path: the absolute path of file in save folder, ex: /output/
        - prefix: the prefix of return path (prepended to the returned file name)
        - cookie: cookie string
        - referer: referer url
        - hash: get file name by hash url, default value = False
        Returns STRING_EMPTY ('') on any failure.
    '''
    print '> start downloadImage: {0}'.format(url)
    try:
        # Ensure a trailing slash, then strip it again via dirname
        # ('/output/' -> '/output'); the location template re-adds it below.
        if path[-1] != '/': path += '/'
        path = os.path.dirname(path)
        if not os.path.exists(path): os.makedirs(path, 0777)
        opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
        opener.addheaders = [("User-Agent", USERAGENT)]
        # Optional request headers forwarded from kwargs.
        for item in [['cookie', 'Cookie'], ['referer', 'Referer']]:
            v = kw.get(item[0], '')
            if v != '': opener.addheaders.append((item[1], v))
        mechanize.install_opener(opener)
        # Use the URL basename when it looks like an image name (unless
        # hash=True), otherwise an md5 of the URL; then drop any image
        # extension -- it is re-derived from the response Content-Type.
        # NOTE(review): confirm extractText accepts a 4th positional
        # (default) argument; with a 3-positional signature this call raises
        # TypeError and the outer handler returns ''.
        filename = os.path.basename(url) if (re.search(r"jpg|jpe|gif|png|ico|bmp|JPG|JPE|GIF|PNG|ICO|BMP", url) and (kw.get('hash', False) == False)) else md5(url)
        filename = extractText(r"(^.+)\.(jpg|jpe|JPG|JPE|JPEG|jpeg|png|PNG|gif|GIF|ico|ICO|bmp|BMP)", filename, 1, filename)
        req = mechanize.Request(url)
        location = "{0}/{1}{2}" if path != '/' else "{0}{1}{2}"
        fp = mechanize.urlopen(req)
        file_extension = ''
        try:
            headers = fp.info()
            size = -1  # expected byte count; stays -1 when the server sends no length
            if 'content-length' in headers:
                size = long(headers.get('content-length', 0))
            if 'content-type' in headers:
                if 'text/html' in headers.get('content-type', ''): 
                    raise Exception, 'server response html instead of an image'
                file_extension = getFileExt(headers.get('content-type', 'image/jpeg'))
            filepath = location.format(path, filename, file_extension)
            # Skip the download when a file of the exact expected size exists.
            isExists = False
            if os.path.isfile(filepath):
                if os.path.getsize(filepath) == size: isExists = True
            if not isExists:
                tfp = open(filepath, 'wb')
                try:
                    bs = 1024*8  # copy in 8 KiB blocks
                    read = 0
                    blocknum = 0
                    while 1:
                        block = fp.read(bs)
                        if block == "": break
                        read += len(block)
                        tfp.write(block)
                        blocknum += 1
                finally:
                    tfp.close()
                if os.path.getsize(filepath) == size: print '> finished {0} --> {1}'.format(url, filepath)
            else:
                print '> image already exists'
        except:
            traceback.print_exc()
            return STRING_EMPTY
        finally:
            fp.close()
        if not prefix.endswith('/') and (prefix != ''): prefix += '/'
        return '{0}{1}'.format(prefix, os.path.basename(filepath))
    except:
        return STRING_EMPTY


def download(url, path):
    '''Download *url* into folder *path*, deriving the file name from the
    Content-Disposition header, the URL basename, or an md5 of the URL;
    deletes the file again when the received byte count does not match
    Content-Length.'''
    opener = mechanize.build_opener(mechanize.HTTPRedirectHandler)
    opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0")]
    mechanize.install_opener(opener)
    # NOTE(review): "(.+)\?*" is greedy, so a '?query' suffix is NOT actually
    # stripped here -- '\?*' matches zero '?' characters at the very end.
    filename = extractText("(.+)\?*", os.path.basename(url), 1)
    if filename == '': filename = str(time.time())
    if not path.endswith("/"): path += "/"
    filepath = "{0}{1}".format(path, filename)
    if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath), 0777)
    response = mechanize.urlopen(url)
    header = response.info()
    # Prefer the server-advertised file name when present.
    if header.get('Content-Disposition', '') != '':
        filename = extractText("filename=\"(.+)\"", header.get('Content-Disposition', ''), 1)
        if filename != '': filepath = "{0}{1}".format(path, filename)
    else:
        # Otherwise fix up the extension from the response Content-Type.
        content_type = header.get('Content-Type', '')
        file_extension = getFileExt(content_type)
        if (re.search("\.[a-zA-Z]{2,4}$", filepath)):
            filepath = re.sub("(\.[a-zA-Z]{2,4})$", file_extension, filepath)
        else:
            filepath = "{0}{1}{2}".format(path, md5(url), file_extension)
    f = open(filepath, 'wb')
    downloaded_size = 0
    print '>> start download {0} to {1}'.format(url, filepath)
    if f != None:
        block_size = 1024  # copy in 1 KiB blocks
        while 1:
            block = response.read(block_size)
            if block == '' or block == None: break;
            f.write(block)
            downloaded_size += len(block)
        f.close()
        del f
        del response
    # Verify the transfer; this is 0 when the server sent no Content-Length,
    # which then also counts as a failed download.
    file_download_size = long(header.get('Content-Length', 0))
    if (file_download_size != downloaded_size):
        if os.path.isfile(filepath): os.unlink(filepath)
        print 'Downloaded failed with url: {0}'.format(url)
    else:
        print 'Downloaded success: {0} to {1}'.format(url, filepath)
    return

def getHtmlContent(html):
    '''Extract the main article body with Readability, stripping the
    wrapping <html>/<body> tags it adds around the summary.'''
    summary = Document(html).summary()
    return re.sub(r"\</?(html|body)/?\>", '', summary)


def extractText(pat, match, idx, default='', **kw):
    '''
    Extract a single regex group from a string.
    @param pat: regex pattern (str or unicode)
    @param match: subject string to search
    @param idx: group number indicated in the pattern
    @param default: value returned when the pattern does not match; may also
                    be passed as keyword 'default' for backward compatibility
    '''
    # Accept the historical keyword spelling as well as the new positional
    # parameter (callers such as downloadImage pass it positionally; with the
    # old 3-positional signature that call raised TypeError).
    default = kw.get('default', default)
    result = default
    # This source file is UTF-8, so str patterns are UTF-8 bytes; decode them
    # before matching a unicode subject, otherwise re would attempt an
    # implicit ascii decode and raise UnicodeDecodeError.
    if isinstance(match, unicode) and not isinstance(pat, unicode):
        try:
            pat = pat.decode('utf-8')
        except UnicodeDecodeError:
            pass
    try:
        m = re.compile(pat).search(match)
        # Explicit None check; the old `m != ''` relied on the except clause
        # swallowing the AttributeError from None.group().
        if m is not None:
            result = m.group(idx)
    except Exception:
        result = default
    return result


def getFileExt(mime):
    '''Map a MIME type to a file extension (with the .jpe/.jpeg variants
    normalized to .jpg); returns '' when the type is unknown.'''
    mimetypes.init()
    try:
        ext = mimetypes.guess_extension(mime)
    except Exception:
        ext = ''
    # guess_extension returns None (without raising) for unknown types, and
    # callers format the result straight into file names -- coerce to ''
    # so they do not end up with a literal 'None' suffix.
    if ext is None: ext = ''
    if ext in ['.jpe', '.JPE', '.jpeg', '.JPEG']: ext = '.jpg'
    return ext

def convertHTMLEntitiesToUnicode(html):
    '''Decode HTML entities in *html* and return the <body> content (or the
    whole document when there is no body) as a UTF-8 byte string.'''
    parsed = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    content = parsed.body or parsed
    return unicode(content).encode('utf-8')

def file_get_contents(filepath):
    '''Return the raw contents of *filepath*, or None when it is not a
    regular file.'''
    if not os.path.isfile(filepath): return
    # Single read with a context manager; the old 1 KiB-block `text += block`
    # loop was O(n^2) and never closed the file on error.
    with open(filepath, 'rb') as f:
        return f.read()

def file_put_contents(filepath, input):
    '''Write *input* to *filepath* (text mode), creating parent directories
    as needed. (The parameter name `input` shadows the builtin but is kept
    for keyword-argument compatibility.)'''
    folder = os.path.dirname(filepath)
    # Guard against makedirs('') for bare file names, consistent with the
    # handling in pickle_dump/json_dump.
    if folder != '' and not os.path.exists(folder):
        os.makedirs(folder, 0o777)
    with open(filepath, 'w') as f:
        f.write(input)
    return

def stringify(node):
    '''Flatten an lxml node (or the first item of a node list) to its text
    content, converted to precomposed Vietnamese form and normalized;
    returns '' for None or an empty list.'''
    if node is None: return ''
    if type(node).__name__ == 'list':
        if not node:
            return ''
        node = node[0]
    extract = etree.XPath("string()")
    text = extract(node)
    try:
        text = toUnicodeDungSan(text)
    except:
        pass
    return normalize_str(text)




########################################
# ENCRYPT FUNCTION
########################################
def md5(text):
    '''Hex-encoded MD5 digest of *text*.'''
    hasher = hashlib.md5(text)
    return hasher.hexdigest()

def sha1(text):
    '''Hex-encoded SHA-1 digest of *text*.'''
    hasher = hashlib.sha1(text)
    return hasher.hexdigest()

def crc32unsigned(text):
    '''Decimal-string CRC32 of *text*, forced unsigned.

    zlib.crc32 may return a negative int on Python 2; masking with
    0xffffffff normalizes it. (Fixed: dropped the Python-2-only 'L' long
    literal suffix -- the plain literal behaves identically on Python 2 and
    is a syntax error no longer on Python 3.)
    '''
    return str(zlib.crc32(text) & 0xffffffff)
    
##########################################

##########################################
# SERIALIZATION AND DESERIALIZATION OBJECT
##########################################
def pickle_load(file):
    '''
    Deserialize an object previously written by pickle_dump.
    @param file: path the object was serialized to; returns None when the
                 file does not exist
    '''
    if not os.path.exists(file): return
    handle = None
    obj = None
    try:
        handle = open(file, 'r')
        obj = cPickle.load(handle)
    finally:
        if handle is not None:
            handle.close()
    return obj

def pickle_dump(data, file):
    '''
    Serialize *data* to *file* with cPickle, creating the parent folder if
    missing; errors are printed rather than raised.
    @param data: object to serialize
    @param file: path of the file to store the object in
    '''
    if os.path.dirname(file) == '':
        file = os.path.join('.', file)
    target_dir = os.path.dirname(file)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir, 0o777)
    handle = None
    try:
        handle = open(file, 'w')
        cPickle.dump(data, handle)
    except:
        traceback.print_exc()
    finally:
        if handle is not None:
            handle.close()

def json_dump(data, path):
    '''Serialize *data* as JSON to *path*, creating the parent folder if
    missing; errors are printed rather than raised (mirrors pickle_dump).'''
    if os.path.dirname(path) == '': path = os.path.join('.', path)
    folder = os.path.dirname(path)
    if not os.path.exists(folder): os.makedirs(folder, 0o777)
    try:
        # Context manager replaces the manual open/finally/close dance.
        with open(path, 'w') as fp:
            json.dump(data, fp)
    except Exception:
        traceback.print_exc()

def json_load(file):
    '''Deserialize JSON from *file*; returns None when the file is missing
    or its contents cannot be parsed.'''
    if not os.path.exists(file): return
    try:
        # Context manager replaces the manual open/finally/close dance.
        with open(file, 'r') as fp:
            return json.load(fp)
    except Exception:
        return None
        
###################################################

def toUpper(string):
    '''Uppercase a UTF-8 Vietnamese string: accented lowercase characters are
    mapped through the table below (str.upper alone does not handle them on
    byte strings), then the remainder is uppercased with .upper().'''
    patterns = [ "á", "à", "ả", "ã", "ạ", "â", "ấ", "ầ", "ẩ", "ẫ", "ậ", "ă", "ắ", "ằ", "ẳ", "ẵ", "ặ",
                "é", "è", "ẻ", "ẽ", "ẹ", "ê", "ế", "ề", "ể", "ễ", "ệ", "đ",
                "í", "ì", "ỉ", "ĩ", "ị", "ý", "ỳ", "ỷ", "ỹ", "ỵ",
                "ó", "ò", "ỏ", "õ", "ọ", "ô", "ố", "ồ", "ổ", "ỗ", "ộ", "ơ", "ớ", "ờ", "ở", "ỡ", "ợ",
                "ú", "ù", "ủ", "ũ", "ụ", "ư", "ứ", "ừ", "ử", "ữ", "ự" ]
    
    replaces = [ "Á", "À", "Ả", "Ã", "Ạ", "Â", "Ấ", "Ầ", "Ẩ", "Ẫ", "Ậ", "Ă", "Ắ", "Ằ", "Ẳ", "Ẵ", "Ặ",
                "É", "È", "Ẻ", "Ẽ", "Ẹ", "Ê", "Ế", "Ề", "Ể", "Ễ", "Ệ", "Đ",
                "Í", "Ì", "Ỉ", "Ĩ", "Ị", "Ý", "Ỳ", "Ỷ", "Ỹ", "Ỵ",
                "Ó", "Ò", "Ỏ", "Õ", "Ọ", "Ô", "Ố", "Ồ", "Ổ", "Ỗ", "Ộ", "Ơ", "Ớ", "Ờ", "Ở", "Ỡ", "Ợ",
                "Ú", "Ù", "Ủ", "Ũ", "Ụ", "Ư", "Ứ", "Ừ", "Ử", "Ữ", "Ự" ]
    
    # The table entries are literal characters, so plain substring replacement
    # does the job -- the old per-character re.sub calls were needless regex
    # compilation overhead.
    for lower_ch, upper_ch in zip(patterns, replaces):
        string = string.replace(lower_ch, upper_ch)
    return string.upper()

# Lookup tables mapping Vietnamese characters between the combining-mark
# ("to hop") form and the precomposed ("dung san") form.
# NOTE(review): the two literals below render identically but are expected to
# differ in Unicode normalization (precomposed vs decomposed code points) --
# do not re-type or normalize them, or toUnicodeDungSan() becomes a no-op.
# TODO confirm they actually differ byte-wise in the repository.
unicodeDungSan = 'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
unicodeToHop =   'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
aDungSan = unicodeDungSan.split(' ')  # precomposed forms, index-aligned with aToHop
aToHop = unicodeToHop.split(' ')      # combining-mark forms
def toUnicodeDungSan(string):
    '''Replace every combining-mark ("to hop") Vietnamese character in
    *string* with its precomposed ("dung san") equivalent, using the
    module-level aToHop/aDungSan tables.'''
    converted = string
    for composed, precomposed in zip(aToHop, aDungSan):
        try:
            converted = converted.replace(composed, precomposed)
        except:
            pass
    return converted

def normalize_str(string):
    '''Canonicalize *string*: precomposed Vietnamese form, UTF-8 encoded if
    unicode, whitespace-stripped; None becomes ''.'''
    if string is None:
        return ''
    normalized = toUnicodeDungSan(string)
    if isinstance(normalized, unicode):
        normalized = normalized.encode('utf-8')
    return normalized.strip()

def toAscii(string):
    ''' Strip Vietnamese diacritics from *string*, returning the plain-ASCII
    base letters (a/A, d/D, e/E, ...). '''
    if string == '': return ''
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    is_unicode = isinstance(string, unicode)
    for idx in range(len(listPattern)):
        pattern = listPattern[idx]
        # The alternation literals above are UTF-8 byte strings (this file's
        # source encoding); they must be decoded before matching a unicode
        # subject. The old `ur"{0}".format(pattern)` attempted an implicit
        # ascii decode of those bytes and raised UnicodeDecodeError.
        if is_unicode and not isinstance(pattern, unicode):
            pattern = pattern.decode('utf-8')
        string = re.sub(pattern, rep[idx], string)
    return string

class Etree():
    '''Static helpers for pruning and serializing lxml element trees.'''

    @staticmethod
    def clean_elements(node, tag='*', attrs=None):
        '''Remove every descendant of *node* named *tag* (a single name, a
        list of names, or '*') whose attributes contain all the key/value
        pairs given in *attrs*. No-op for None / empty-list input, and
        refuses to run with tag='*' and no attribute filter (that would
        delete the whole subtree).'''
        if attrs is None: attrs = {}
        if node is None: return
        if type(node).__name__ == 'list':
            if len(node) == 0: return
            node = node[0]
        if type(tag).__name__ == 'list':
            for itag in tag:
                Etree.clean_elements(node, itag, attrs)
            return
        if tag == '' or tag is None: tag = '*'
        if tag == '*' and len(attrs) == 0: return
        xp = ".//{0}".format(tag)
        if len(attrs) > 0:
            conditions = ["contains(@{0}, '{1}')".format(k, attrs[k]) for k in attrs.keys()]
            xp += "[" + " and ".join(conditions) + "]"
        for victim in node.xpath(xp):
            victim.getparent().remove(victim)
        return

    @staticmethod
    def clean(node):
        '''Detach an element -- or every element of a node list -- from its
        parent. BUGFIX: the list case used to pass a generator expression to
        clean(), whose type matched neither branch, so list input was
        silently ignored.'''
        if node is None: return
        if type(node).__name__ == '_Element':
            node.getparent().remove(node)
        elif type(node).__name__ == 'list':
            for inode in node:
                Etree.clean(inode)
        return

    @staticmethod
    def clean_following_sibling(node, clean_self=False):
        '''Remove every element following *node* under the same parent;
        optionally remove *node* itself too.'''
        if node is None: return
        if type(node).__name__ == 'list':
            for inode in node: Etree.clean_following_sibling(inode, clean_self)
        else:
            for item in node.xpath("./following-sibling::*"):
                item.getparent().remove(item)
            if clean_self: node.getparent().remove(node)
        return

    @staticmethod
    def clean_preceding_sibling(node, clean_self=False):
        '''Remove every element preceding *node* under the same parent;
        optionally remove *node* itself too. BUGFIX: the list case used to
        recurse into clean_following_sibling (copy-paste error), deleting
        the wrong siblings.'''
        if node is None: return
        if type(node).__name__ == 'list':
            for inode in node: Etree.clean_preceding_sibling(inode, clean_self)
        else:
            for item in node.xpath("./preceding-sibling::*"):
                item.getparent().remove(item)
            if clean_self: node.getparent().remove(node)
        return

    @staticmethod
    def clean_comment_tag(html):
        '''Strip single-line HTML comments from *html*. BUGFIX: the pattern
        is now non-greedy; the greedy original merged everything between the
        first and last comment on a line into one match.'''
        return re.sub(r"<!--(.+?)-->", "", html)

    @staticmethod
    def tostring(node):
        '''Serialize an element (or a list of elements, joined with an empty
        paragraph) to pretty-printed UTF-8 HTML, with entities decoded and
        comments stripped.'''
        if node is None: return STRING_EMPTY
        if '_Element' in type(node).__name__:
            return Etree.clean_comment_tag(convertHTMLEntitiesToUnicode(etree.tounicode(node, pretty_print=True).encode('utf-8')))
        html = ''
        if type(node).__name__ == 'list':
            for idx, inode in enumerate(node):
                html += Etree.tostring(inode)
                if idx < len(node) - 1: html += '\n<p>&nbsp;</p>\n'
        return html


if __name__ == '__main__':
    
    # Smoke test: extract the "<text>: <digits>" prefix from a Vietnamese
    # sample string and print it.
    a = u'con chó: 0453 - hehehe'
    print extractText("(.+: \d+)", a, 1)
    


