# -*- coding: utf-8 -*-
import zlib                 # Dùng tính CRC32
import hashlib              # Dùng tính hash Md5
import traceback            # Dùng để xử lý debug
import sys
import os
import re                   # Thư viện regex
import time
import mechanize
import cStringIO as StringIO
import urllib
import logging
import gzip
try:
    import paramiko
except:
    pass
from BeautifulSoup import BeautifulSoup
from lxml import etree
from urllib import urlretrieve
from termcolor import cprint
from urlparse import urlparse
from difflib import SequenceMatcher
from readability.readability import Document
from mongolog.handlers import MongoHandler
import Mp3

_stringify = etree.XPath("string()")     # XPath helper: extracts the full text content of an HTML element

def getMongoLog(host, port, name):
    ''' Build a logger named *name* that ships WARNING-and-above records to
        the 'log' collection of the 'mongolog' database on the given MongoDB
        server. '''
    mongo_logger = logging.getLogger(name)
    mongo_logger.setLevel(logging.WARNING)
    handler = MongoHandler.to('mongolog', 'log', host=host, port=port)
    mongo_logger.addHandler(handler)
    return mongo_logger
    
def getLogger(name, fm='', path=''):
    ''' Create a DEBUG-level logger.

        fm   -- log record format; a default with level/time/line is used
                when empty
        path -- when non-empty, also log to this file (parent folder is
                created if missing)
    '''
    if fm == '':
        fm = '%(levelname)s - %(asctime)s - line: %(lineno)s - %(message)s'
    logging.basicConfig(format=fm)
    result = logging.getLogger(name)
    if path != '':
        ensureFolderExist(path)
        file_handler = logging.FileHandler(path)
        file_handler.setFormatter(logging.Formatter(fm))
        result.addHandler(file_handler)
    result.setLevel(logging.DEBUG)
    return result

logger = getLogger('commonlib')  # module-level logger shared by the helpers below

def getArticleNode(html, clean=[]):
    ''' Extract the main article content from raw *html* via readability.

        clean -- list of (tag, attrib_pairs) items forwarded to
                 cleanElementWithAttrib to strip unwanted descendants.

        Returns the <body> element of the readability summary, or None when
        nothing could be parsed/extracted.
    '''
    readable_article = encodeUTF8Str(Document(html).summary())
    readable_article = readable_article.replace('<body/>', '')
    tree = buildTreeFromHTML(readable_article)
    # buildTreeFromHTML returns None on a parse failure.
    if tree is None: return None
    contentNode = tree.xpath("/html/body")
    # BUGFIX: xpath() returns a list, never None -- the old "== None" test
    # could not prevent an IndexError on an empty result.
    if not contentNode: return None
    contentNode = contentNode[0]
    for item in clean:
        cleanElementWithAttrib(contentNode, item[0], item[1])
    return contentNode

def getSSH(hostname='mana.vn', username='giangnh'):
    ssh = None
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        print 'start ssh to {0}@{1}'.format(username, hostname)
        ssh.connect(hostname=hostname, username=username)
        print 'connected to {0}'.format(hostname)
    except:
        traceback.print_exc()
    return ssh

def downloadNUpload(ssh, url, path, prefix=''):
    ''' Download an image from *url* into local directory *path* (file name
        derived from the MD5 hash of the url), then upload it to the same
        path on a remote host over the given paramiko connection.

        ssh    -- connected paramiko SSHClient, or None to skip the upload
        url    -- image url; spaces are percent-encoded first
        path   -- local destination directory ('/' appended when missing)
        prefix -- prepended to the returned relative file name

        Returns prefix + hashed file name. Raises TypeError when the download
        fails after 4 attempts, Exception when the upload fails.
    '''
    print 'call downloadNUpload url={0} to {1}'.format(url, path)
    url = url.replace(' ', '%20')
    if path[-1] != '/': path += '/'
    # Take the extension from the url; normalize jpeg variants (and a
    # missing extension) to .jpg.
    ext = extractWithRegEx(r'.+(\.[a-zA-Z]{1,4})$', url, 1).lower()
    if (ext == '.jpeg' or ext == '.jpe' or ext == ''): ext = '.jpg'
    fileName = '{0}{1}'.format(getMd5FileName(url), ext)
    filePath = '{0}{1}'.format(path, fileName)
    filePathReturn = '{0}{1}'.format(prefix, fileName)
    err = False
    if not os.path.isfile(filePath):
        # Up to 4 download attempts; err reflects only the *last* attempt.
        ido = 0
        while ido < 4:
            ido += 1
            try:
                folderPath = os.path.dirname(filePath)
                if not os.path.exists(folderPath):
                    try: os.system("umask 000")
                    except: pass
                    os.makedirs(folderPath, 0777)
                located, response = urlretrieve(url, filePath)
                if response is not None:
                    try:
                        # NOTE(review): a bare 'raise' with no active exception
                        # raises TypeError here; the inner except treats it as
                        # "not an image" and retries -- works, but fragile.
                        if not 'image' in response.get('Content-Type', ''): raise
                        if long(response.get('Content-Length', 0)) != os.path.getsize(located): raise Exception('File size downloaded diffirent to original file')
                        err = False
                        break
                    except:
                        continue
            except:
                err = True
                removeFile(filePath)
    if err or not os.path.isfile(filePath):
        raise TypeError, 'Could not download image url={0}'.format(url)
    uploadFailed = False
    if ssh is not None:
        sftp = None
        try:
            print 'DEBUG: start upload {0}'.format(filePath)
            # Create the remote directory tree, then push over SFTP (the
            # remote path mirrors the local one); up to 3 attempts.
            ssh.exec_command('mkdir -p -m 777 {0}'.format(os.path.dirname(filePath)))
            ido = 0
            sftp = ssh.open_sftp()
            while ido < 3:
                ido += 1
                try:
                    sftp.put(filePath, filePath)
                except: 
                    uploadFailed = True; 
                    print 'WARNING: reupload url={0} to {1}'.format(url, path); 
                    continue
                uploadFailed = False
                break
            if uploadFailed == False:
                print 'INFO: finished upload {0}'.format(filePath)
            else:
                print 'WARNING: upload failed'
        except:
            raise Exception('Could not upload file {0}'.format(filePath))
        finally:
            if sftp is not None: sftp.close()
        if uploadFailed: raise Exception('Could not upload file {0}'.format(filePath))
    return filePathReturn
            
def compareString(a, b):
    ''' Similarity ratio between two sequences (0.0 .. 1.0), via difflib. '''
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()

def convertHTMLEntitiesToUnicode(html):
    ''' Decode HTML entities (&amp; etc.) in *html* using BeautifulSoup and
        return the result as a unicode string (body element when present). '''
    parsed = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
    return unicode(parsed.body or parsed)

def sameOfTwoString(a, b):
    ''' Longest common substring of *a* and *b*; '' when either is None or
        empty. '''
    a = a or ''
    b = b or ''
    if not a or not b:
        return ''
    matcher = SequenceMatcher()
    matcher.set_seqs(a, b)
    start, _, size = matcher.find_longest_match(0, len(a), 0, len(b))
    return a[start:start + size]

def getFirstNode(tree, xpath):
    ''' Return the first element of tree.xpath(xpath)'s result collection,
        or None when the query matches nothing. '''
    matches = tree.xpath(xpath)
    return matches[0] if len(matches) > 0 else None
        
def baseUrl(link):
    ''' Return the scheme://host part of *link*, e.g. 'http://example.com'. '''
    parts = urlparse(link)
    return "{0}://{1}".format(parts.scheme, parts.netloc)

def normalizeStr(s):
    ''' Drop newlines/CRs/tabs, collapse whitespace runs to single spaces,
        and trim the ends. Note: removing a newline joins adjacent words. '''
    without_breaks = re.sub(r'[\n\r\t]', '', s)
    collapsed = re.sub(r'\s+', ' ', without_breaks)
    return collapsed.strip()

def getCRC32Unsign(textToHash=None):
    ''' Unsigned CRC32 checksum of *textToHash* as a decimal string. '''
    # Mask to 32 bits: zlib.crc32 may return a signed value on Python 2.
    # PORTABILITY FIX: 0xffffffff instead of the Python-2-only literal
    # 0xffffffffL -- the value is identical on both Python 2 and 3.
    return str(zlib.crc32(textToHash) & 0xffffffff)

def getMD5Hash(textToHash=None):
    ''' Hex MD5 digest of *textToHash*. '''
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    ''' Build an 'a/b/c/d' directory path from the first four hex characters
        of the MD5 digest of *stringToHash* (used to shard files on disk). '''
    digest = getMD5Hash(stringToHash)
    return '/'.join(digest[:4])

def getMd5FileName(stringToHash):
    ''' Build an 'a/b/c/d/rest' sharded file name from the MD5 hex digest of
        *stringToHash*: the first four characters become directories, the
        remainder is the file name. '''
    digest = getMD5Hash(stringToHash)
    parts = [digest[0], digest[1], digest[2], digest[3], digest[4:]]
    return '/'.join(parts)

def readfile(filepath, method='r', size_of_block=4096):
    ''' Read up to *size_of_block* bytes/chars from the start of *filepath*.

        method        -- mode passed to open() ('r', 'rb', ...)
        size_of_block -- maximum amount read (the file is NOT read fully)

        Returns '' when the file cannot be opened/read (IOError).
    '''
    content = ""
    try:
        # BUGFIX: 'with' guarantees the handle is closed even when read()
        # raises (the original leaked the handle on a read error). The old
        # 'finally: return' also silently swallowed every exception type;
        # only IOError is a best-effort case here.
        with open(filepath, method) as f:
            content = f.read(size_of_block)
    except IOError:
        cprint("ERROR: unable to read file ({0})".format(filepath), 'red')
    return content
        
def downloadImage(url, path, prefix=''):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    url = url.replace(' ', '%20')
    if not path.endswith('/'): path += '/'
    ext = extractWithRegEx(r'.+(\.[a-zA-Z]{1,4})$', url, 1).lower()
    if (ext == '.jpeg' or ext == '.jpe' or ext == ''): ext = '.jpg'
    fileName = '{0}{1}'.format(getMd5FileName(url), ext)
    filePath = '{0}{1}'.format(path, fileName)
    cprint("DEBUG: download image from url={0} to location={1}".format(url, filePath), 'green')
    filePathReturn = '{0}{1}'.format(prefix, fileName)
    currentRetry, maxRetries = 0, 3
    saveFlag = False 
    while not saveFlag and currentRetry < maxRetries:
        try:
            if os.path.isfile(filePath):
                if "html" in readfile(filePath, 'r') or os.path.getsize(filePath) < 1:
                    try: 
                        os.unlink(filePath); 
                    except: 
                        pass
            if not os.path.isfile(filePath):
                folderPath = os.path.dirname(filePath)
                if not os.path.exists(folderPath):
                    try: os.umask(000)
                    except: pass 
                    os.makedirs(folderPath, 0777)
                location, serverResponse = urlretrieve(url, filePath)
                if serverResponse.has_key('Content-Type'):
                    if not 'image' in serverResponse['Content-Type']:
                        raise Exception("EXCEPTION: server response not an image type. content-type server response is '{0}'".format(serverResponse['Content-Type']))
                elif serverResponse.has_key('Content-Length'): 
                    if long(serverResponse['Content-Length']) != os.path.getsize(location):
                        raise Exception('EXCEPTION: download failed with reason: filesize downloaded not equal the file size server responsed ({0} <> {1})'
                                        .format(serverResponse['Content-Length']), os.path.getsize(location))
                elif 'html' in readfile(location, 'rb'):
                    raise Exception("EXCEPTION: just downloaded a html file. Failed to download image !")
            saveFlag = True
        except:
            if os.path.isfile(filePath): os.unlink(filePath)
            cprint('ERROR: download image error (reason: {0}), url={1}'.format(sys.exc_info()[1], url), 'grey')
        currentRetry += 1
    return filePathReturn if saveFlag == True else ''

def downloadMp3(url, download_path, prefix='', ext='mp3'):
    ''' Download an mp3/mp4 file to local disk, naming the file from the MD5
        hash of the url; an existing valid local copy is not re-downloaded.
        - only urls ending in mp3/mp4 (any case) are accepted
        - on a size mismatch the download is retried up to 2 more times
        - the *ext* parameter is ignored: the extension is re-derived from url
        Returns (prefix + relative file name, file size as a string) on
        success, or ('Err: <reason>', '-1') on failure.
    '''
    if not url.endswith('mp3') and not url.endswith('mp4') and not url.endswith('Mp3') and not url.endswith('Mp4'):
        return 'Err:chi download cac file mp3, mp4', '-1'
    ext = 'mp3' if url.endswith('mp3') or url.endswith('Mp3') else 'mp4'
    if download_path[-1] != '/': download_path += '/'
    if prefix != '':
        if prefix[-1] != '/': prefix += '/'
    fName = "{0}.{1}".format(getMd5FileName(url), ext)
    rPath = "{0}{1}".format(prefix, fName)
    filename = "{0}{1}.{2}".format(download_path, getMd5FileName(url), ext)
    filesize = -1
    try:
        print 'commonlib.downloadMp3: {0} -> {1}'.format(url, filename)
        # A local file under 100 bytes is treated as a broken earlier download.
        if os.path.isfile(filename):
            if os.path.getsize(filename) < 100: removeFile(filename) 
        if not os.path.isfile(filename):
            path = os.path.dirname(filename)
            if not os.path.exists(path):
                try: os.umask(000)
                except: pass
                os.makedirs(path, 0777)
            successFlag = False
            # NOTE: retry up to 2 extra times when the download looks incomplete
            for ido in 1,2,3:
                localtion, response = urlretrieve(url, filename)
                responseSize = long(response['Content-Length'])
                filesize = os.path.getsize(filename)
                # Success only when the size on disk matches Content-Length.
                if filesize == responseSize:
                    successFlag = True
                    print 'commonlib.downloadMp3 finished %s' % filename
                    break
            if not successFlag: return 'Err: kich thuoc file da download nho hon kich thuoc file do server tra ve', '-1'
            return rPath, str(filesize)
        else:
            return rPath, str(os.path.getsize(filename))
    except:
        return 'Err: {0}'.format(sys.exc_info()[1]), '-1'

def extractWithRegEx(pat, matchStr, matchIdx, defaultVal = ''):
    ''' Search *matchStr* with regex *pat* and return group *matchIdx*.
        Returns *defaultVal* when there is no match or on any error
        (e.g. an out-of-range group index).
    '''
    try:
        rexp = re.compile(pat)
        m = rexp.search(matchStr)
        # BUGFIX: re.search returns a Match object or None, never '' -- the
        # old "m != ''" test was always true and only worked because
        # None.group() raised into the except clause.
        if m is None:
            return defaultVal
        return m.group(matchIdx)
    except:
        return defaultVal

def encodeUTF8Str(str):
    ''' Encode *str* to UTF-8 bytes when it is a unicode object; any other
        value is returned unchanged. '''
    if dataTypeName(str) != 'unicode':
        return str
    return str.encode('utf-8')

def dataTypeName(args):
    ''' Name of the value's type: 'None' for None, otherwise
        type(value).__name__ (e.g. 'list', 'dict', 'unicode'). '''
    return 'None' if args is None else type(args).__name__

def encodeUTF8(args):
    ''' Encode unicode content of *args* to utf-8.

        args may be a unicode string, a list of strings, or a dict whose
        values are strings. Any other type yields None (implicit return,
        kept for backward compatibility).
    '''
    dataType = dataTypeName(args)
    if dataType == 'unicode':
        return encodeUTF8Str(args)
    elif dataType == 'list':
        # BUGFIX: the old code iterated over the freshly-created empty list
        # instead of *args*, so this branch always returned [].
        return [encodeUTF8Str(i) for i in args]
    elif dataType == 'dict':
        data = {}
        for i, v in args.iteritems():
            data[i] = encodeUTF8Str(v)
        return data

def toAscii(str):
    ''' Strip Vietnamese diacritics from *str*, mapping each accented letter
        to its plain ASCII base letter. '''
    if str == '': return ''
    str = encodeUTF8Str(str)
    # Each pattern is an alternation of the accented variants of one base
    # letter (lowercase and uppercase handled separately); rep[idx] is the
    # replacement for listPattern[idx].
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    for idx in range(len(listPattern)):
        str = re.sub(listPattern[idx], rep[idx], str)
    return str

# Parallel alphabets used by toUnicodeDungSan: the first string holds the
# precomposed ("dung san") form of each Vietnamese character, the second the
# combining ("to hop") form. They render identically but differ at the byte
# level. NOTE(review): do not let an editor apply Unicode normalization to
# this file -- it would silently make the two lines equal.
unicodeDungSan = 'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
unicodeToHop =   'a á à ả ã ạ ă ắ ằ ẳ ẵ ặ â ấ ầ ẩ ẫ ậ e é è ẻ ẽ ẹ ê ế ề ể ễ ệ i í ì ỉ ĩ ị o ó ò ỏ õ ọ ô ố ồ ổ ỗ ộ ơ ớ ờ ở ỡ ợ u ú ù ủ ũ ụ ư ứ ừ ử ữ ự y ý ỳ ỷ ỹ ỵ đ A Á À Ả Ã Ạ Ă Ắ Ằ Ẳ Ẵ Ặ Â Ấ Ầ Ẩ Ẫ Ậ E É È Ẻ Ẽ Ẹ Ê Ế Ề Ể Ễ Ệ I Í Ì Ỉ Ĩ Ị O Ó Ò Ỏ Õ Ọ Ô Ố Ồ Ổ Ỗ Ộ Ơ Ớ Ờ Ở Ỡ Ợ U Ú Ù Ủ Ũ Ụ Ư Ứ Ừ Ử Ữ Ự Y Ý Ỳ Ỷ Ỹ Ỵ Đ'
# Word lists aligned index-for-index (both strings are space-separated).
aDungSan = unicodeDungSan.split(' ')
aToHop = unicodeToHop.split(' ')
def toUnicodeDungSan(strInp):
    ''' Convert Vietnamese text from the combining ("to hop") Unicode form to
        the precomposed ("dung san") form, one character cluster at a time. '''
    res = strInp
    for combining, precomposed in zip(aToHop, aDungSan):
        res = res.replace(combining, precomposed)
    return res

def unescapeHTML(str, **args):
    ''' Replace HTML entities with the characters they denote: numeric forms
        (&#65;) first, then named forms (&nbsp;), then drop anything left
        unrecognized. Errors leave the partially-converted text as-is. '''
    from htmlentitydefs import name2codepoint
    result = encodeUTF8Str(str)
    try:
        result = re.sub(r'&#(\d+);', lambda m: unichr(int(m.group(1))), result)
        result = re.sub(r'&(\w+);', lambda m: unichr(name2codepoint[m.group(1)]), result)
        result = re.sub(r'&#(\d+);|&(\w+);', '', result)
    except:
        traceback.print_exc()
    return result

def wordCount(str):
    ''' Count the whitespace-separated tokens in *str* that contain at least
        one word character; lines without any word character are skipped. '''
    total = 0
    for raw_line in str.split('\n'):
        line = re.sub(r"\s+", ' ', raw_line).strip()
        if not line:
            continue
        if re.search(r'\w', line) is None:
            continue
        total += sum(1 for token in line.split(' ') if re.search(r'\w', token))
    return total

def getHTML(url, data={}, headers={}, cookie=None, debugLevel=0, delay=0, max_retries=3):
    ''' Fetch *url* (optionally POSTing *data*) and return the response body
        as a string. Currently delegates entirely to CrawlerLib.Http.getHtml.
    '''
    from CrawlerLib import Http
    # NOTE(review): this early return makes everything below unreachable
    # dead code -- presumably the previous mechanize-based implementation,
    # kept for reference.
    return Http.getHtml(url, data, headers)
    logger.debug("--- start getHTML {0}".format(url))
    url = re.sub(r'\s', '%20', url)
    html = ''
    ic = 0
    while ic <= max_retries:
        err = False
        try:
            debugHandler = mechanize.HTTPHandler()
            debugHandler.set_http_debuglevel(debugLevel)
            if cookie == None: cookie = mechanize.LWPCookieJar()
            opener = mechanize.build_opener(debugHandler, mechanize.HTTPCookieProcessor(cookie), mechanize.HTTPRedirectHandler)
            aH = [("User-Agent", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1")]
            for i, v in headers.items(): aH.append((i, v))
            opener.addheaders = aH
#            mechanize.install_opener(opener)
            if isinstance(data, dict) and len(data)>0:
                response = mechanize.urlopen(url, urllib.urlencode(data))
            else:
                response = mechanize.urlopen(url)
            # NOTE(review): this print consumes the response stream, so the
            # read() below would return '' if this code were re-enabled.
            print response.read()
            content_type = response.info().getheader('Content-Encoding', '')
            html = response.read()
            if content_type == 'gzip':
                gzipper = gzip.GzipFile(fileobj=StringIO.StringIO(html))
                html = gzipper.read()
        except mechanize.HTTPError, e:
            logger.error("({0}) {1}".format(url, e.msg))
            if e.msg == 'Not Found': break
            err = True;
        except mechanize.URLError, e:
            logger.error("({0}) {1}".format(url, e.reason))
            err = True;
        except:
            traceback.print_exc()
        if not err:
            logger.debug("--- finished {0}".format(url))
            break
        else:
            logger.debug("--- failed {0}".format(url))
        if delay > 0: time.sleep(delay)
        ic += 1
    
    # Strip any leading non-ASCII bytes (e.g. a BOM) from the result.
    if len(html) > 0: 
        while ord(html[0]) > 127: html = html[1:]
    return html

def buildTreeFromHTML(html):
    ''' Parse an HTML string (utf-8) into an lxml tree; returns None on a
        parse failure (the traceback is printed). '''
    try:
        html_parser = etree.HTMLParser(encoding='utf-8')
        return etree.parse(StringIO.StringIO(html), html_parser)
    except:
        traceback.print_exc()
        return None

def getXMLTree(url, data={}, headers={}, isXML=False, outputHTML=False, cookie=None, debugLevel=0, userAgent='', root_url='', timeout=300):
    ''' Fetch *url* with mechanize and parse the response into an lxml tree.

        data       -- when non-empty, sent as POST form data
        headers    -- extra request headers (appended after User-Agent)
        isXML      -- parse with the XML parser instead of the HTML parser
        outputHTML -- print the raw response body (debugging aid)
        cookie     -- cookie jar to reuse; a fresh LWPCookieJar when None
        userAgent  -- overrides the default Chrome User-Agent when non-empty
        root_url   -- when the final url equals this value, treat it as a
                      failed 302-to-homepage redirect
        timeout    -- socket timeout in seconds

        Returns the parsed tree, or None on HTTP/URL errors. NOTE(review):
        other exceptions (parse errors, the redirect Exception below)
        propagate to the caller after the finally block logs.
    '''
    logger.debug("--- start {0}".format(url))
    tree = None
    error = False
    try:
        debugHandler = mechanize.HTTPHandler()
        debugHandler.set_http_debuglevel(debugLevel)
        if cookie == None: cookie = mechanize.LWPCookieJar()
        opener = mechanize.build_opener(debugHandler, mechanize.HTTPCookieProcessor(cookie), mechanize.HTTPRedirectHandler)
        if userAgent == '':
            aH = [("User-Agent", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1")]
        else:
            aH = [("User-Agent", userAgent)]
        for i, v in headers.items(): aH.append((i, v))
        opener.addheaders = aH
        mechanize.install_opener(opener)
        request = mechanize.Request(url, urllib.urlencode(data)) if len(data) > 0 else mechanize.Request(url)
        response = mechanize.urlopen(request, timeout=timeout)
        if root_url != '':
            if response.geturl() == root_url: raise Exception, "302 server redirected to " + root_url
        html = response.read()
        # Transparently decompress gzip-encoded bodies.
        content_type = response.info().getheader('Content-Encoding', '')
        if content_type == 'gzip':
            gzipper = gzip.GzipFile(fileobj=StringIO.StringIO(html))
            html = gzipper.read()
        if outputHTML==True: print html
        parser = etree.XMLParser(encoding='utf-8') if isXML==True else etree.HTMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
    except mechanize.HTTPError, e:
        logger.error("({0}) {1}".format(url, e.msg))
        error = True;
    except mechanize.URLError, e:
        logger.error("({0}) {1}".format(url, e.reason))
        error = True;
    finally:
        if not error:
            logger.debug("--- finished {0}".format(url))
        else:
            logger.debug("--- failed {0}".format(url))
    return tree

def urlJoin(seg1, seg2):
    ''' Resolve a (possibly relative) url *seg2* against base url *seg1*. '''
    from urlparse import urljoin
    joined = urljoin(seg1, seg2)
    return joined

def replaceStr(pat, repl, input):
    ''' Substitute every match of regex *pat* in *input* with *repl*. '''
    return re.compile(pat).sub(repl, input)

def getAttribText(elem, attrib, defaultVal = ''):
    ''' Value of attribute *attrib* on *elem* (first item when a list is
        passed), utf-8 encoded; *defaultVal* on any failure. '''
    try:
        if dataTypeName(elem) == 'list':
            elem = elem[0]
        return encodeUTF8Str(elem.get(attrib))
    except:
        return defaultVal

def getElementText(elem, **args):
    '''Return the whitespace-normalized text of an lxml element.

    An element can carry both a text node and a tail node; keyword options
    select which parts are returned:
        text=1        include the element's own text node
        tail=1        include the element's tail text
        descendant=0  when truthy, return the full subtree text (stringify)
                      instead of just text/tail
        defaultVal='' value returned on any failure

    elem may also be a list, in which case its first item is used.
    '''
    # BUGFIX/SECURITY: the options were previously unpacked with
    # exec('%s = %s' % (i, v)), which evaluates option values as Python
    # expressions -- breaking on plain string values and executing
    # arbitrary code. Read them from the kwargs dict directly instead.
    defaultVal = args.get('defaultVal', '')
    tail = args.get('tail', 1)
    text = args.get('text', 1)
    descendant = args.get('descendant', 0)
    elemText = defaultVal
    try:
        if dataTypeName(elem) == 'list':
            elem = elem[0]
        if not descendant:
            if elem.text is not None and elem.tail is not None:
                if text:
                    if elem.text.strip() != '': elemText += elem.text.strip()
                if tail:
                    if elem.tail.strip() != '': elemText += ' ' + elem.tail.strip()
            elif elem.tail is not None:
                if elem.tail.strip() != '':
                    elemText = elem.tail.strip()
            elif elem.text is not None:
                if elem.text.strip() != '':
                    elemText = elem.text.strip()
        else:
            elemText = stringify(elem).strip()
        # Prefer the precomposed-unicode form; fall back to utf-8 encoding.
        try:
            ret = toUnicodeDungSan(elemText)
            elemText = ret
        except:
            elemText = encodeUTF8Str(elemText)
        return replaceStr(r'\s+', ' ', elemText)
    except:
        return defaultVal

def stringify(node):
    ''' Whitespace-normalized full text of an lxml node. Accepts a node, a
        list of nodes (first item used), or None/[] (returns ''). The text is
        converted to the precomposed Unicode form when possible. '''
    if node == None: return ''
    if type(node).__name__ == 'list':
        if not node:
            return ''
        node = node[0]
    text = encodeUTF8Str(normalizeStr(_stringify(node)))
    try:
        return toUnicodeDungSan(text)
    except:
        return encodeUTF8Str(text)

def unquoteUrl(str_inp):
    ''' Undo double percent-encoding of ':' (%253A) and '/' (%252F), then
        unquote the remaining plus/percent escapes. '''
    for code, plain in (('3A', ':'), ('2F', '/')):
        str_inp = str_inp.replace("%25{0}".format(code), plain)
    return urllib.unquote_plus(str_inp)

def cleanElementWithTag(element, remove_tags=[]):
    '''Remove every descendant of *element* whose tag is in *remove_tags*.

       element     -- an lxml element, or a list whose first item is used
       remove_tags -- list of tag names to drop
       Errors are printed and swallowed (best effort).
    '''
    try:
        if element == None: return ''
        if type(element).__name__ == 'list': 
            if len(element) > 0: element = element[0]
        # BUGFIX: snapshot the descendants before removing -- deleting nodes
        # while iterating a live lxml descendant iterator skips the removed
        # node's following siblings.
        for ichild in list(element.iterdescendants()):
            if ichild.tag in remove_tags:
                parent = ichild.getparent()
                if parent is not None:
                    parent.remove(ichild)
    except:
        traceback.print_exc()
    
def cleanElementWithAttrib(element, tag='', attrib=[()]):
    '''Remove descendants of *element* that match every (name, value) pair
       in *attrib*.

       element -- an lxml element, or a list whose first item is used
       tag     -- restrict matching to this tag ('' matches every tag)
       attrib  -- list of (attribute_name, expected_value) pairs; a node is
                  removed only when all pairs match
       Errors are printed and swallowed (best effort).
    '''
    try:
        if element == None: return ''
        if type(element).__name__ == 'list': element = element[0]
        if tag == '': tag = None
        # BUGFIX: snapshot the descendants before removing -- deleting nodes
        # while iterating a live lxml iterator skips following siblings.
        for elem in list(element.iterdescendants(tag=tag)):
            matched = True
            for (i, v) in attrib:
                if elem.attrib.get(i, '') != v:
                    matched = False
                    break
            if matched:
                pElem = elem.getparent()
                if pElem is not None: pElem.remove(elem)
    except:
        traceback.print_exc()

def cleanAllElementAfter(element, xpathOfNode, xpathOfFollowing):
    for item in element.xpath(xpathOfFollowing):
        try:
            item.getparent().remove(item)
        except:
            print 'Không thể xóa được element'
    item = element.xpath(xpathOfNode)
    if item != None:
        if len(item) > 0:
            if item[0].getparent() != None: item[0].getparent().remove(item[0])

def dropTagWithIndex(element, tag, index, attrib=()):
    ''' Remove the index-th matching descendant(s) of *element* and return
        their text.

        element -- lxml element (or list; first item used); '' when None
        tag     -- tag name to match; '' matches every tag
        index   -- 1-based position among matching descendants; a single int
                   or a list of ints
        attrib  -- optional (name, value) pair the node must also carry

        Returns the removed text: a {index: text} dict when *index* is a
        list, otherwise a single string ('' when not found).
    '''
    if element == None: return ''
    if dataTypeName(element) == 'list': element = element[0]
    if tag == '': tag = None
    idx = 0
    result = {} if dataTypeName(index) == 'list' else ''
    # NOTE(review): nodes are removed while iterating a live lxml iterator;
    # following siblings of a removed node may be skipped.
    for elem in element.iterdescendants(tag=tag):
        idx += 1
        if dataTypeName(index) == 'list':
            if idx in index:
                if len(attrib) > 0:
                    tagAttribute, tagAttributeValue = attrib
                    if elem.get(tagAttribute) != tagAttributeValue: continue
                result[idx] = getElementText(elem, descendant=1)
                elem.getparent().remove(elem)
        else:
            if idx == index:
                if len(attrib) > 0:
                    tagAttribute, tagAttributeValue = attrib
                    # NOTE(review): the single-index branch returns '' on an
                    # attribute mismatch, while the list branch just skips.
                    if elem.get(tagAttribute) != tagAttributeValue: return ''
                result = getElementText(elem, descendant=1)
                elem.getparent().remove(elem)
    return result
    
def ensureFolderExist(path):
    ''' Create the parent directory of *path* (mode 777) when it is missing.

        NOTE: *path* is a file path -- its dirname is created, not path
        itself. The effective mode is still filtered by the process umask.
    '''
    folderPath = os.path.dirname(path)
    if not os.path.exists(folderPath):
        # PORTABILITY FIX: 0o777 instead of the Python-2-only literal 0777
        # (same value; valid on Python 2.6+ and Python 3).
        os.makedirs(folderPath, 0o777)

def removeFile(filepath):
    try:
        os.unlink(filepath)
    except:
        print 'Unable to delete {0}'.format(filepath)

def downloadMp3NConvert(url, download_path, prefix='', ext='mp3'):
    ''' Same flow as downloadMp3 (download an mp3/mp4 named from the url's
        MD5 hash, retrying up to 2 extra times on a size mismatch), but also
        converts the file to AMR via Mp3.convert2Amr and reports the track
        duration from Mp3.getInfo.
        Returns (relative path, size string, duration string) on success, or
        ('Err: <reason>', '-1', '-1') on failure.
        NOTE(review): the first error return below yields only 2 items,
        unlike every other return path -- callers unpacking 3 values would
        fail on it.
    '''
    if not url.endswith('mp3') and not url.endswith('mp4') and not url.endswith('Mp3') and not url.endswith('Mp4'):
        return 'Err:chi download cac file mp3, mp4', '-1'
    ext = 'mp3' if url.endswith('mp3') or url.endswith('Mp3') else 'mp4'
    if download_path[-1] != '/': download_path += '/'
    if prefix != '':
        if prefix[-1] != '/': prefix += '/'
    fName = "{0}.{1}".format(getMd5FileName(url), ext)
    rPath = "{0}{1}".format(prefix, fName)
    filename = "{0}{1}.{2}".format(download_path, getMd5FileName(url), ext)
    filesize = -1
    try:
        print 'commonlib.downloadMp3NConvert: {0} -> {1}'.format(url, filename)
        # A local file under 100 bytes is treated as a broken earlier download.
        if os.path.isfile(filename):
            if os.path.getsize(filename) < 100: removeFile(filename) 
        if not os.path.isfile(filename):
            path = os.path.dirname(filename)
            if not os.path.exists(path):
                try: os.umask(000)
                except: pass
                os.makedirs(path, 0777)
            successFlag = False
            # NOTE: retry up to 2 extra times when the download looks incomplete
            for ido in 1,2,3:
                localtion, response = urlretrieve(url, filename)
                responseSize = long(response['Content-Length'])
                filesize = os.path.getsize(filename)
                duration = Mp3.getInfo(filename)
                if filesize == responseSize:
                    successFlag = True
                    print 'commonlib.downloadMp3 finished %s' % filename
                    break
            if not successFlag: return 'Err: kich thuoc file da download nho hon kich thuoc file do server tra ve', '-1', '-1'
            Mp3.convert2Amr(filename)
            return rPath, str(filesize), str(duration)
        else:
            Mp3.convert2Amr(filename)
            return rPath, str(os.path.getsize(filename)), str(Mp3.getInfo(filename))
    except:
        return 'Err: {0}'.format(sys.exc_info()[1]), '-1', '-1'

