# -*- coding: utf-8 -*-
import re
import mechanize
import cStringIO as StringIO
import lxml
from lxml.html import etree
import sys
import time
from pymongo import Connection
import zlib
import hashlib
import os
from urlparse import urljoin
from urllib import urlretrieve
import datetime
import logging
from mongolog.handlers import MongoHandler
import workerpool


#---    logging: records are persisted to the local 'mongolog' MongoDB database
log = logging.getLogger('nhaczingvn')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))
#---    local folders where harvested images and music files are stored
localFilepath = '/home/hoangnamhai/HarvestedData/nhaczingvn/'
localFileMusicPath = '/home/hoangnamhai/HarvestedData/nhaczingvn_music/'
#---    name of the MongoDB database holding the scraped playlists
__dbname__ = 'nhaczingvn'

#---    main function
def getCRC32Unsign(textToHash=None):
    """Return the unsigned CRC32 checksum of *textToHash* as a decimal string."""
    checksum = zlib.crc32(textToHash) & 0xffffffff
    return str(checksum)

def standardizeJSONStr(str_inp):
    """Strip newlines/backslashes and rewrite ="..." attribute values as ='...'."""
    cleaned = re.sub(r"\r\n|\n|\\", '', str_inp)
    requote = lambda match: "='{0}'".format(match.group(1))
    return re.sub(r'=\"(.+?)\"', requote, cleaned)

def dbconnect(dbname, collectionName):
    """Connect to the local MongoDB and return the named collection.

    dbname         -- database name (e.g. __dbname__).
    collectionName -- collection within that database.

    Exits the process on connection failure: nothing downstream can
    run without the database.
    """
    try:
        connection = Connection('localhost', 27017)
        db = connection[dbname]
        collection = db[collectionName]
        return collection
    # narrowed from a bare except so SystemExit/KeyboardInterrupt still propagate
    except Exception:
        print '>> Error at function dbconnect reason: ', sys.exc_info()[0], '>>', sys.exc_info()[1]
        sys.exit()

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Return a 3-level directory prefix 'a/b/c/' built from the first
    three hex digits of the MD5 of *stringToHash*."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/'.format(digest[0], digest[1], digest[2])

def getMd5FileName(stringToHash):
    """Return a hashed relative file path 'a/b/c/rest-of-digest' so files
    spread evenly over a 3-level directory tree."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/{3}'.format(digest[0], digest[1], digest[2], digest[3:])

def buildTree(url, **kw):
    print '>> start buildTree with url: {0}'.format(url)
    s_t = time.time()
    tree = None
    try:
        op = mechanize.build_opener(mechanize.HTTPEquivProcessor, mechanize.HTTPRefreshProcessor, mechanize.HTTPRefererProcessor)
        op.addheaders = [("User-agent", "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13")]
        html = op.open(url).read()
        if kw.get('isHTML'):
            print html
        if kw.get('xml'):
            parser = etree.XMLParser(encoding='utf-8')
        else:
            parser = etree.HTMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
    except:
        print '>> Error occurred at function buildTree url "{0}" with reason {1}'.format(url, sys.exc_info()[1])
        if kw.get('retry_time') is None:    retry_time = 1
        else:   retry_time = kw.get('retry_time') + 1
        if retry_time <= 3:
            print '>> Retry url {0}: {1}'.format(url, retry_time)
            tree = buildTree(url, retry_time=retry_time)
    finally:
        print '>> Finished url: ({0}) at {1}s'.format(url, time.time()-s_t)
        return tree

def saveImage(url):
    ''' Save the image at *url* to local storage, naming the local file by
    the MD5 hash of the URL.  If the file already exists locally it is not
    downloaded again.  Returns the local path, or '' on failure. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
#        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            # create the 3-level hashed directory on demand
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        # best-effort: a failed thumbnail download must not abort the crawl
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''  

def download(dLink, songLink, **kw):
#    file_name = standardizeFileName(os.path.basename(dLink))
    file_name = regex(r"filename=(.+)", dLink, 1)
    file_ext = file_name[len(file_name)-3:len(file_name)]
    try:
        t_time = time.time()
        localpath = '{0}{1}.{2}'.format(localFileMusicPath, getMd5FileName(songLink), file_ext)
        if not os.path.isfile(localpath):
            if not os.path.exists(localFileMusicPath + getMd5Path(songLink)):
                os.makedirs(localFileMusicPath + getMd5Path(songLink))
            print '>> start download file "{0}" realURL: {1}'.format(file_name, songLink)
            op = mechanize.build_opener(mechanize.HTTPEquivProcessor, mechanize.HTTPRefreshProcessor, mechanize.HTTPRefererProcessor)
            op.addheaders = [("User-agent", "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13")]
            fd = op.open(dLink)
            meta = fd.info()
            filesize = int(meta['Content-Length'])
            if filesize < 1000: raise Exception("file size small than 1000")
            block_size = 8192
            dl_size, nu_block = 0, 0
            fp = open(localpath, 'wb')
            while 1:
                block = fd.read(block_size)
                if block == "":
                    break
                dl_size += len(block)
                fp.write(block)
                nu_block += 1
            fp.close()
            op.close()
            del fp
            del fd
            print '>> finished download {0} [total block {1} dlsize: {2}, filesize: {3}]at {4}s :: stored in {5}'.format(strToASCII(file_name), nu_block, dl_size, filesize, time.time()-t_time, localpath)
        return localpath
    except:
        retry_time = kw.get('retry_time')
        if retry_time is None:  retry_time = 1
        else:   retry_time += 1
        err_str = 'download error: {0} >> {1}'.format(songLink, sys.exc_info()[1])
        print '===> ', err_str
        print '--> retry url {0}'.format(songLink)
        if retry_time <=3:
            download(dLink, songLink, retry_time=retry_time)
        log.error(err_str)
        pass
    return '' 

def strToASCII(str):
    """Strip Vietnamese diacritics, mapping each accented letter to its
    plain ASCII base letter (working on the UTF-8 encoded byte string)."""
    if str == '':
        return ''
    listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    str = str.encode('utf-8', 'replace')
    for pattern, replacement in zip(listPattern, rep):
        str = re.sub(pattern, replacement, str)
    return str

def getElementText(elem):
    """Return the whitespace-normalized text content of an lxml element,
    or '' when *elem* is None."""
    # FIX: compare with `is None` (PEP 8 E711) -- `== None` on lxml
    # elements is ambiguous/deprecated.
    if elem is None: return ''
    t = lxml.html.fromstring(etree.tostring(elem))
    # drop raw newlines, then collapse runs of whitespace to one space
    return re.sub(r"\s+", ' ', re.sub(r"\r\n|\n", '', t.text_content()))

def getAttributeText(elem, attr):
    """Return attribute *attr* of *elem*, or '' when *elem* is None or
    *attr* is empty."""
    if elem is None:
        return ''
    if attr == '':
        return ''
    return elem.get(attr)

def regex(pat, matchStr, matchIdx):
    """Return capture group *matchIdx* of the first match of *pat* in
    *matchStr*, or '' when there is no match, the group index is out of
    range, the pattern is invalid, or *matchStr* is not a string."""
    try:
        m = re.search(pat, matchStr)
        # BUG FIX: the old test `m != ''` was always true; a failed search
        # returned None and only "worked" because m.group raised into a
        # bare except.  Test the match object directly instead.
        if m is not None:
            return m.group(matchIdx)
        return ''
    except (re.error, IndexError, TypeError):
        return ''
    
def standardizeFileName(str):
    """Strip a bracketed tag from a file name and normalize to 'name.ext',
    e.g. 'song [HQ].mp3' -> 'song.mp3'.

    Returns names without an extension unchanged (the old version raised
    IndexError on them) and keeps the full base name of multi-dot files
    (the old split('.') dropped everything after the first dot).
    """
    str = re.sub(r"\[.+\]", '', str)
    if '.' not in str:
        return str.strip()
    # split on the LAST dot so the extension is exactly the final segment
    fName, fExt = str.rsplit('.', 1)
    return '{0}.{1}'.format(fName.strip(), fExt)
    
class DownloadJob(workerpool.Job):
    """workerpool job: downloads one song via download() on a worker thread."""
    def __init__(self, dLink, songLink):
        # NOTE(review): the base-class __init__ is not called; workerpool.Job
        # apparently tolerates this -- confirm against the library.
        self._dLink = dLink
        self._songLink = songLink
        
    def run(self):
        # Entry point invoked by the worker pool.
        download(self._dLink, self._songLink)
        
#---    function process nhaczingvn

class NhacZingVN():
    """Scraper for nhac.zing.vn playlist pages.

    getListPL() walks the paginated playlist index and collects playlist
    metadata dicts; getAPlist() fetches one playlist's songs, downloads
    them through a worker pool and saves the record to MongoDB.
    """
    # first page of the playlist index; later pages are reached via the
    # pagination links on each page
    _plUrl = "http://nhac.zing.vn/nhac/playlist/index.1.html"
    
    def getAPlist(self, aPlaylist):
        """Fetch the songs of one playlist dict (as built by getListPL),
        download each song with a 3-worker pool, then persist the playlist
        (with its song list embedded) to the 'playlist' collection."""
        print '>> start getAPlist {0}'.format(strToASCII(aPlaylist['title']))
        log.debug('start getAPlist {0}'.format(strToASCII(aPlaylist['title'])))
        url = aPlaylist['linkPlaylist']
        try:
            tree = buildTree(url)
            songList = []
            # each <li> row: anchor 1 = song title/page link, anchor 2 =
            # artist, first anchor under the <div> = direct download link
            for item in tree.xpath("//ul[@class='playlist']/li"):
                s_data = {}
                s_data['title'] = item.xpath("./a[@class='grayNor'][1]/text()")[0]
                s_data['songLink'] = item.xpath("./a[@class='grayNor'][1]/@href")[0]
                s_data['artist'] = item.xpath("./a[@class='grayNor'][2]/text()")[0]
                s_data['dLink'] = item.xpath("./div/a[1]/@href")[0]
                # CRC32 of the song page URL acts as the stable document id
                s_data['_id'] = getCRC32Unsign(s_data['songLink'])
                s_data['fileName'] = regex(r"filename=(.+)", s_data['dLink'], 1)
                songList.append(s_data)
                
            '''
            print playlist information
            '''
            print '======================================'
            print 'Title: {0}'.format(strToASCII(aPlaylist['title']))
            print 'Post: {0}'.format(aPlaylist['postBy'])
            print 'List songs:'
            for song in songList:
                print '--', strToASCII(song['title'])
            print '======================================'
            #---    start download song
            
            # queue every song, then shutdown (no more jobs) and block
            # until the 3 workers finish
            pool = workerpool.WorkerPool(size=3)
            for song in songList:
                pool.put(DownloadJob(song['dLink'], song['songLink']))
            pool.shutdown()
            pool.wait()
            
            
            aPlaylist['songList'] = songList
            collection = dbconnect(__dbname__, 'playlist')
            collection.save(aPlaylist)
            
        except:
            # one bad playlist must not stop the crawl: log and move on
            err_msg = 'Error in function getAPlaylist "{0}" >> {1}: {2}'.format(strToASCII(aPlaylist['title']), sys.exc_info()[0], sys.exc_info()[1])
            print '>> ', err_msg
            log.error(err_msg)
            return
    
    def getListPL(self):
        """Walk the paginated playlist index, save each thumbnail locally,
        and return a list of playlist metadata dicts (whatever was
        collected so far is returned even if a page fails)."""
        print '>> ===== get list playlist ====='
        log.debug('Start getListPL')
        _playlist = []
        url = self._plUrl
        try:
            while url != '':
                # page number embedded in the URL, e.g. index.1.html -> '1'
                cur_page = regex(r"(\d+)\.html", url, 1)
                tree = buildTree(url)
                for item in tree.xpath("//div[@class='result_row line pdleft rel']"):
                    p_data = {}
                    p_data['thumbnail'] = item.xpath(".//div[contains(@class, 'mgright')]/a/img/@src")[0]
                    saveImage(p_data['thumbnail'])
                    p_data['title'] = item.xpath(".//h1/a/text()")[0]
                    p_data['linkPlaylist'] = item.xpath(".//h1/a/@href")[0]
                    # CRC32 of the playlist URL acts as the stable document id
                    p_data['_id'] = getCRC32Unsign(p_data['linkPlaylist']) 
                    p_data['postBy'] = regex(r"params=/channel/(.+)", item.xpath(".//p[@class='txtDesc']/a/@href")[0], 1)
                    p_data['pubDate'] = regex(r"(\d+/\d+/\d+)", item.xpath(".//p[@class='txtDesc'][2]/text()")[0], 1)
                    p_data['luotNghe'] = regex(r"(\d+)$", item.xpath(".//p[@class='txtDesc'][2]/text()")[0], 1)
                    _playlist.append(p_data)
                    
                # the anchor after the highlighted current-page link is the
                # "next page" link; absence of it means we hit the last page
                nextNode = tree.xpath("//li/a[@class='grayAc']/../following-sibling::*[1]/a/@href")
                if len(nextNode) > 0:
                    url = nextNode[0]
                    next_page = regex(r"(\d+)\.html", url, 1)
                    # pagination wrapped back to a lower page number: done
                    if int(next_page) < int(cur_page):
                        url = ''
                else:   
                    url = ''
        except:
            err_msg = "Error occurred in func getListPL {0}: {1}".format(sys.exc_info()[0], sys.exc_info()[1])
            print '-->', err_msg
            log.error(err_msg)
        finally:
            # NOTE(review): `return` in finally also swallows any exception
            # raised above; callers get the partial list either way.
            return _playlist
        
#---    script entry point ---
if __name__ == '__main__':
    start_time = time.time()
    nz = NhacZingVN()
    log.info('Start transload nhaczingvn')
    print '>>> Start transload nhaczingvn'
    # collect all playlist metadata first, then fan the per-playlist work
    # (song downloads + DB save) out over 2 worker threads
    plist = nz.getListPL()
    pool = workerpool.WorkerPool(size=2)
    pool.map(nz.getAPlist, plist)
    pool.shutdown()
    pool.wait()
    finish_time = time.time()
    # elapsed seconds, not a timestamp, despite the "finished at" wording
    log.info("Transload nhaczingvn finished at {0}".format(finish_time-start_time))
    print '>> Finished at {time}'.format(time=datetime.datetime.now())
    sys.exit()
    
    