# -*- coding: utf-8 -*-
import workerpool
import lxml.html
import cStringIO as StringIO
from lxml import etree
from pymongo import Connection
from urlparse import urljoin
import re 
import hashlib
import os
import sys
from urllib import urlretrieve
import time
import datetime
import logging
from mongolog.handlers import MongoHandler
import urllib


def buildTree(url, procXML = False, outputHTML=False):
    result = urllib.urlopen(url)
    html = result.read()
    if not procXML:
        parser = etree.HTMLParser(encoding='utf-8')
    else:
        parser = etree.XMLParser(encoding='utf-8')
    tree = etree.parse(StringIO.StringIO(html), parser)
    if outputHTML: print html
    return tree

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5()
    digest.update(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Build a three-level directory prefix ('a/b/c/') from the MD5 of the input."""
    digest = getMD5Hash(stringToHash)
    return '{0}/{1}/{2}/'.format(*digest[:3])

def getMd5FileName(stringToHash):
    """Build an 'a/b/c/rest-of-hash' relative path from the MD5 of the input."""
    digest = getMD5Hash(stringToHash)
    pieces = [digest[0], digest[1], digest[2], digest[3:]]
    return '/'.join(pieces)

def getElementText(elem):
    """Return the stripped plain-text content of an lxml element.

    :param elem: an lxml element, or None.
    :returns: '' when *elem* is None, otherwise the element's text content.
    """
    # identity test: '== None' triggers lxml's element comparison protocol
    if elem is None: return ''
    t = lxml.html.fromstring(etree.tostring(elem))
    return t.text_content().strip()

def strToASCII(text):
    """Strip Vietnamese diacritics, returning a UTF-8 byte string of ASCII letters.

    :param text: unicode string (may be '').
    :returns: '' for empty input, otherwise the UTF-8 encoded string with
        every accented Vietnamese vowel/consonant folded to its base letter.
    """
    # NOTE: parameter renamed from 'str' -- it shadowed the builtin
    if text == '': return ''
    listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    text = text.encode('utf-8', 'replace')
    # iterate the pattern/replacement pairs in lockstep (was range(len(...)))
    for pattern, replacement in zip(listPattern, rep):
        text = re.sub(pattern, replacement, text)
    return text

def getAttributeText(elem, attr):
    """Return attribute *attr* of *elem*, or '' when unavailable.

    :param elem: an lxml element (anything with a dict-like .get), or None.
    :param attr: attribute name; '' is treated as "no attribute".
    :returns: the attribute value, or '' when the element is None, the
        attribute name is empty, or the attribute is missing.  (The original
        returned None for a missing attribute, which crashed downstream
        ``urljoin`` calls.)
    """
    if elem is None or attr == '':
        return ''
    value = elem.get(attr)
    return value if value is not None else ''

def extractWithRegEx(pat, matchStr, matchIdx):
    """Run regex *pat* against *matchStr* and return group *matchIdx*.

    Returns '' when there is no match or the group index is invalid.
    (The original tested ``m != ''`` -- always true, since ``search``
    returns a Match object or None -- and relied on a bare ``except``
    masking the resulting AttributeError on the no-match path.)
    """
    try:
        m = re.search(pat, matchStr)
        if m is not None:
            return m.group(matchIdx)
    except (re.error, IndexError, TypeError):
        # bad pattern, bad group index, or non-string input: best-effort ''
        pass
    return ''

def dbConnection(database="nhaccuatui", col="album"):
    try:
        connection = Connection('localhost', 27017)
        db = connection[database]
        collection = db[col]
        return collection
    except:
        print sys.exc_info()
        sys.exit()

def saveImage(url):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
#        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''             

def saveFile(tmpLink, url, retryTime=1):
    log.debug("Start saveFile({0}, {1})".format(tmpLink, url))
    global maxRetryTimes
    retryTime += 1
    try:
        nameOfFile = os.path.basename(tmpLink)
        ext = nameOfFile.split('.')[1]
        localFilename = '{0}{1}.{2}'.format(localMusicpath, getMd5FileName(url), ext)
#        print 'saveFile: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localMusicpath + getMd5Path(url)):
                os.makedirs(localMusicpath + getMd5Path(url))
            print "start download file {0}".format(localFilename)
            log.debug("start download file {0}".format(localFilename))
            urlretrieve(tmpLink, localFilename)
            print "finished download file {0}".format(localFilename)
            log.debug("finished download file {0}".format(localFilename))
        return localFilename
    except:
        if retryTime < maxRetryTimes:
            try:
                saveFile(tmpLink, url)
            except:
                err_str = 'saveFile error: {0} ({1}) >> {2}'.format(tmpLink, url, sys.exc_info()[1])
                print err_str
                log.error(err_str)
                raise
    return ''

def process(aPlayList):
    log.debug("Start a thread with param: {0}".format(aPlayList))
    global collection, totalPlaylistCrawled
    print strToASCII(aPlayList['title'])
    aPlayList['songList'] = processPlayList(aPlayList['linkPlayList'])
    if not collection.find_one({'_id': aPlayList['_id']}):  
        collection.save(aPlayList)
        totalPlaylistCrawled += 1

def processPlayList(url, retryTime=1):
    log.debug("Start processPlayList, [params: {0}]".format(url))
    global maxRetryTimes
    songList = []
    retryTime += 1
    try:
        tree = buildTree(url, False)
        songsListURL = extractWithRegEx(r"=(.+)", getAttributeText(tree.xpath("//div[@id='flash-playerlist-holder']//embed")[0], 'src'), 1)
        xTree = buildTree(songsListURL, True)
        for iTrack in xTree.xpath("//track"):
            songs = {}
            songs['title'] = getElementText(iTrack.xpath(".//title")[0])
            songs['artist'] = getElementText(iTrack.xpath(".//creator")[0])
            songs['tmpLink'] = getElementText(iTrack.xpath(".//location")[0])
            songs['songLink'] = getElementText(iTrack.xpath(".//info")[0])
            saveFile(songs['tmpLink'], songs['songLink'])
            songs['songID'] = getMD5Hash(songs['songLink'])
#            print songs['title']
#            print songs['songLink']
            songList.append(songs)
    except:
        if retryTime < maxRetryTimes:
            try:
                processPlayList(url, retryTime)
            except:
                err_str = "{0}".format(sys.exc_info()[1])
                print err_str
                log.error(err_str)
                raise
    return songList

def getPlayList(url='http://www.nhaccuatui.com/playlist?type=chonloc', retryTimes=1):
    """Crawl every page of the playlist index, dispatching each new playlist
    to a 5-thread worker pool for song scraping and Mongo storage.

    :param url: index page to start from; pagination links are followed
        until a page with no playlist items is reached.
    :param retryTimes: internal retry counter, capped by maxRetryTimes.
    """
    log.debug("Start getPlayList, [params: {0}]".format(url))
    global collection, maxRetryTimes
    retryTimes += 1
    try:
        while 1:
            print url
            tree = buildTree(url)
            arr = []
            contentNode = tree.xpath("//div[@class='grid-playlist-list']//div[@class='item']")
            # no playlist items -> walked past the last page, stop crawling
            if len(contentNode) < 1:    break
            else:
                for item in contentNode:
                    data = {}
                    data['linkPlayList'] = urljoin(url, getAttributeText(item.xpath(".//h2[@class='title']/a")[0], 'href'))
                    # MD5 of the playlist URL doubles as the Mongo _id (dedup key)
                    data['_id'] = getMD5Hash(data['linkPlayList'])
                    if collection.find_one({'_id': data['_id']}):
                        print "\n << Already exist in database >>"   
                        continue
                    data['thumbnail'] =  ''
                    if len(item.xpath(".//div[@class='left-col']//img")) > 0:
                        data['thumbnail'] = urljoin(url, getAttributeText(item.xpath(".//div[@class='left-col']//img")[0], 'src'))
                        saveImage(data['thumbnail'])
                    data['title'] = getElementText(item.xpath(".//h2[@class='title']")[0])
                    data['description'] = getElementText(item.xpath(".//div[@class='description']")[0])
                    data['postBy'] = getElementText(item.xpath(".//div[@class='summary-info']/a")[0])
                    # 'lượt nghe' is the play count; decode so the non-ASCII xpath is unicode
                    data['luotNghe'] = getElementText(item.xpath(".//div[@class='summary-info']/span[contains(., 'lượt nghe')]".decode('utf-8'))[0])
                    arr.append(data)
                # crawl this page's new playlists concurrently (see process())
                pool = workerpool.WorkerPool(size=5)
                pool.map(process, arr)
                pool.shutdown()
                pool.wait()
                
                # paging control: the element right after the 'current' page marker
                nextPage = getAttributeText(tree.xpath("//div[@class='group-paging-type']//span[contains(@class, 'current')]/following::*[1]")[0], 'href')
                url = urljoin(url, nextPage)
    except:
        if retryTimes < maxRetryTimes:
            try:
                getPlayList(url, retryTimes)
            except:
                err_str = "{0}".format(sys.exc_info()[1])
                print err_str
                log.error(err_str)
                pass
        
# --- script setup and entry point (module-level side effects) ---
collection = dbConnection()

log = logging.getLogger('nhaccuatui')
log.setLevel(logging.DEBUG)
# log records are stored in MongoDB: database 'mongolog', collection 'log'
log.addHandler(MongoHandler.to('mongolog', 'log'))
localFilepath = '/home/hoangnamhai/HarvestedData/nhaccuatui/'  # image store root
localMusicpath = '/home/hoangnamhai/music/'  # music store root
maxRetryTimes = 3  # shared retry cap for all crawl helpers
totalPlaylistCrawled = 0  # incremented by worker threads in process()
log.debug("Start crawler nhaccuatui")

getPlayList()

log.debug("Total playlist crawled in nhaccuatui: {0}".format(totalPlaylistCrawled))
log.debug("Crawler nhaccuatui finished")
print "\n=== FINISH ===", str(datetime.datetime.now())
sys.exit()

    