# -*- coding: utf-8 -*-

import urllib
from urllib import urlretrieve
from lxml import etree
from pymongo import Connection
import cStringIO as StringIO
import hashlib
import workerpool
import datetime
import time
import sys
import os
import re
from urlparse import urljoin
import logging
from mongolog.handlers import MongoHandler
import lxml.html


# Compiled XPath returning the full string content of a node.
stringify = etree.XPath("string()")
# NOTE(review): hard-coded absolute output directory for downloaded images --
# consider making this configurable.
localFilepath = "/home/hoangnamhai/HarvestedData/yahooanswer/"
rootURL = "http://vn.answers.yahoo.com"


# Regexes matching Vietnamese relative-time phrases: "N gio truoc" = N hours
# ago, "N ngay" = N days, "N tuan" = N weeks, "N giay" = N seconds,
# "N phut" = N minutes.  Consumed by standardlizeTimeValue().
listPat = {'gio': re.compile(r"(\d+)\sgiờ trước"), 
           'ngay': re.compile(r"(\d+)\sngày"), 
           'tuan': re.compile(r"(\d+)\stuần"),
           'giay': re.compile(r"(\d+)\sgiây"),
           'phut': re.compile(r"(\d+)\sphút")}

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Return a 3-level directory prefix 'a/b/c/' built from the first three
    hex characters of the MD5 digest of *stringToHash*."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/'.format(digest[0], digest[1], digest[2])

def strToASCII(str):
    # Strip Vietnamese diacritics: map every accented letter to its base
    # ASCII letter (a/A, d/D, i/I, e/E, o/O, u/U, y/Y).
    # NOTE(review): the parameter shadows the builtin `str`.
    if str == '': return ''
    # listPattern[i] is replaced by rep[i].  The patterns are UTF-8 byte
    # strings (the file declares `coding: utf-8`), so the input is encoded
    # to UTF-8 bytes first and the substitution runs at the byte level --
    # this relies on Python 2 str/unicode semantics.
    listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    str = str.encode('utf-8', 'replace')
    for idx in range(len(listPattern)):
        str = re.sub(listPattern[idx], rep[idx], str)
    return str

def getMd5FileName(stringToHash):
    """Return 'a/b/c/<rest-of-digest>' -- a relative file path derived from
    the MD5 digest of *stringToHash* (first three hex chars become dirs)."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/{3}'.format(digest[0], digest[1], digest[2], digest[3:])

def getElementText(elem):
    """Return the whitespace-stripped text content of an lxml element,
    or '' when *elem* is None."""
    # Fixed: use `is None` instead of `== None` -- lxml elements implement
    # rich comparison, and identity is the correct (PEP 8) test anyway.
    if elem is None: return ''
    t = lxml.html.fromstring(etree.tostring(elem))
    return t.text_content().strip()

def getAttributeText(elem, attrib):
    """Return attribute *attrib* of element *elem* via elem.get(), or ''
    when the element is missing or the attribute name is empty."""
    # Fixed: use `is None` instead of `== None` (PEP 8; lxml elements
    # override comparison operators).
    if elem is None or attrib == '': return ''
    return elem.get(attrib)

def saveImage(url):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''      

def buildTree(url):
    """Fetch *url* and return its HTML parsed into an lxml element tree
    (parsed as UTF-8)."""
    html = urllib.urlopen(url).read()
    htmlParser = etree.HTMLParser(encoding='utf-8')
    return etree.parse(StringIO.StringIO(html), htmlParser)

def extractWithRegEx(pat, matchStr, matchIdx):
    ''' Run regex *pat* against *matchStr* and return group *matchIdx*.
    Returns '' when there is no match or on any regex error. '''
    try:
        m = re.compile(pat).search(matchStr)
        # Fixed: the original tested `m != ''`, which is true even for None;
        # the no-match case only "worked" by raising AttributeError into the
        # bare except.  Test for None explicitly.
        if m is not None:
            return m.group(matchIdx)
        return ''
    except Exception:
        # Deliberately best-effort: bad patterns or group indexes yield ''.
        return ''

def standardlizeTimeValue(timeStr):
    """Convert a Vietnamese relative-time phrase ("34 phut truoc", "1 gio
    truoc", "2 ngay", ...) into an absolute datetime by subtracting the
    matched amount from now.  Unmatched input returns the current time."""
    now = datetime.datetime.now()
    result = now
    # Map each pattern key in listPat to the matching timedelta keyword.
    unitNames = {'ngay': 'days', 'gio': 'hours', 'tuan': 'weeks',
                 'giay': 'seconds', 'phut': 'minutes'}
    for patName, pattern in listPat.items():
        match = pattern.search(timeStr)
        if match is not None:
            amount = int(match.group(1))
            result = now - datetime.timedelta(**{unitNames[patName]: amount})
    return result

def getQuestionDetail(url):
    log.debug("Start getQuestionDetail({0})".format(url))
    result = []
    try:
        tree = buildTree(url)
        questionContentNode = tree.xpath("//div[@id='yan-question']//div[@class='content']")
        content = ""
        if questionContentNode is not None:
            if len(questionContentNode) > 0:
                content = getElementText(questionContentNode[0])
        result.append(content)
        # -- answer region --
        ansReturn = []
        for item in tree.xpath("//div[@class='bd']//div[@class='qa-container']"):
            row = {}
            ansContentArr = item.xpath("./div[@class='content']/text()")
            ansContent = ""
            if len(ansContentArr) > 0:
                ansContent = ansContentArr[0]
            row['timeAnswer'] = str(standardlizeTimeValue(getElementText(item.xpath("./ul[@class='meta']/li[1]")[0])))
            row['ansContent'] = ansContent
            ansReturn.append(row)
        result.append(ansReturn)
    except:
        errStr = "<< ERROR [{0}] --> {1} >>".format(url, sys.exc_info()[1])
        log.error(errStr)
        print errStr
        pass
    return result

def getQuestions(aCategory):
    ''' Crawl every question listing for one category and store each question
    in MongoDB (database 'yahooanswer', collection aCategory['cat']).

    aCategory is a dict with keys 'cat' (collection name), 'catName'
    (display name) and 'link' (category URL).  Walks the "resolved", "vote"
    and "list" listings, following pagination until maxProcessThreshold
    questions were handled or there is no next page.  Runs inside a
    workerpool thread.
    '''
    log.debug("Start getQuestions({0})".format(aCategory))
    types = ["resolved", "vote", "list"]
    # One MongoDB connection per worker thread.
    connection = Connection('localhost', 27017)
    db = connection['yahooanswer']
    collection = db[aCategory['cat']]
    # Both globals are defined in the __main__ block below.
    global maxProcessThreshold, totalQuestionCrawled
    # NOTE(review): `type` shadows the builtin.
    for type in types:
        url = "{0}&link={1}".format(aCategory['link'], type)
        print "\n>>>> \tProcessing page {0} [{1}]".format(strToASCII(aCategory['catName']), url)
        # NOTE(review): one `data` dict is reused for every post; every key is
        # reassigned per post, so nothing leaks, but each save overwrites the
        # previous in-memory record.
        data = {}
        currentDuplicate = 0
        countPost = 0
        try:
            while countPost < maxProcessThreshold:
                tree = buildTree(url)
                postNodes = tree.xpath("//ul[@class='questions']/li")
                if len(postNodes) < 1: break
                for item in postNodes:
                    linkNode = item.xpath("./div/h3/a")
                    postLink = urljoin(url, linkNode[0].get('href').strip())
                    # Key the document by the MD5 of the question URL so
                    # re-crawls upsert instead of duplicating.
                    data['_id'] = getMD5Hash(postLink)
                    countPost += 1
                    if countPost > maxProcessThreshold: break
    
                    data['title'] = getElementText(linkNode[0])
                    # Fifth meta <li> holds the relative update time.
                    strDate = item.xpath("./div/ul[@class='meta']/li[5]/text()")
                    data['updateTime'] = str(standardlizeTimeValue(strDate[0]))
                    data['postLink'] = postLink
                    detailArr = getQuestionDetail(postLink)
                    data['content'] = detailArr[0]
                    # Fall back to the title when the question has no body.
                    if data['content'] == '' and data['title'] != '':   data['content'] = data['title']
                    data['answer'], data['timestamp'] = detailArr[1], str(time.time())
                    collection.save(data)
                    # -- Print to test ---
                    print "\nProcess question: {0}".format(postLink)
                    print "\n\tTitle: {0}".format(strToASCII(data['title']))
                    print "\n\tContent: {0}".format(strToASCII(data['content']))
                    
                # Follow the "next" pagination link, if any.
                nextPagination = tree.xpath("//div[@class='pagination']//li[@class='next']/a")
                if len(nextPagination) > 0:
                    url = urljoin(url, nextPagination[0].get('href'))
                    print "\n>>>>>>>>>>> PAGE : {0} >>>>>>>>>>>".format(url)
                else:
                    break
        except:
            # NOTE(review): bare except silently ends this listing type on
            # any error (including KeyboardInterrupt).
            errStr = "<< ERROR [{0}] --> {1} >>".format(aCategory, sys.exc_info()[1])
            log.error(errStr)
            print errStr
            pass
        
        # NOTE(review): incremented from multiple worker threads without a
        # lock -- the final total may under-count.
        totalQuestionCrawled += countPost

def detailTieuDiem(item):
    """Fetch the question detail for one 'tieu diem' (featured) item and
    upsert it into the 'tieu-diem' collection, keyed by the MD5 of its URL."""
    collection = Connection('localhost', 27017)['yahooanswer']['tieu-diem']
    item.update({
        '_id': getMD5Hash(item['qLink']),
        'detail': getQuestionDetail(item['qLink']),
        'timestamp': str(time.time()),
        'lastupdate': str(datetime.datetime.now()),
    })
    collection.save(item)

def detailHayNhat(item):
    """Fetch the question detail for one 'hay nhat' (best) item and upsert
    it into the 'hay-nhat' collection, keyed by the MD5 of its URL."""
    collection = Connection('localhost', 27017)['yahooanswer']['hay-nhat']
    item.update({
        '_id': getMD5Hash(item['qLink']),
        'detail': getQuestionDetail(item['qLink']),
        'timestamp': str(time.time()),
        'lastupdate': str(datetime.datetime.now()),
    })
    collection.save(item)

def yahooTieuDiem(node):
    ''' Scrape the home-page "Tieu diem" (featured) and "Hay nhat" (best)
    sections from *node* (the <ul> of the best-of box) and store the
    details via worker pools of 3 threads each. '''
    global rootURL
    print "\n>>>>> Start lay tieu diem, hay nhat >>>>>"
    log.debug("Start yahooTieuDiem")
    # -- featured ("Tieu diem") section --
    # The xpath literal must be unicode for lxml under Python 2, hence the
    # .decode('utf-8') on the UTF-8 source bytes.
    data = []
    for item in node.xpath("./li/h2[contains(., 'Tiêu điểm')]/parent::*".decode('utf-8')):
        logo = item.xpath("./div[1]//img[@class='logo']")[0].get('src')
        description = getElementText(item.xpath("./div[1]//p")[0])
        question = getElementText(item.xpath("./div[2]//h3/a")[0])
        qLink = urljoin(rootURL, getAttributeText(item.xpath("./div[2]//h3/a")[0], 'href'))
        data.append({'_id': getMD5Hash(qLink),'logo': logo, 'question': question, 'description': description, 'qLink': qLink})
        # -- Debug output --
        print "Title: ", strToASCII(question)
        print strToASCII(description) + "\n"
    
    # Fetch the per-question details concurrently.
    pool = workerpool.WorkerPool(size=3)
    pool.map(detailTieuDiem, data)
    pool.shutdown()
    pool.wait()
    
    # -- best ("Hay nhat") section --
    data = []
    for item in node.xpath("./li/h2[contains(., 'Hay nhất')]/parent::*".decode('utf-8')):
        question = getElementText(item.xpath("./div[1]//h3/a")[0])
        qLink = urljoin(rootURL, getAttributeText(item.xpath("./div[1]//h3/a")[0], 'href'))
        qAvatar = urljoin(rootURL, getAttributeText(item.xpath("./div[1]//img[@class='avatar']")[0], 'src'))
        ansDescription = getElementText(item.xpath("./div[2]//p")[0])
        ansAvatar = urljoin(rootURL, getAttributeText(item.xpath("./div[2]//img[@class='avatar']")[0], 'src'))
        # NOTE(review): key 'ansDescriptioin' is misspelled; kept as-is since
        # stored documents and any readers depend on it.
        data.append({'_id': getMD5Hash(qLink), 'question': question, 'qLink': qLink, 'qAvatar': qAvatar, 'ansAvatar': ansAvatar, 'ansDescriptioin': ansDescription})
        # -- Debug output --
        print "Title", strToASCII(question)
        print strToASCII(ansDescription) + "\n"
        
    pool = workerpool.WorkerPool(size=3)
    pool.map(detailHayNhat, data)
    pool.shutdown()
    pool.wait()

def getListCategory(url='http://vn.answers.yahoo.com/'):
    ''' Scrape the category browse menu from the Yahoo Answers home page.

    Returns a list of dicts: 'catName' (display name), 'cat' (ASCII slug
    used as the MongoDB collection name) and 'link' (absolute category URL).
    '''
    log.debug("Start getListCategory({0})".format(url))
    print "......................LOADING CATEGORY............................\n\n"
    tree = buildTree(url)
    # Disabled: featured/best-of crawl of the home page.
    #yahooTieuDiem(tree.xpath("//div[@id='yan-bestof']/div/ul")[0])
    #print "\n>>>> Start crawler categories >>>>"
    categories = []
    for row in tree.xpath("//li[@id='yan-nav-browse']//ul"):
        for iLink in row.xpath("./li/a"):
            name = getElementText(iLink)
            ahref = urljoin(url, getAttributeText(iLink, 'href'))
            # Slug: strip diacritics, drop '&', lower-case, spaces -> dashes.
            cat = re.sub(r"\s+", '-', re.sub("&", '', strToASCII(name).lower()))
            categories.append({'catName': name, 'cat': cat, 'link': ahref})
            # -- Debug output --
            print "{0}:\t{1}\t{2}\n".format(strToASCII(name), ahref, cat)
    return categories


# Module-level logger; records are persisted to MongoDB via mongolog
# (database 'mongolog', collection 'log').
log = logging.getLogger('yahooanswer')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))

if __name__ == '__main__':
    # Crawl limits shared with getQuestions() through its `global` statement.
    # NOTE(review): these globals only exist when run as a script; importing
    # the module and calling getQuestions() directly raises NameError.
    maxProcessThreshold = 5000
    totalQuestionCrawled = 0
    log.debug("Start crawler yahoo answer")
    categories = getListCategory()
    # Crawl up to 10 categories concurrently.
    pool = workerpool.WorkerPool(size=10)
    pool.map(getQuestions, categories)
    pool.shutdown()
    pool.wait()
    # NOTE(review): totalQuestionCrawled is incremented by worker threads
    # without a lock, so this total may under-count.
    print "\n==> Total question crawled: {0}".format(totalQuestionCrawled)
    log.debug("Crawler yahoo answer finished")

# "Da xong" means "Done".  NOTE(review): this print and sys.exit() sit
# outside the __main__ guard, so they also run on import.
print "Đã xong", datetime.datetime.now()
sys.exit()
