# -*- coding: utf-8 -*-

import mechanize
from mechanize import Browser
from pymongo import Connection
import zlib
import datetime
import sys
import logging
from mongolog.handlers import MongoHandler
import re
import cStringIO as StringIO
import lxml.html
from lxml import etree
import workerpool
from mechanize import urljoin

# <!--- basic function --
def getCRC32Unsign(textToHash=None):
    """Return the unsigned CRC32 checksum of textToHash as a decimal string.

    Used to derive stable document ids from question URLs. The default of
    None is treated as empty input instead of raising TypeError from
    zlib.crc32. The mask forces the 32-bit unsigned interpretation, since
    zlib.crc32 may return a signed (negative) value.
    """
    if textToHash is None:
        textToHash = b''
    return str(zlib.crc32(textToHash) & 0xffffffff)

def buildTree(url):
    """Fetch *url* (spoofing a desktop Firefox User-Agent) and return the
    page parsed into an lxml element tree, decoded as UTF-8."""
    browser = Browser()
    browser.addheaders = [("User-Agent",
                           "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13")]
    browser.open(url)
    markup = browser.response().read()
    return etree.parse(StringIO.StringIO(markup),
                       etree.HTMLParser(encoding='utf-8'))

def getElementText(elem):
    """Return the whitespace-trimmed text content of an lxml element.

    None yields '' so callers can chain this on optional xpath results.
    The element is re-serialized and re-parsed as HTML so that
    text_content() flattens any nested markup.
    """
    if elem is None:
        return ''
    serialized = etree.tostring(elem)
    fragment = lxml.html.fromstring(serialized)
    return fragment.text_content().strip()

def getAttributeText(node, attrb):
    """Return attribute *attrb* of lxml/ElementTree node *node* as a string.

    Returns '' when node is None, attrb is empty, or the attribute is
    absent. (The original returned node.get()'s None default for a missing
    attribute, which crashed the urljoin(...) calls downstream.)
    """
    if node is None or attrb == '':
        return ''
    value = node.get(attrb)
    return value if value is not None else ''

def extractWithRegEx(pat, matchStr, matchIdx):
    """Run regex *pat* against *matchStr* and return group *matchIdx*.

    Returns '' when there is no match, the group index is out of range, or
    the pattern/input is invalid. (The original tested ``m != ''`` — always
    true, since re.search returns a Match or None — and relied on the bare
    except to swallow the resulting AttributeError on the no-match path.)
    """
    try:
        m = re.search(pat, matchStr)
        if m is None:
            return ''
        return m.group(matchIdx)
    except (re.error, IndexError, TypeError):
        return ''

def strToASCII(str):
    """Strip Vietnamese diacritics and return a plain-ASCII byte string.

    Expects a Python 2 unicode string; it is UTF-8-encoded first so the
    byte-string patterns below (this source file is UTF-8) match
    byte-for-byte. NOTE(review): the parameter shadows the builtin ``str``,
    and on any exception the bare except makes the function return None —
    callers must tolerate a None result.
    """
    if str == '': return ''
    try:
        # Each pattern alternates every accented variant of one base letter;
        # rep[idx] is the ASCII replacement for listPattern[idx] (kept
        # index-aligned: a/A, d/D, i/I, e/E, o/O, u/U, y/Y).
        listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                       r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                       r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                       r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
        rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
        # Py2: unicode -> utf-8 bytes, so substitutions below are byte-level.
        str = str.encode('utf-8', 'replace')
        for idx in range(len(listPattern)):
            str = re.sub(listPattern[idx], rep[idx], str)
        return str
    except:
        pass

# Vietnamese relative-time phrases ("N giây/phút/giờ/ngày/tuần" = N
# seconds/minutes/hours/days/weeks), compiled once at import time and
# checked in a deterministic order (the original iterated a dict in
# arbitrary order). Unicode patterns, so no byte-encoding round trip needed.
_TIME_UNIT_PATTERNS = [
    (re.compile(u"(\\d+)\\sgiây"), 'seconds'),
    (re.compile(u"(\\d+)\\sphút"), 'minutes'),
    (re.compile(u"(\\d+)\\sgiờ"), 'hours'),
    (re.compile(u"(\\d+)\\sngày"), 'days'),
    (re.compile(u"(\\d+)\\stuần"), 'weeks'),
]

def standardlizeTimeValue(timeStr):
    """Convert a relative Vietnamese time phrase (e.g. "34 phút trước",
    "1 giờ trước") into an absolute datetime by subtracting the stated
    amount from datetime.now().

    Returns now() unchanged when no known phrase matches. NOTE(review): the
    original docstring also mentioned "hôm qua, hôm kia, 08/11/2010", but no
    code here ever handled those forms. Previously this depended on the
    ``listPat`` global created only under __main__; it is now self-contained.
    """
    now = datetime.datetime.now()
    result = now
    for pattern, unit in _TIME_UNIT_PATTERNS:
        m = pattern.search(timeStr)
        if m is not None:
            result = now - datetime.timedelta(**{unit: int(m.group(1))})
    return result

# -- end basic function --!>

def getListCategory(url='http://vn.answers.yahoo.com/'):
    log.debug("Start getListCategory({0})".format(url))
    print ">>> LOADING CATEGORY...\n\n"
    tree = buildTree(url)
    categories = []
    for row in tree.xpath("//li[@id='yan-nav-browse']//ul"):
        for iLink in row.xpath("./li/a"):
            name = getElementText(iLink)
            ahref = urljoin(url, getAttributeText(iLink, 'href'))
            cat = re.sub(r"[\s+&]", '', strToASCII(name).lower())
            categories.append({'catName': name, 'cat': cat, 'link': ahref})
            # -- Print to test --
#            print "{0}:\t{1}\t{2}\n".format(strToASCII(name), ahref, cat)
    return categories

def categoriesProcess(cat):
    log.debug("Start categoriesProcess({0})".format(cat))
    print ">>> start category({0})".format(cat['catName'].encode('utf-8', 'ignore'))
    types = ["resolved", "vote", "list"]
    connection = Connection('localhost', 27017)
    db = connection['yahooanswer']
    collection = db[cat['cat']]
    count = 0
    for type in types:
        url = "{0}&link={1}\n".format(cat['link'], type)
        print "url: {0}".format(url)
        try:
            while count < maxProcessThreshold:
                tree = buildTree(url)
                contentNode = tree.xpath("//ul[@class='questions']/li/div")
                if len(contentNode) < 1:    
                    log.warn("Category doesn't contain any question and answer <{0}>".format(cat['catName']))
                    break
                for iq in contentNode:
                    title = getElementText(iq.xpath("./h3/a")[0])
                    link = urljoin(url, getAttributeText(iq.xpath("./h3/a")[0], 'href'))
                    id = getCRC32Unsign(link)
                    time = str(standardlizeTimeValue(getElementText(iq.xpath("./ul/li[contains(., 'cách đây')]".decode('utf-8'))[0])))
                    numberOfAnswer = extractWithRegEx(r"(\d+)\s", getElementText(iq.xpath("./ul/li[contains(., 'trả lời')]".decode('utf-8'))[0]), 1)
                    userOfAnswer = re.sub(r"-|hỏi", '', getElementText(iq.xpath("./ul/li/a[@class='fn']")[0]).encode('utf-8'))
                    userOfAnswer = re.sub(r"\s+", ' ', userOfAnswer).strip()
                    detail = details(link)
                    if count < maxProcessThreshold:
                        count += 1
                        collection.save({'_id': id, 'title': title, 'link': link, 'time': time, 'numberOfAnswer': numberOfAnswer, 'userOfAnswer': userOfAnswer,
                                         'detail': detail['content'], 'answerList': detail['answers']})
                    else:
                        log.info("Vuot qua gioi han so bai viet: {0}".format(maxProcessThreshold))   
                        break
                    # -- print --
                    print "\tTitle: {0}".format(title.encode('utf-8', 'ignore'))
                    print "\tTime: {0}".format(time)
                    print "\tContent: {0}".format(detail['content'].encode('utf-8', 'ignore'))
                    print "\t--------------------------------------"
                    for ianw in detail['answers']:
                        print "\t{0}".format(ianw['content'].encode('utf-8', 'ignore'))
                    print "\t--------------------------------------"
                nextNode = tree.xpath("//div[@class='pagination']//li[@class='current']/following-sibling::*[1]/a")
                if len(nextNode) < 1:   break
                url = urljoin(cat['link'], getAttributeText(nextNode[0], 'href'))
        except:
            err = "{0} at {1}".format(sys.exc_info()[1], strToASCII(cat['catName']))
            log.error(err)
            print err
            pass

def details(url):
    print ">>> start details({0})".format(url)
    log.debug("Start details({0})".format(url))
    data = {}
    data['answers'] = []
    try:
        tree = buildTree(url)
        # -- get content of answer --
        contentNode = tree.xpath("//div[@id='yan-question']//div[@class='content']")
        data['content'] = ''
        if len(contentNode) > 0:
            content = getElementText(contentNode[0])
        # -- get best answer --
        bestNode = tree.xpath("//div[@class='answer best']/div[@class='qa-container']")
        if len(bestNode) > 0:
            ans = {}
            ans['content'] = getElementText(bestNode[0].xpath("./div[@class='content']")[0])
            ans['time'] = str(standardlizeTimeValue(getElementText(bestNode[0].xpath("./ul[@class='meta']/li[1]")[0])))
            ans['type'] = 'best'
            data['answers'].append(ans)
        # -- get list answer --
        for ia in tree.xpath("//div[@id='yan-answers']//ul[@class='shown']/li//div[@class='qa-container']"):
            ans = {}
            ans['content'] = getElementText(ia.xpath("./div[@class='content']")[0])
            ans['time'] = str(standardlizeTimeValue(getElementText(ia.xpath("./ul[@class='meta']/li[1]")[0])))
            ans['type'] = 'normal'
            data['answers'].append(ans)
    except:
        err = "{0} at {1}".format(sys.exc_info()[1], url)
        log.error(err)
        print err
        pass

    return data


if __name__ == '__main__':
    # Wire the 'yahooanswer' logger to MongoDB (database 'mongolog',
    # collection 'log') via mongolog's MongoHandler.
    log = logging.getLogger('yahooanswer')
    log.setLevel(logging.INFO)
    log.addHandler(MongoHandler.to('mongolog', 'log'))
    # Relative-time patterns (hours/days/weeks/seconds/minutes, Vietnamese)
    # consumed by standardlizeTimeValue(). NOTE(review): log, listPat and
    # maxProcessThreshold are globals created only under this guard, so the
    # helper functions above fail if this module is imported instead of run.
    listPat = {'gio': re.compile(r"(\d+)\sgiờ"), 
           'ngay': re.compile(r"(\d+)\sngày"), 
           'tuan': re.compile(r"(\d+)\stuần"),
           'giay': re.compile(r"(\d+)\sgiây"),
           'phut': re.compile(r"(\d+)\sphút")}
    # Per-category cap on the number of questions saved (shared across the
    # resolved/vote/list listing types inside categoriesProcess).
    maxProcessThreshold = 400
    categories = getListCategory()
    print "OK : {0} categories".format(len(categories))
    log.info("Start crawler yahoo answer")
    # Fan the categories out over 10 worker threads, one category per task.
    pool = workerpool.WorkerPool(size=10)
    pool.map(categoriesProcess, categories)
    # NOTE(review): shutdown() is called before wait() — confirm against the
    # workerpool docs that this ordering still drains all queued jobs.
    pool.shutdown()
    pool.wait()
    log.info("Crawler yahoo answer finished")
    print ">>> Finished at {0}".format(str(datetime.datetime.now()))
    sys.exit()


