# -*- coding: utf-8 -*-
'''
Created on Nov 11, 2010

@author: HoangNamHai
'''

try:
    import psyco
    psyco.full()
except ImportError:
    print 'Psyco not found'
    pass
import urllib, types, zlib, sys, re, os, time, datetime, hashlib
import traceback
import cStringIO as StringIO
import workerpool
import mechanize
from urllib import urlretrieve
from lxml import etree
from pymongo import Connection
from urlparse import urljoin
import logging
from mongolog.handlers import MongoHandler
import lxml.html

# Compiled XPath helper: concatenates all text beneath a node.
stringify = etree.XPath("string()")
# Root directory where downloaded article images are stored (see saveImage).
localFilepath = '/home/hoangnamhai/HarvestedData/tintuc/xalo/'

def extractWithRegEx(pat, matchStr, matchIdx):
    ''' Run regex `pat` against `matchStr` and return group `matchIdx`.
        Returns '' when the pattern does not match, the group index is out
        of range, or the pattern is invalid. '''
    try:
        result = ''
        rexp = re.compile(pat)
        m = rexp.search(matchStr)
        # Fix: re.search returns None (never '') on no match. The old
        # `m != ''` test was always true and relied on the bare except
        # swallowing None.group()'s AttributeError.
        if m is not None:
            result = m.group(matchIdx)
        return result
    except Exception:
        # bad pattern / bad group index -> treat as "no match"
        return ''

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of the given text."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Build a three-level directory prefix 'a/b/c/' from the first three
    characters of the string's MD5 hash (spreads files across many dirs)."""
    digest = getMD5Hash(stringToHash)
    return '/'.join(digest[:3]) + '/'

def getMd5FileName(stringToHash):
    """Map a string to the relative file name 'a/b/c/<rest-of-md5>' derived
    from its MD5 hash; pairs with the directory layout of getMd5Path."""
    digest = getMD5Hash(stringToHash)
    return '%s/%s/%s/%s' % (digest[0], digest[1], digest[2], digest[3:])

def getUrlMainPart(url):
    ''' vd: input
        http://m.xalo.vn/17___5_xahoib_1_______.mtt_5_xahoi1_1__ffffcb632a528020_9cb3ed7____.mad
        http://m.xalo.vn/1_o_7_suckhoe1_1__12cbe463160_8932e5b____.mad '''
    result = extractWithRegEx(r"__([0-9,a-f]+_[0-9,a-f]+)__", url, 1).strip()
    if result == '': result = url
    print 'getUrlMainPart ', result
    return result

def saveImage(url):
    ''' Download an image to local disk under an MD5-derived file name.
        If the file already exists locally it is not downloaded again.
        Retries up to 3 times on failure. Always returns ''. '''
    # skip local file:// URLs outright
    if re.search(r'file:/', url):   return ''
    saveFlag = False
    maxRetries = 3
    currentRetry = 0
    while not saveFlag and currentRetry < maxRetries:
        try:
            # a/b/c/<rest-of-md5>.jpg layout keeps directories small
            localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
            print '>> saveImage: {0} -> {1}'.format(url, localFilename)
            if not os.path.isfile(localFilename):
                if not os.path.exists(localFilepath + getMd5Path(url)):
                    os.makedirs(localFilepath + getMd5Path(url))
                urlretrieve(url, localFilename)
            saveFlag = True
        except:
            currentRetry += 1
            print '=> saveImage error: {0}'.format(url)
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
            log.error('saveImage error: {0} >> {1}'.format(url, repr(traceback.format_exception(exc_type, exc_value, exc_traceback))))
    return ''

def strToASCII(str):
    """Strip Vietnamese diacritics: replace each accented letter with its
    plain ASCII base letter. Operates on the UTF-8 encoded byte string."""
    if str == '': return ''
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    str = str.encode('utf-8', 'replace')
    # apply each accent-class pattern with its replacement letter in lockstep
    for pattern, replacement in zip(listPattern, rep):
        str = re.sub(pattern, replacement, str)
    return str

def buildTree(url):
    """Fetch `url` with mechanize and parse the HTML into an lxml tree.

    Retries up to 3 times. Returns None when every attempt fails, or when
    the server redirects to the site front page
    ('http://m.xalo.vn/tintuc.mobi') on each attempt — the crawler treats
    that redirect as "page not available".
    """
    tree = None
    maxRetries = 3
    currentRetry = 0
    flag = False
    errMsg = ''
    while not flag and currentRetry < maxRetries:
        http = None
        response = None
        try:
            http = mechanize.build_opener(mechanize.HTTPRefererProcessor, mechanize.HTTPErrorProcessor)
            response = http.open(url)
            responseURL = response.geturl()
            html = None
            if responseURL != 'http://m.xalo.vn/tintuc.mobi':
                html = response.read()
                # parse from an in-memory buffer; force utf-8 decoding
                tree = etree.parse(StringIO.StringIO(html), etree.HTMLParser(encoding='utf-8'))
                del html
                flag = True
        except:
            errMsg = "Error at ({0}, {1}:{2})".format(url, sys.exc_info()[0], sys.exc_info()[1])
        finally:
            # always release the opener/response before the next attempt
            if http:
                http.close()
                del http
            if response:
                response.close()
                del response
            currentRetry += 1
    
    # only the last attempt's error message survives; printed once at the end
    if errMsg != '':
        print errMsg
        errMsg = None
    return tree

def countWord(str):
    """Approximate word count: collapse whitespace runs to single spaces and
    count the spaces (i.e. words - 1 for non-empty text; 0 for None/empty).
    Callers use this as a rough size metric, so the -1 bias is preserved."""
    if str is None:
        return 0
    collapsed = re.sub(r"\s+", ' ', str).strip()
    return collapsed.count(' ')

def connectDB(dbName, collectionName, host='localhost', port=27017):
    """Open a MongoDB connection and return (collection, connection) so the
    caller can release the connection when finished."""
    connection = Connection(host, port)
    collection = connection[dbName][collectionName]
    return collection, connection

def getElementText(elem):
    """Return an element's text content with whitespace collapsed to single
    spaces. Accepts a single lxml element or a non-empty list (the first
    item is used); anything else yields ''."""
    if type(elem) == types.ListType and len(elem) > 0:
        elem = elem[0]
    if type(elem) != lxml.etree._Element:
        return ''
    raw = lxml.html.fromstring(etree.tostring(elem)).text_content().strip()
    return re.sub(r"\s+", ' ', raw)

def getAttrib(elem, attrib):
    """Return the stripped value of attribute `attrib` on `elem` (or on the
    first item when `elem` is a non-empty list). Returns '' when the input
    is not an lxml element or the attribute is absent.
    """
    value = ''
    if type(elem) == types.ListType and len(elem) > 0: elem = elem[0]
    if type(elem) == lxml.etree._Element:
        # Fix: elem.get() returns None for a missing attribute, which made
        # the unguarded .strip() below raise AttributeError.
        value = elem.get(attrib) or ''
    return value.strip()

def getCRC32Unsign(textToHash=None):
    """Return the unsigned 32-bit CRC32 of the input as a decimal string."""
    # zlib.crc32 can yield a signed value; mask into the unsigned range.
    unsigned = zlib.crc32(textToHash) & 0xffffffff
    return str(unsigned)

def standardlizeTimeValue(timeStr):
    ''' Convert Vietnamese relative time strings to datetimes, e.g.
        "34 phút trước" (34 minutes ago), "1 giờ trước" (1 hour ago),
        "hôm qua" (yesterday), "hôm kia" (day before yesterday), or an
        absolute date like "08/11/2010" (day/month/year).
        Unrecognized input falls through to the current time. '''
    # patterns below are utf-8 byte strings, so encode the input to match
    timeStr = timeStr.encode('utf-8')
    now = datetime.datetime.now()
    listPat = {'phut': re.compile(r"(\d+)\sphút trước"), 'gio': re.compile(r"(\d+)\sgiờ trước"), 
           'homqua': re.compile(r"hôm qua"), 'homkia': re.compile(r"hôm kia"),
           'ngay': re.compile(r"(\d+)/(\d+)/(\d+)")}
    result = now
    # NOTE(review): dict key order is arbitrary, so if more than one pattern
    # ever matched, the winner would be nondeterministic — in practice the
    # site's strings match at most one pattern.
    for patName in listPat.keys():
        m = listPat[patName].search(timeStr)
        if m is not None:
            if patName == 'phut':
                result = now - datetime.timedelta(minutes = int(m.group(1)))
            elif patName == 'gio':
                result = now - datetime.timedelta(hours = int(m.group(1)))
            elif patName == 'homqua':
                result = now - datetime.timedelta(days = 1)
            elif patName == 'homkia':
                result = now - datetime.timedelta(days = 2)
            elif patName == 'ngay':
                # absolute date keeps the current wall-clock time of day
                result = datetime.datetime(day = int(m.group(1)), month = int(m.group(2)), year = int(m.group(3)),
                                           hour = now.hour, minute = now.minute, second = now.second)
    return result

def processArticle(url):
    log.debug('Start processArticle({0})'.format(url))
    totalWords = 0
    ret = []
    results = []
    while url != '':
        tree = None
        try:
            print ">> start processArticle {0}".format(url)
            tree = buildTree(url)
            if tree is None: break
            for item in tree.xpath("//div[@id='top'][@class='p']/descendant-or-self::*"):
                if item.tag == 'a':
                    hrefLink = urllib.unquote(extractWithRegEx(r"url=(.+)&type=ni", urljoin(url, item.get('href')), 1)).strip()
                    if hrefLink != '':
                        results.append({'type':'img', 'data': hrefLink})
                        saveImage(hrefLink)
                if item.text is not None or item.tail is not None:
                    itemText = getElementText(item)
                    itemText = re.sub(r"^\.|,", '', itemText)
                    if item.tag != 'a' and itemText != '':
                        totalWords += countWord(itemText)
                        if item.tag in ['b', 'strong', 'h2', 'h3']:
                            results.append({'type':'textbold', 'data': itemText})
                        else:
                            results.append({'type':'text', 'data': itemText})
            tiepElem = tree.xpath(unicode("//div[@class='r']//a[contains(.,'Ti')]"))
            url = urljoin(url, tiepElem[0].get('href')) if len(tiepElem)>0 else ''
        except:
            errMsg = "Error at ({0}) reason {1}: {2}".format(url, sys.exc_info()[0], sys.exc_info()[1])
            print "=> ", errMsg
            log.error(errMsg)
            errMsg = None
        finally:
            del tree
    # Những bài viết có quá ít kí tự thì coi là lỗi, không ghi lại
    print 'total words: ', totalWords
    if totalWords < 50:
        print 'WARNING - Có {0} từ trong bài viết - {1}'.format(totalWords, url)
    else:   
        ret = results
    return ret

def processPageList(url, chuyenMucId, currentProcessed=0, currentDup=0):
    """Process one listing page (layout used for pages 1-5): for each
    article entry, skip it when already stored, otherwise crawl the article
    body and save it into the MongoDB collection named after the category.

    Returns the updated (currentProcessed, currentDup) counters.
    """
    print '>> start processPageList: ' + url
    log.debug('Start processPageList: {0}, {1}'.format(chuyenMucId, url))
    tree = buildTree(url)
    if tree is None:
        return currentProcessed, currentDup
    collection, connection = connectDB('tintuc', chuyenMucId)
    global totalNewsCrawled
    for artElem in tree.xpath("//div[@class='i']"):
        result = {}
        try:
            result['link'] = str(artElem.xpath('.//a')[0].get('href'))
            result['link'] = urljoin(url, result['link'])
            
            # 2010-12-07: xalo article links vary from day to day, so
            # getUrlMainPart extracts the stable part of the URL before it is
            # hashed into the document _id (keeps dedup working).
            result['_id'] = getMD5Hash(getUrlMainPart(result['link']))
            isExist = collection.find_one({'_id': result['_id']})
            if not isExist:
                result['tieude'] = artElem.xpath('.//a')[0].text.strip()   # title
                # print result['tieude'].decode('utf-8')
                temp = artElem.xpath('.//span[1]')
                result['nguontin'] = getElementText(artElem.xpath('.//span[1]')[0])   # news source
                result['thoigian'] = getElementText(artElem.xpath('.//span[2]')[0])   # publish time
                result['thoigian'] = standardlizeTimeValue(result['thoigian'])
                
                temp = artElem.xpath('.//img')
                if len(temp) > 0:
                    result['thumb'] = getAttrib(temp[0], 'src')
                    result['thumb'] = urljoin(url, result['thumb'])
                    saveImage(result['thumb'])
                # print result
                result['detail'] = processArticle(result['link'])
                if len(result) < 1: continue
                result['lastupdate'] = str(datetime.datetime.now())
                result['timestamp'] = time.time()
                
                # Guard against links that yielded no article content.
                if len(result['detail']) > 1:
                    totalNewsCrawled += 1
                    collection.save(result)
                    currentProcessed += 1 
                result = None
            else:
                currentDup = currentDup + 1
                print 'Article existed: {0} -> {1}'.format(result['link'], result['_id']) 
        except:
            errMsg = "Error at processPageList ({0}) reason {1}: {2}".format(url, sys.exc_info()[0], sys.exc_info()[1])
            print "=> ", errMsg
            log.error(errMsg)
            errMsg = None
            # NOTE(review): unlike processPageList2 this re-raises and aborts
            # the whole page — confirm the asymmetry is intended.
            raise
        finally:
            del result
    if connection: connection.end_request()
    del tree
    print '{0}: Current procesed = {1}; Current dup = {2}'.format(chuyenMucId, currentProcessed, currentDup)
    return currentProcessed, currentDup

def processPageList2(url, chuyenMucId, currentProcessed=0, currentDup=0):
    ''' Handles the alternate listing layout that xalo.vn uses from page 6
        onwards (hence this second function for p >= 6).
        Notes:
            - entries in this layout carry no news-source information
            - there is no thumbnail image information either
        Returns the updated (currentProcessed, currentDup) counters.
    '''
    print '>> start processPageList2: ' + url
    log.debug('Start processPageList2: {0}, {1}'.format(chuyenMucId, url))
    tree = buildTree(url)
    if tree is None:
        return currentProcessed, currentDup
    collection, connection = connectDB('tintuc', chuyenMucId)
    global totalNewsCrawled
    for artElem in tree.xpath("//div[@class='w']"):
        result = {}
        try:
            result['link'] = str(artElem.xpath('.//a')[0].get('href'))
            result['link'] = urljoin(url, result['link'])
            # _id derives from the stable part of the URL (see processPageList)
            result['_id'] = getMD5Hash(getUrlMainPart(result['link']))
            
            isExist = collection.find_one({'_id': result['_id']})
            if not isExist:
                result['tieude'] = artElem.xpath('.//a')[0].text.strip()   # title
                # print result['tieude']
                print result['link']
                temp = artElem.xpath('.//span[1]')
                # result['nguontin'] = artElem.xpath('.//span[1]')[0].text.strip()
                result['thoigian'] = getElementText(artElem.xpath('.//span[1]')[0])   # publish time
                result['thoigian'] = standardlizeTimeValue(result['thoigian'])
                
                temp = artElem.xpath('.//img')
                if len(temp) > 0:
                    # note: getAttrib receives the list here; it uses the first item
                    result['thumb'] = getAttrib(temp, 'src')
                    result['thumb'] = urljoin(url, result['thumb'])
                    # saveImage(result['thumb'])
                # print result
                result['detail'] = processArticle(result['link'])
                if len(result) < 1: continue
                result['lastupdate'] = str(datetime.datetime.now())
                result['timestamp'] = time.time()
                # Guard against links that yielded no article content.
                if len(result['detail']) > 1:
                    collection.save(result)
                    totalNewsCrawled += 1
                    currentProcessed += 1 
                result = None
            else:
                currentDup = currentDup + 1
                print 'Article existed: {0} -> {1}'.format(result['link'], result['_id']) 
        except:
            errMsg = "Error at processPageList2 ({0}) reason {1}: {2}".format(url, sys.exc_info()[0], sys.exc_info()[1])
            print "=> ", errMsg
            log.error(errMsg)
            errMsg = None
        finally:
            del result
    del tree
    if connection: connection.end_request()
    print '{0}: Current procesed = {1}; Current dup = {2}'.format(chuyenMucId, currentProcessed, currentDup)
    return currentProcessed, currentDup

def tryToFixErrorNews():
    """Scan every category collection for broken articles (empty 'detail'
    array or empty title) and print their links for manual inspection.

    Relies on the module-level `listChuyenMuc` defined in the main block.
    """
    print "--------------- ERROR LINK ------------------"
    hasErr = False
    for chuyenMucId in listChuyenMuc:
        # Fix: connectDB returns (collection, connection); the old code bound
        # the whole tuple to `collection`, so collection.find(...) raised
        # AttributeError on every call.
        collection, connection = connectDB('tintuc', chuyenMucId)
        for news in collection.find({"detail": {"$size": 0}}, {'link': 1, '_id': 1, 'tieude': 1}):
            if not hasErr: hasErr = True
            print news['tieude']
            url = news['link']
            print url
        for news in collection.find({"tieude": ""}, {'link': 1, '_id': 1, 'tieude': 1}):
            print news['link']
            if not hasErr: hasErr = True
        # release the per-category connection, matching the other helpers
        if connection: connection.end_request()
    if not hasErr:
        print 'there isn\'t any error link'
    print "---------------------------------------------"

def processChuyenMuc(chuyenMucId):
    """Crawl one category: walk its listing pages until the page limit, the
    duplicate threshold (too many already-seen articles), or the processed
    cap is reached. Pages 1-5 use the processPageList layout, later pages
    use processPageList2. Reads the module-level `maxPage`; on any error it
    hard-exits the entire process.
    """
    duplicateThreshold = 30
    maxProcessThreshold = 200
    currentDupThresHold = 0
    currentProcessThreshold = 0
    try:
        # NOTE(review): range(1, maxPage) stops at maxPage-1 — confirm the
        # last page is intentionally excluded.
        for pageIndex in range(1, maxPage):
            if currentDupThresHold >= duplicateThreshold:
                print 'Chuyên mục: {0} dừng vì trùng quá ngưỡng cho phép'.format(chuyenMucId)
                return
            if currentProcessThreshold >= maxProcessThreshold:
                print 'Chuyên mục: {0} dừng vì đã xử lý nhiều quá ngưỡng cho phép'.format(chuyenMucId)
                return
            # cache-busting timestamp parameter on the listing URL
            url = 'http://m.xalo.vn/{0}.mcm?p={1}&t={2}'.format(chuyenMucId, pageIndex, time.time())
            if pageIndex <= 5:
                currentProcessThreshold, currentDupThresHold = processPageList(url, chuyenMucId, currentProcessThreshold, currentDupThresHold)
            else:        
                currentProcessThreshold, currentDupThresHold = processPageList2(url, chuyenMucId, currentProcessThreshold, currentDupThresHold)
    except:
        errMsg = "There an error in processChuyenMuc({0}) trace: {1} >> {2}".format(chuyenMucId, sys.exc_info()[0], sys.exc_info()[1])
        print "=> ", errMsg
        log.critical(errMsg)
        # hard-exit kills the whole process, including the other worker threads
        os._exit(1)
    return


if __name__ == '__main__':
    # Category ids crawled on m.xalo.vn; also read by tryToFixErrorNews.
    listChuyenMuc = ['xahoi','thegioi','thethao','kinhdoanh','giaitri','giaoduc','phapluat','suckhoe','tinhyeugioitinh','khoahoc','congngheso','otoxemay','chuyenla']
    
    maxPage = 20
    # NOTE(review): incremented from worker threads without a lock — `+=` is
    # not atomic in Python, so the final count may undercount.
    totalNewsCrawled = 0
    
    # Log into MongoDB ('mongolog' db, 'log' collection). Level is INFO, so
    # the log.debug(...) calls elsewhere in this file are dropped.
    log = logging.getLogger('xaloNews')
    log.setLevel(logging.INFO)
    log.addHandler(MongoHandler.to('mongolog', 'log'))
    
#    tryToFixErrorNews()
    
    log.info('Start crawler tin tuc xalo.vn')
    # Crawl all categories with 5 worker threads, one category per task.
    pool = workerpool.WorkerPool(size=5)
    pool.map(processChuyenMuc, listChuyenMuc)
    pool.shutdown()
    pool.wait()
    
    log.info('Finished crawler tin tuc xalo.vn')
    log.info('Total news crawled xalo.vn: {0}'.format(totalNewsCrawled))
    print ">> Finished at ", datetime.datetime.now()
    # NOTE(review): exits with status 1 even on success — confirm this is
    # intended (it forcibly terminates any lingering threads).
    os._exit(1)
    
    