# -*- coding: utf-8 -*-

import argparse
import datetime
import os
import re
import sys
import time
import traceback

import mechanize
import workerpool
from lxml import etree
from termcolor import cprint

import commonlib
import html2dict
from MongoModel import MongoModel

MAX_PROCESSED = 15
MAX_DUPLICATED = 10
DBNAME = 'tintuc_v2'
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
MONGO_PORT = 27017
PREFIX = '/uploads/news/'
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'thethaovanhoa')

def forceQuit():
    """Terminate the current process immediately, skipping all cleanup.

    os._exit() bypasses atexit handlers, finally blocks and waiting
    threads -- intentional here, since the watchdog/worker threads would
    otherwise keep the interpreter alive.

    Bug fixed: the original called os._exit(1) and then os.kill(pid, 9);
    the kill was unreachable dead code and has been removed.
    """
    os._exit(1)
    
class NewsCrawler(MongoModel):
    """Scrapes news articles from thethaovanhoa.vn into MongoDB.

    getListNews() walks a category's listing pages collecting article
    stubs; getDetailNews() fetches each article body; results are saved
    in the 'article' collection of DBNAME.

    NOTE(review): several methods read module-level globals that are
    only defined in the __main__ section: `ssh` (optional SFTP channel
    for uploads) and the counters `totalNewsCrawlered` /
    `totalNewsDuplicated`.  Running methods outside __main__ will raise
    NameError on those globals.
    """

    # Listing-page URL template; {0} is one of the category paths below.
    rootUrl = 'http://thethaovanhoa.vn/{0}.htm'
    # category path -> {'name': display name looked up in the 'category'
    # collection, 'tag': tag list stored on each saved article}
    categories = {'132CT0/xa-hoi': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi']},
        '131CT0/the-gioi': {'name': 'Thế giới', 'tag': ['the gioi']},
        '130CT0/muon-mau-the-thao': {'name': 'Thể thao', 'tag': ['the thao']},
        '129CT0/bong-da-quoc-te': {'name': 'Thể thao', 'tag': ['the thao']},
        '128CT0/bong-da-trong-nuoc': {'name': 'Thể thao', 'tag': ['the thao']},
        '350CT0/cuoc-song-so': {'name': 'Công nghệ số', 'tag': ['cong nghe so']},
        '135CT0/hau-truong': {'name': 'Giải trí', 'tag': ['giai tri']}
        }

    def __init__(self, host='localhost', port=27017):
        # Old-style explicit base call; MongoModel opens self.connection.
        MongoModel.__init__(self, host, port)

    def standardizeTimeStr(self, timeStr):
        """Parse a 'dd/mm/YYYY HH:MM' timestamp out of timeStr.

        The parsed value is shifted by time.timezone -- presumably to
        convert the site's local time to UTC; TODO confirm.  Any failure
        (no regex match, bad format) falls back to the current UTC time.
        """
        try:
            timeStr = commonlib.extractWithRegEx(r'(\d+/\d+/\d{4} \d+:\d+)', timeStr, 1)
            timeObj = datetime.datetime.strptime(timeStr, "%d/%m/%Y %H:%M") + datetime.timedelta(seconds=time.timezone)
            return timeObj
        except:
            logger.warning('An error occured while standardizeTimeStr')
            return datetime.datetime.utcnow()

    def getLeftRootOfCategory(self, catName):
        """Look up a category's tree position by display name.

        Returns {'category': lft, 'root': root_id} from the 'category'
        collection, or the empty string '' (not None) when no document
        matches -- callers compare against ''.
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result

    def parent(self, node, tag, maxLevel=3):
        """Walk upward from node looking for an element with the given tag.

        Checks the current node *before* moving up, stops after maxLevel
        hops or at the tree root.  NOTE(review): when no matching
        ancestor exists, the last node reached is returned anyway.
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode

    def getImageAndCaption(self, contentNode, url):
        """Build a map of hash(absolute img src) -> caption text.

        The caption is read from the enclosing <tr> (up to 3 levels up)
        or, when that text is empty, from its following sibling.
        Captions of 40 words or more are replaced by ''.
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                pnode = self.parent(img, 'tr', 3)
                captionText = commonlib.getElementText(pnode, descendant=1)
                if captionText == '':
                    captionNode = pnode.getnext()
                    if captionNode != None:
                        captionText = commonlib.getElementText(captionNode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else ''
        return data

    def identifyId(self, url):
        """Extract the article id segment from a detail-page URL.

        e.g. http://thethaovanhoa.vn/129N20110819081001939T129/hlv-ferguson-nhat-quyet-troi-ashley-young-o-hai-canh.htm
        yields '129N20110819081001939T129'.  Returns '' on any failure.
        """
        try:
            return commonlib.extractWithRegEx(r'thethaovanhoa.vn/(.+)/.+.htm', url, 1)
        except:
            return ''

    def getContent(self, output, contentNode, url):
        """Convert contentNode into html2dict items, appended to output.

        Image items get a 'caption' field (matched via
        getImageAndCaption) and have their src replaced by the uploaded
        path from downloadNUpload; a text item that merely repeats the
        image caption is skipped via the stepOver flag.  Reads the
        module-level global `ssh`.
        """
        def loaibo(matchStr, blackListWord=['']):
            # "loaibo" = "exclude": True when matchStr starts with, or
            # regex-matches, any blacklist entry.  NOTE(review): the
            # default [''] would exclude *everything* (startswith('')
            # is always True); the only caller passes [] so the default
            # is never used.
            for blw in blackListWord:
                if matchStr.startswith(blw): return True
                if re.search(blw, matchStr): return True
            return False
        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        stepOver = False
        for i in range(len(tmpData)):
            # stepOver set below means item i is a caption already merged
            # into the preceding image -- skip it once.
            if (stepOver): stepOver = False; continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                        if tmpData[i+1]['data'] == imageTable[hashItem]:
                            # Next item is exactly the caption: absorb it.
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                            # Next item begins with the caption: strip it off.
                            tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                # Bare except also covers KeyError/IndexError at the list end.
                except: pass
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                # Drop images that failed to download/upload.
                if src != '': item['data'] = src
                else: continue
            else:
                if loaibo(item['data'], []): continue
            output.append(item)

    def getUrlResponse(self, url):
        """Follow redirects for url and return the final URL."""
        req = mechanize.Request(url)
        res = mechanize.urlopen(req)
        return res.geturl()

    def getDetailNews(self, url):
        """Fetch one article page; return {'postTime','author','content'}.

        NOTE(review): the `return data` in the finally block overrides
        both the bare early `return` statements and any raised
        exception, so callers always receive the dict (possibly with an
        empty 'content' list).
        """
        logger.debug('start getNewsDetail(url={0})'.format(url))
        data = {'postTime': '', 'author': '', 'content': []}
        try:
            tree = commonlib.getXMLTree(url, outputHTML=False)
            if tree == '' or tree == None: return
            contentNode = tree.xpath("//div[@id='divDetail']")
            if len(contentNode) == 0: return
            postTime = commonlib.getElementText(tree.xpath("//p[@class='timer1']"))
            postTime = self.standardizeTimeStr(postTime)
            data['postTime'] = postTime
            # Author is the last bold right-aligned <p>; layout differs when
            # the detail div has a single wrapper child.
            authorNode = contentNode[0].xpath("./p[@style='font-weight: bold; text-align: right;' and position()=last()]") if len(contentNode[0].getchildren()) > 1 else contentNode[0].xpath("./div/p[position()=last()]")
            if len(authorNode) > 0:
                text = commonlib.getElementText(authorNode[0], descendant=1)
                # A short trailing paragraph is assumed to be the byline;
                # everything after it (related links etc.) is removed.
                if commonlib.wordCount(text) < 5:
                    while(authorNode[0].getnext() != None):
                        nextSibling = authorNode[0].getnext()
                        nextSibling.getparent().remove(nextSibling)
                    data['author'] = text
                    authorNode[0].getparent().remove(authorNode[0])
            commonlib.dropTagWithIndex(contentNode[0], 'p', 1)
            content = []
            self.getContent(content, contentNode[0], url)
            data['content'] = content
            cprint('Author: {0}'.format(data['author']), 'green')
            cprint('PostTime: {0}'.format(data['postTime']))
            print '--------------------------------------------------'
            # -- debug dump of the parsed content items ------------------
            for item in content:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data

    def getListNews(self, catId):
        """Crawl one category: walk listing pages, then fetch and save
        each new article into the 'article' collection.

        Stops paging once more than MAX_PROCESSED new or MAX_DUPLICATED
        already-seen articles are counted.  NOTE(review): itemQueue is
        re-created at the top of every page-loop iteration, so only the
        items gathered on the *final* page visited are actually saved.
        """
        logger.debug('start getListNews(catId={0})'.format(catId))
        url = self.rootUrl.format(catId)
        currentProcessed = 0
        currentDuplicated = 0
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            url = self.rootUrl.format(catId)
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                logger.warning('chuyen muc: "%s" khong ton tai' % self.categories[catId]['name'])
                return
            while url != '':
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    if currentProcessed > MAX_PROCESSED: logger.info('Chuyen muc {0} dung do qua gioi han {1} cho phep'.format(self.categories[catId]['name'], MAX_PROCESSED))
                    if currentDuplicated > MAX_DUPLICATED: logger.info('Chuyen muc {0} dung do duplicate > {1}'.format(self.categories[catId]['name'], MAX_DUPLICATED))
                    break
                itemQueue = {}
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: return
                # Featured ("highlight") article at the top of the page.
                firstItemNode = tree.xpath("//div[contains(@id, 'divHightLight')]//div[@class='boxnoidung']")
                # NOTE(review): xpath() returns a list, never None, so this
                # check cannot filter an empty result; an empty list raises
                # IndexError below (caught by the outer bare except).
                if firstItemNode != None:
                    link = commonlib.getAttribText(firstItemNode[0].xpath('.//a'), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    id = self.identifyId(link)
                    print 'ID: {0}'.format(id)
                    if id != '':
                        # Dedupe on the MD5 of the site-assigned article id.
                        hashUrl = commonlib.getMD5Hash(id)
                        if collection.find_one({'hashUrl': hashUrl}):
                            currentDuplicated += 1
                            cprint("WARNING: already existed in database !")
                        else:
                            thumbnail = commonlib.getAttribText(firstItemNode[0].xpath(".//a/img"), 'src')
                            thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                            if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                            title = commonlib.getElementText(firstItemNode[0].xpath(".//h2/a"), descendant=1)
                            description = commonlib.getElementText(firstItemNode[0].xpath("./p"), descendant=1)
                            itemQueue[hashUrl] = {'link': link, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'description': description}
                            currentProcessed += 1
                # Regular listing items (skipping the last few boxes).
                for item in tree.xpath("//div[@class='box4362' and position()<last()-2]"):
                    title = commonlib.getElementText(item.xpath("./h4/a"), descendant=1)
                    description = commonlib.getElementText(item.xpath("./p"), descendant=1)
                    link = commonlib.getAttribText(item.xpath("./h4/a"), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    id = self.identifyId(link)
                    print 'ID: {0}'.format(id)
                    if id == '' or link == '': continue
                    hashUrl = commonlib.getMD5Hash(id)
                    if collection.find_one({'hashUrl': hashUrl}):
                        currentDuplicated += 1
                        cprint("WARNING: already existed in database !")
                        continue
                    currentProcessed += 1
                    thumbnail = ''
                    thumbnailNode = item.xpath(".//a/img")
                    # NOTE(review): same list-vs-None issue as above.
                    if thumbnailNode != None:
                        thumbnail = commonlib.getAttribText(thumbnailNode, 'src')
                        thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                        if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    itemQueue[hashUrl] = {'link': link, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'description': description}
                # Pager: element right after the current-page marker.
                nextNode = tree.xpath("//div[@class='pagercontainer']/div[@class='currentpage']/following-sibling::*[1]")
                # NOTE(review): never None (see above); the assignment below
                # is immediately overwritten, and nextNode[0] can IndexError.
                if nextNode == None: url = ''
                urlNext = commonlib.getAttribText(nextNode[0], 'href')
                urlNext = commonlib.urlJoin(url, urlNext) if urlNext != '' else ''
                if urlNext == '': url = ''
                url = re.sub(r'\s', '%20', urlNext)
                cprint('GOTO PAGE: {0}'.format(url), 'yellow')
            # Fetch details and persist each queued stub.
            for item in itemQueue.values():
                link = item['link']
                title = item['title']
                print '--------------------------------------------------'
                print 'Title: {0}'.format(title)
                cprint('Link: {0}'.format(link), 'cyan')
                print 'Thumbnail: {0}'.format(item['thumbnail'])
                print 'Description: {0}'.format(item['description'])
                detail = self.getDetailNews(link)
                if ((len(detail['content']) == 0) or (len(detail) == 0)): continue
                collection.save({'hashUrl': item['hashUrl'],
                    'title': title,
                    'thumbnail': item['thumbnail'],
                    'description': item['description'],
                    'content': detail['content'],
                    'newsLink': link,
                    'update': detail['postTime'],
                    'source': 'thethaovanhoa.vn',
                    'author': detail['author'],
                    'category': cat['category'],
                    'root': cat['root'],
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    'tag': self.categories[catId]['tag']
                    })
                totalNewsCrawlered += 1
            totalNewsDuplicated += currentDuplicated
        except:
            logger.exception(str(sys.exc_info()[1]))

def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức thethaovanhoa không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    # Shared counters read by quitIfTimeout(); updated from getListNews()
    # via `global` declarations.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    import threading
    # Timestamp of last activity, read by the watchdog (kills the process
    # after 600s).  NOTE(review): nothing visible ever updates this value,
    # so the watchdog appears to always fire ~10 minutes after startup --
    # confirm whether that is the intended total-runtime cap.
    lastaction = time.time()
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Initialize the SSH connection to server mana.vn (sftp mode only);
    #    `ssh` is read as a module-level global by NewsCrawler for uploads.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    pid = os.getpid()
    # Start the watchdog that hard-kills a hung crawler.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler thethaovanhoa.vn')
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
#    crawler.getListNews('129CT0/bong-da-quoc-te')
#    crawler.getDetailNews('http://thethaovanhoa.vn/128N20110808100303963T0/thac-mac-hong-biet-hoi-ai.htm')
    try:
        # Crawl every category concurrently on a 5-worker pool.
        pool = workerpool.WorkerPool(size=5)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final summary: critical when nothing at all was seen (dead crawler),
    # info with totals otherwise.
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức thethaovanhoa không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler thethaovanhoa.vn')
    forceQuit()