# -*- coding: utf-8 -*-

import traceback, os, re
import commonlib, datetime, time
import workerpool
import html2dict
import mechanize
import logging
import sys
from MongoModel import MongoModel
from lxml import etree
from termcolor import cprint

# Per-category crawl limits: stop paging once this many new articles ...
MAX_PROCESSED = 15
# ... or this many already-seen (duplicate) articles have been encountered.
MAX_DUPLICATED = 10
# MongoDB database holding the `category` and `article` collections.
DBNAME = 'tintuc_v2'
# Local directory where downloaded images are stored before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
MONGO_PORT = 27017
# Remote path prefix under which uploaded images are served.
PREFIX = '/uploads/news/'
# Logger backed by MongoDB (see commonlib); tagged 'vietnamplus'.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'vietnamplus')

def forceQuit():
    """Terminate the interpreter immediately with exit status 1.

    os._exit() bypasses atexit handlers, ``finally`` blocks and stdio
    flushing, so lingering worker/watchdog threads cannot keep the
    process alive.

    BUGFIX: the old ``os.kill(pid, 9)`` call placed *after* os._exit()
    was unreachable dead code and has been removed.
    """
    os._exit(1)
    
class NewsCrawler(MongoModel):
    
    rootUrl = 'http://www.vietnamplus.vn/Home/{0}.vnplus'
    categories = {'xahoi': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi']},
        'chinhtri': {'name': 'Văn hóa - Xã hội', 'tag': ['chinh tri']},
        'thegioi': {'name': 'Thế giới', 'tag': ['the gioi']},
        'vanhoa': {'name': 'Văn hóa - Xã hội', 'tag': ['van hoa']},
        'kinhte': {'name': 'Kinh doanh', 'tag': ['kinh doanh']},
        'thethao': {'name': 'Thể thao', 'tag': ['the thao']},
        'congnghe': {'name': 'Công nghệ số', 'tag': ['cong nghe so']},
        'vanhoa/dienanh': {'name': 'Giải trí', 'tag': ['giai tri']},
        'doisong/amthuc': {'name': 'Ẩm thực', 'tag': ['am thuc']},
        'doisong/lamdep': {'name': 'Làm đẹp - Thời trang', 'tag': ['lam dep']},
        'doisong/suckhoe': {'name': 'Sức khỏe - Giới tính', 'tag': ['suc khoe']}
        }

    def __init__(self, host='localhost', port=27017):
        MongoModel.__init__(self, host, port)
        
    def standardizeTimeStr(self, timeStr):
        try:
            timeStr = re.sub(r' \| ', ' ', timeStr)
            return datetime.datetime.strptime(timeStr, "%d/%m/%Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except:
            cprint('ERROR: an error occured while standardizeTimeStr', 'red')
            return datetime.datetime.utcnow()
    
    def getLeftRootOfCategory(self, catName):
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result
    
    def parent(self, node, tag, maxLevel=3):
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                captionText = ''
                try:
                    cnode = img.getnext().getnext()
                    captionText = commonlib.getElementText(cnode, descendant=1)
                except: pass
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else '' 
        return data
    
    def identifyId(self, url):
        '''http://thethaovanhoa.vn/129N20110819081001939T129/hlv-ferguson-nhat-quyet-troi-ashley-young-o-hai-canh.htm'''
        try:
            return "vnplus_{0}".format(commonlib.extractWithRegEx(r'/(\d+).vnplus', url, 1))
        except:
            return ''
    
    def getContent(self, output, contentNode, url, thumbnail):
        def loaibo(matchStr, blackListWord=['']):
            for blw in blackListWord:
                if matchStr.startswith(blw): return True
                if re.search(blw, matchStr): return True
            return False
        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        stepOver = False
        for i in range(len(tmpData)):
            if (stepOver): stepOver = False; continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                        if tmpData[i+1]['data'] == imageTable[hashItem]:
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                            tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                except: pass
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                if src != '': 
                    item['data'] = src
                    if thumbnail == '': thumbnail = src
                else: continue
            else:
                if loaibo(item['data'], []): continue
            output.append(item)
    
    def getUrlResponse(self, url):
        req = mechanize.Request(url)
        res = mechanize.urlopen(req)
        return res.geturl()
    
    def getDetailNews(self, url):
        logger.debug(unicode('start getNewsDetail(url={0})'.format(url), 'utf8'))
        data = {'postTime': '', 'author': '', 'content': [], 'thumbnail': ''}
        try:
            tree = commonlib.getXMLTree(url, outputHTML=False)
            if tree == '' or tree == None: return
            contentNode = tree.xpath("//div[@class='dtContent']/div[@class='dtText']")
            if len(contentNode) == 0: return
            postTime = commonlib.getElementText(tree.xpath("//div[@class='discussTime']/div[@class='keyword']/preceding-sibling::*[1]"), descendant=1)
            postTime = self.standardizeTimeStr(postTime)
            data['postTime'] = postTime
            authorNode = contentNode[0].xpath(".//div[@class='dtContentTxtAuthor']")
            data['author'] = commonlib.getElementText(authorNode, descendant=1)
            if (authorNode != None): authorNode[0].getparent().remove(authorNode[0])
            content = []
            imageNode = tree.xpath("//div[@id='picOfnews']//img[contains(@id, 'imgImage')]")
            if len(imageNode) > 0:
                img = commonlib.getAttribText(imageNode[0], 'src')
                img = commonlib.urlJoin(url, img) if img != '' else ''
                if img != '':
                    img = commonlib.downloadNUpload(ssh, img, DOWNLOAD_PATH, PREFIX)
                    if img != '':
                        if data['thumbnail'] == '': data['thumbnail'] = img
                        captionText = commonlib.getElementText(tree.xpath("//div[@id='picOfnews']//span[contains(@id, 'lbImageCaption')]"))
                        content.append({'type': 'image', 'data': img, 'caption': captionText})
            self.getContent(content, contentNode[0], url, data['thumbnail'])
            data['content'] = content
            cprint('Author: {0}'.format(data['author']), 'green')
            cprint('PostTime: {0}'.format(data['postTime']))
            print '--------------------------------------------------'
            # -------------------------------------------------------------------
            for item in content:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data
    
    def getListNews(self, catId):
        logger.debug(unicode('start getListNews(catId={0})'.format(catId), 'utf8'))
        url = self.rootUrl.format(catId)
        currentProcessed = 0
        currentDuplicated = 0
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            url = self.rootUrl.format(catId)
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                logger.warning(unicode('chuyen muc: "%s" khong ton tai' % self.categories[catId]['name'], 'utf8'))
                return
            while url != '':
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    if currentProcessed > MAX_PROCESSED: logger.info(unicode('Chuyen muc {0} dung do qua gioi han {1} cho phep'.format(self.categories[catId]['name'], MAX_PROCESSED), 'utf8'))
                    if currentDuplicated > MAX_DUPLICATED: logger.info(unicode('Chuyen muc {0} dung do duplicate > {1}'.format(self.categories[catId]['name'], MAX_DUPLICATED), 'utf8')) 
                    break
                itemQueue = {}
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: return
                listXpath = [{'bound': "//div[@class='catCol1']//div[@class='catNews']", 
                    'link': ".//div[@class='catNewsText']/a",
                    'thumbnail': ".//img[contains(@id, 'top3tinID')]", 
                    'title': ".//div[@class='catNewsText']/a", 
                    'description': ".//span[contains(@id, 'top3tinID_lbDesc')]"},
                    {'bound': "//div[@class='catCol1']/div[@class='catNewsBottom']/div[@class='catItem']", 
                     'link': ".//a[contains(@id, 'top3tinID_lnkHeadline')]",
                    'thumbnail': ".//img[contains(@id, 'top3tinID')]", 
                    'title': ".//a[contains(@id, 'top3tinID_lnkHeadline')]", 
                    'description': ".//span[contains(@id, 'lbDesc')]"},
                    {'bound': "//div[@class='catColLeft']/div[@class='catItem02']", 
                     'link': ".//a[contains(@id, 'rptMainList')]",
                    'thumbnail': ".//img[contains(@id, 'top3tinID')]", 
                    'title': ".//a[contains(@id, 'rptMainList')]", 
                    'description': ".//span[contains(@id, 'lbDesc')]"},
                    {'bound': "//div[@id='focusItem']/div[@class='focusNews']", 
                     'link': ".//a[contains(@id, 'lnkHeadline')]",
                    'thumbnail': ".//img[contains(@id, 'imgImage')]", 
                    'title': ".//a[contains(@id, 'lnkHeadline')]", 
                    'description': ".//span[contains(@id, 'lbDesc')]"}]
                for xp in listXpath:
                    for item in tree.xpath(xp['bound']):
                        title = commonlib.getElementText(item.xpath(xp['title']), descendant=1)
                        description = commonlib.getElementText(item.xpath(xp['description']), descendant=1)
                        link = commonlib.getAttribText(item.xpath(xp['link']), 'href')
                        link = commonlib.urlJoin(url, link) if link != '' else ''
                        id = self.identifyId(link)
                        print 'ID: {0}'.format(id)
                        if id == '' or link == '': continue
                        hashUrl = commonlib.getMD5Hash(id)
                        if collection.find_one({'hashUrl': hashUrl}):
                            currentDuplicated += 1
                            cprint("WARNING: already existed in database !")
                            continue
                        currentProcessed += 1
                        thumbnail = ''
                        thumbnailNode = item.xpath(xp['thumbnail'])
                        if thumbnailNode != None:
                            thumbnail = commonlib.getAttribText(thumbnailNode, 'src')
                            thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                            if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                        itemQueue[hashUrl] = {'link': link, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'description': description}
                nextNode = tree.xpath("//div[@class='pageView']/div[@class='pageViewNo']/span/following-sibling::*[1]")
                if nextNode == None: url = ''
                urlNext = commonlib.getAttribText(nextNode[0], 'href')
                urlNext = commonlib.urlJoin(url, urlNext) if urlNext != '' else ''
                if urlNext == '': url = ''
                url = re.sub(r'\s', '%20', urlNext)
                cprint('GOTO PAGE: {0}'.format(url), 'yellow')
            for item in itemQueue.values():
                link = item['link']
                title = item['title']
                print '--------------------------------------------------'
                print 'Title: {0}'.format(title)
                cprint('Link: {0}'.format(link), 'cyan')
                print 'Thumbnail: {0}'.format(item['thumbnail'])
                print 'Description: {0}'.format(item['description'])
                detail = self.getDetailNews(link)
                if ((len(detail['content']) == 0) or (len(detail) == 0)): continue
                collection.save({'hashUrl': item['hashUrl'],
                    'title': title,
                    'thumbnail': item['thumbnail'] if item['thumbnail'] != '' else detail['thumbnail'],
                    'description': item['description'],
                    'content': detail['content'],
                    'newsLink': link,
                    'update': detail['postTime'],
                    'source': 'vietnamplus.vn',
                    'author': detail['author'],
                    'category': cat['category'],
                    'root': cat['root'],
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    'tag': self.categories[catId]['tag']
                    })
                totalNewsCrawlered += 1
            totalNewsDuplicated += currentDuplicated
        except:
            logger.exception(str(sys.exc_info()[1]))

def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức vietnamplus không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    # Crawl counters updated by NewsCrawler.getListNews and read by the
    # watchdog thread and the summary log below.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    import threading
    import argparse
    # Timestamp of last activity used by quitIfTimeout().  NOTE(review): it
    # is never updated after start-up, so the watchdog always fires ~10
    # minutes in regardless of progress -- confirm whether that is intended.
    lastaction = time.time()
    # -- Command-line argument parsing
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Initialise the SSH connection to the mana.vn server; the module
    #    global `ssh` is consumed by commonlib.downloadNUpload inside the
    #    crawler methods.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    # Watchdog thread that hard-kills the process on timeout.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler vietnamplus')
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Two workers crawl the category list concurrently.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final crawl summary: critical when nothing at all was seen.
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức vietnamplus không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler vietnamplus at {0}'.format(datetime.datetime.now()))
    forceQuit()
