# -*- coding: utf-8 -*-

import traceback, os, re
import commonlib, datetime, time
import workerpool
import html2dict
import logging
import sys
from MongoModel import MongoModel
from lxml import etree
from termcolor import cprint

# Stop crawling a category after this many newly stored articles ...
MAX_PROCESSED = 15
# ... or after seeing this many already-known (duplicate) articles.
MAX_DUPLICATED = 10
# MongoDB database holding the `category` and `article` collections.
DBNAME = 'tintuc_v2'
# Local directory where article images are downloaded before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Mana 64-bit server
MONGO_PORT = 27017
# URL prefix under which the uploaded images are served.
PREFIX = '/uploads/news/'
# Mongo-backed logger dedicated to this crawler's source site.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'tuoitre.vn')

def forceQuit():
    """Terminate the current process immediately, skipping all cleanup.

    BUGFIX: the original called os._exit(1) first, which made the
    os.kill() line unreachable dead code.  SIGKILL is now the primary
    path (matches the original comment: kill -9 on Linux) and os._exit
    remains as a fallback for platforms where os.kill fails.
    """
    pid = os.getpid()
    try:
        os.kill(pid, 9)  # kill -9 <pid> on Linux
    except OSError:
        pass
    os._exit(1)  # fallback: hard exit without atexit/finally handlers

class NewsCrawler(MongoModel):
    """Crawler for tuoitre.vn: harvests news articles into MongoDB."""
    
    # Listing-page URL template; {catid} is one of the `categories` keys.
    url = 'http://tuoitre.vn/{catid}/Index.html'
    # Maps a tuoitre.vn category slug to the local category display name
    # and the list of tags attached to articles stored from it.
    categories = {'Chinh-tri-Xa-hoi': {'name': 'Văn hóa - Xã hội', 'tag': ['chinh tri', 'xa hoi']},
        'The-gioi': {'name': 'Thế giới', 'tag': ['the gioi']},
        'Van-hoa-Giai-tri': {'name': 'Văn hóa - Xã hội', 'tag': ['van hoa', 'xa hoi']},
        'Giao-duc': {'name': 'Giáo dục', 'tag': ['giao duc']},
        'Nhip-song-tre': {'name': 'Sức khỏe - Giới tính', 'tag': ['suc khoe', 'gioi tinh']},
        }
    
    def __init__(self, host='localhost', port=27017):
        # Connection setup is handled entirely by the MongoModel base class.
        MongoModel.__init__(self, host, port)
    
    def getLeftRootOfCategory(self, catName):
        """Look up a category's nested-set position by its display name.

        Returns {'category': lft, 'root': root_id} when the category
        exists in the `category` collection, or '' when it does not.
        """
        category_coll = self.connection[DBNAME]['category']
        doc = category_coll.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        if not doc:
            return ''
        return {'category': doc['lft'], 'root': doc['root_id']}
    
    def standardizeTimeStr(self, timeStr):
        """Parse a writeTime('YYYY/MM/DD HH:MM:SS') snippet into a UTC datetime.

        The page timestamp is local time; adding time.timezone shifts it
        to UTC.  Falls back to the current UTC time when the string cannot
        be parsed.
        """
        time_str = commonlib.extractWithRegEx(r"\('(.+)'\)", timeStr, 1)
        try:
            local_dt = datetime.datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")
            return local_dt + datetime.timedelta(seconds=time.timezone)
        except (ValueError, TypeError):  # was a bare except: catch parse errors only
            cprint('ERROR: standardize time string occured error (timeStr={0})'.format(timeStr))
            return datetime.datetime.utcnow()
    
    def identifyId(self, url):
        """Extract the numeric article id embedded in a tuoitre.vn URL path."""
        id_pattern = r'/(\d+)/.+\.html'
        return commonlib.extractWithRegEx(id_pattern, url, 1)
    
    def parent(self, node, tag, maxLevel=3):
        """Walk upward from `node` looking for a node whose tag is `tag`.

        Checks `node` itself first, then climbs at most `maxLevel` parents
        or until the tree root.  Returns the matching node, or the last
        node examined when nothing matched.
        """
        level = 0
        current = node
        # `is not None` instead of `!= None`: lxml elements define rich
        # comparisons, so identity tests are the documented idiom.
        while current.getparent() is not None and level <= maxLevel:
            level += 1
            if current.tag == tag:
                break
            current = current.getparent()
        return current
    
    def getImageAndCaption(self, contentNode, url):
        """Collect caption text for every <img> inside contentNode.

        Returns a dict mapping hash(absolute image URL) -> caption text.
        A "caption" of 40 or more words is assumed to be body text and is
        stored as ''.
        """
        data = {}
        # `is None` instead of `== None`: lxml elements override __eq__.
        if contentNode is None:
            return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                # tuoitre.vn wraps images in table rows; the caption is the
                # sibling element following the enclosing <tr>.
                row = self.parent(img, 'tr', 3)
                captionNode = row.getnext()
                captionText = ''
                if captionNode is not None:
                    captionText = commonlib.getElementText(captionNode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else ''
        return data
    
    def getContent(self, output, contentNode, url, thumbnail=''):
        """Convert contentNode into text/image items appended to `output`.

        Every image is downloaded/uploaded and its URL rewritten; captions
        gathered by getImageAndCaption are attached, and a text item that
        merely repeats the caption right after its image is consumed.

        Returns the thumbnail URL (the first successfully uploaded image
        when the caller did not supply one).  NOTE(review): the original
        only rebound the `thumbnail` parameter locally, so callers could
        never observe the chosen thumbnail; returning it exposes the value
        while staying backward compatible (the old return value was None
        and ignored by callers).
        """
        def _is_blacklisted(text, blacklist=()):
            # Immutable tuple default replaces the original mutable [] default.
            for pattern in blacklist:
                if re.search(pattern, text):
                    return True
            return False

        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        stepOver = False
        for i in range(len(tmpData)):
            if stepOver:
                # The previous image consumed this item as its caption.
                stepOver = False
                continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    if imageTable[hashItem] != '' and tmpData[i + 1]['type'] != 'textbold':
                        if tmpData[i + 1]['data'] == imageTable[hashItem]:
                            # Next item is exactly the caption: skip it.
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i + 1]['data']) > len(imageTable[hashItem])
                              and tmpData[i + 1]['data'].startswith(imageTable[hashItem])):
                            # Caption is a prefix of the next text item: strip it off.
                            tmpData[i + 1]['data'] = tmpData[i + 1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                except (KeyError, IndexError):
                    # Was a bare except: image with no caption entry, or the
                    # image is the last item (no tmpData[i+1]).
                    pass
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                if src == '':
                    continue  # download/upload failed: drop this image item
                item['data'] = src
                if thumbnail == '':
                    thumbnail = src
            else:
                if _is_blacklisted(item['data']):
                    continue
            output.append(item)
        return thumbnail
    
    def getNewsDetail(self, url):
        print('start getNewsDetail(url={0})'.format(url))
        data = {}
        try:
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            contentNode = tree.xpath("//div[@id='divContent']")
            if len(contentNode) == 0: return
            postTime = tree.xpath("//div[@id='contentBdy']//span/script[contains(., 'writeTime')]")
            postTime = self.standardizeTimeStr(commonlib.getElementText(postTime, descendant=1))
            title = commonlib.getElementText(contentNode[0].xpath(".//p[@class='pTitle'][1]"), descendant=1)
            if title == '': return
            description = commonlib.getElementText(contentNode[0].xpath(".//p[@class='pHead'][1]"), descendant=1)
            author = commonlib.getElementText(contentNode[0].xpath(".//p[@class='pAuthor']"), descendant=1)
            commonlib.cleanElementWithAttrib(contentNode[0], 'p', [('class', 'pTitle')])
            commonlib.cleanElementWithAttrib(contentNode[0], 'p', [('class', 'pHead')])
            commonlib.cleanElementWithAttrib(contentNode[0], 'p', [('class', 'pAuthor')])
            content = []
            thumbnail = ''
            self.getContent(content, contentNode[0], url, thumbnail)
            print '# ----------------------------------------'
            print 'Title: ', title
            print 'Description: ', description
            print 'Author: ', author
            print 'Time: ', postTime
            print '# ----------------------------------------'
            # -------------------------------------------------------------------
            for item in content:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
            data['title'] = title
            data['description'] = description
            data['author'] = author
            data['content'] = content
            data['postDate'] = postTime
            data['thumbnail'] = thumbnail
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data
        
    def getListNews(self, catId):
        """Crawl one category's paginated listing and store new articles.

        Walks the listing starting at self.url.format(catid=catId),
        extracts article links, skips articles whose hash already exists
        in the `article` collection, fetches the details of new ones and
        saves them.  Stops at the end of pagination, after MAX_PROCESSED
        new articles, or after MAX_DUPLICATED duplicates.  Updates the
        module-level crawl totals.
        """
        print('getListNews(catId={0})'.format(catId))
        currentProcessed = 0
        currentDuplicated = 0
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            url = self.url.format(catid=catId)
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                # BUGFIX: the closing parenthesis of unicode() was misplaced,
                # so 'utf-8' was passed to logger.warning() as a spurious
                # format argument instead of being the encoding.
                logger.warning(unicode('chuyen muc: "%s" khong ton tai' % self.categories[catId]['name'], 'utf-8'))
                return
            itemQueue = {}
            while url != '':
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    if currentProcessed > MAX_PROCESSED: logger.info(unicode('Chuyen muc {0} dung do qua gioi han {1} cho phep'.format(self.categories[catId]['name'], MAX_PROCESSED), 'utf-8'))
                    if currentDuplicated > MAX_DUPLICATED: logger.info(unicode('Chuyen muc {0} dung do duplicate > {1}'.format(self.categories[catId]['name'], MAX_DUPLICATED), 'utf-8'))
                    break
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree is None: break
                # --- featured (large, first) article of the page -------------
                firstItemNode = tree.xpath("//div[@style='width:498px;float:left;overflow:hidden;']")
                if len(firstItemNode) > 0:
                    linkNode = firstItemNode[0].xpath(".//img/..")
                    if len(linkNode) > 0:
                        link = commonlib.getAttribText(linkNode[0], 'href')
                        link = commonlib.urlJoin(url, link) if link != '' else ''
                        # Normalize the slug so the URL depends only on the id.
                        link = re.sub(r'/(\d+)/(.+).html', lambda m: "/{0}/a.html".format(m.group(1)), link)
                        # NOTE(review): this `continue` restarts the while loop
                        # and skips the regular list + pagination below --
                        # preserved as-is, confirm it is intended.
                        if link == '': continue
                        hashUrl = commonlib.getMD5Hash("tuoitre_{0}".format(self.identifyId(link)))
                        isExist = collection.find_one({'hashUrl': hashUrl})
                        if isExist:
                            cprint('Already existed in database !', 'yellow')
                            currentDuplicated += 1
                        else:
                            thumbnail = commonlib.getAttribText(firstItemNode[0].xpath(".//img"), 'src')
                            thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                            if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                            # `in` replaces the deprecated dict.has_key().
                            if hashUrl not in itemQueue:
                                itemQueue[hashUrl] = {'link': link, 'thumbnail': thumbnail, 'hashUrl': hashUrl}
                # --- regular article list ------------------------------------
                for item in tree.xpath("//div[@style='width: 672px; clear: both; overflow: hidden; margin-top: 5px; margin-bottom: 5px;']"):
                    linkNode = item.xpath(".//a")
                    if len(linkNode) == 0: raise Exception("ERROR: Lỗi xpath lấy list link của category")
                    link = commonlib.getAttribText(linkNode, 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    link = re.sub(r'/(\d+)/(.+).html', lambda m: "/{0}/a.html".format(m.group(1)), link)
                    if link == '': continue
                    hashUrl = commonlib.getMD5Hash("tuoitre_{0}".format(self.identifyId(link)))
                    isExist = collection.find_one({'hashUrl': hashUrl})
                    if isExist:
                        cprint('Already existed in database !', 'yellow')
                        currentDuplicated += 1
                        continue
                    thumbnail = commonlib.getAttribText(item.xpath(".//a/img"), 'src')
                    thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                    if thumbnail != '':
                        thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    itemQueue[hashUrl] = {'link': link, 'thumbnail': thumbnail, 'hashUrl': hashUrl}
                    currentProcessed += 1
                # --- fetch details and persist -------------------------------
                for item in itemQueue.values():
                    link = item['link']
                    thumbnail = item['thumbnail']
                    detail = self.getNewsDetail(link)
                    # BUGFIX: the empty-dict check must run BEFORE indexing
                    # detail['content'] -- the original order raised KeyError
                    # whenever getNewsDetail returned {}.
                    if len(detail) == 0 or len(detail['content']) == 0: continue
                    collection.save({'hashUrl': item['hashUrl'],
                        'title': detail['title'],
                        'thumbnail': thumbnail if thumbnail != '' else detail['thumbnail'],
                        'description': detail['description'],
                        'content': detail['content'],
                        'newsLink': link,
                        'update': detail['postDate'],
                        'source': 'tuoitre.vn',
                        # NOTE(review): detail['author'] is extracted but the
                        # stored author is '' -- preserved, confirm intended.
                        'author': '',
                        'category': cat['category'],
                        'root': cat['root'],
                        'is_active': True,
                        'lastupdate': datetime.datetime.utcnow(),
                        'timestamp': time.time(),
                        'date': datetime.datetime.utcnow(),
                        'tag': self.categories[catId]['tag']
                        })
                    totalNewsCrawlered += 1
                itemQueue = {}
                # --- pagination ----------------------------------------------
                nextPageNode = tree.xpath("//div[@class='bartoolChannellist']//img[contains(@title, 'Các tin bài tiếp theo')]/..".decode('utf-8'))
                if len(nextPageNode) > 0:
                    nextUrl = commonlib.getAttribText(nextPageNode[0], 'href')
                    nextUrl = re.sub(r'\s', '%20', nextUrl)  # crude escape of spaces
                    url = commonlib.urlJoin(url, nextUrl) if nextUrl != '' else ''
                else:
                    url = ''
            totalNewsDuplicated += currentDuplicated
        except Exception:
            logger.exception(str(sys.exc_info()[1]))
    
def quitIfTimeout():
    """Watchdog loop: SIGKILL the whole process after 10 minutes of inactivity.

    Runs forever in a background thread; every 10 seconds it compares the
    global `lastaction` timestamp against the 600-second budget, logs a
    summary, and kills the process.  NOTE(review): `lastaction` is set
    once at startup and never refreshed elsewhere in this file, so this
    is effectively a hard 10-minute wall-clock limit -- confirm intended.
    """
    logger.debug('call quitIfTimeout')
    pid = os.getpid()  # hoisted out of the loop: the pid never changes
    while True:
        delta = time.time() - lastaction
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức tuoitre.vn không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                # Direct SIGKILL instead of shelling out to `kill -9` via
                # os.system: no shell dependency and errors are reported
                # as exceptions handled below.
                os.kill(pid, 9)
        except Exception:  # was a bare except
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)
    
if __name__ == '__main__':
    # Shared crawl counters, read by the quitIfTimeout watchdog thread and
    # by the summary logging below.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    # Last-activity timestamp for the watchdog (set once at startup).
    lastaction = time.time()
    import argparse
    import threading
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Set up the SSH connection to the mana.vn server (sftp mode only);
    #    `ssh` is a module-level global used by the image upload helpers.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    # Watchdog thread kills the process if the crawl exceeds its time budget.
    # NOTE(review): the thread is not a daemon -- the forceQuit() at the end
    # is what prevents it from keeping the process alive.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler tuoitre.vn')
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Crawl all configured categories with two concurrent workers.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final summary: critical when nothing at all was seen (crawler broken).
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức tuoitre.vn không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler tuoitre.vn at {0}'.format(datetime.datetime.now()))
    forceQuit()
