# -*- coding: utf-8 -*-

import commonlib
import traceback, re
import os, datetime, time, workerpool
import html2dict
import logging
import sys
from MongoModel import MongoModel
from MongoDbLog import MongoDbLog
from termcolor import cprint
from lxml import etree

# Local directory where downloaded article images are staged before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
MONGO_PORT = 27017
DBNAME = 'tintuc_v2'
# URL prefix under which uploaded images are served.
PREFIX = '/uploads/news/'
# Per-category stop limits: cease crawling a category after this many
# newly processed / already-seen (duplicate) articles.
MAX_PROCESSED = 15
MAX_DUPLICATE = 10
# MongoDB-backed logger tagged with the crawled site's domain.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'thanhnien.com.vn')

def forceQuit():
    """Terminate the current process immediately, without cleanup.

    Sends SIGKILL (kill -9) to our own pid, and hard-exits with status 1
    as a fallback in case the signal delivery is delayed or fails.

    BUGFIX: the original called os._exit(1) first, which made the
    os.kill line unreachable dead code; the comment on that line shows
    the kill was intended to run.
    """
    pid = os.getpid()
    try:
        os.kill(pid, 9)  # force kill ~ kill -9 pid
    finally:
        os._exit(1)  # fallback: exit immediately, skipping atexit handlers


class ThanhNien(MongoModel):
    """Crawler for thanhnien.com.vn.

    Walks the site's category listing pages, extracts article links,
    parses each article's content, images, author and post time,
    downloads images through the module-global ``ssh`` connection, and
    saves the result into the ``article`` collection of the MongoDB
    database named by DBNAME.
    """
    
    # Listing-page URL template: {0} = category slug, {1} = page number.
    pageUrl = 'http://www.thanhnien.com.vn/Pages/{0}.aspx?page={1}'
    
    # Site category slug -> local category display name plus the tag list
    # stored with every article crawled from that category.
    categories = {'Chinh-tri-Xa-hoi': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi', 'chinh tri', 'van hoa']},
                  'Giao-duc': {'name': 'Giáo dục', 'tag': ['giao duc']},
                  'Cong-nghe-thong-tin': {'name': 'Công nghệ số', 'tag': ['cong nghe so']},
                  'Suc-khoe': {'name': 'Sức khỏe - Giới tính', 'tag': ['suc khoe', 'gioi tinh']},
                  'Khoa-hoc': {'name': 'Khoa học', 'tag': ['khoa hoc']},
                  'The-gioi': {'name': 'Thế giới', 'tag': ['the gioi']},
                  'Van-hoa-Nghe-thuat': {'name': 'Văn hóa - Xã hội', 'tag': ['van hoa', 'nghe thuat']},
                  'Kinh-te': {'name': 'Kinh doanh', 'tag': ['kinh te', 'nha dat']},
                  'Lam-dep': {'name': 'Làm đẹp - Thời trang', 'tag': ['lam dep']},
                  'Giai-tri': {'name': 'Giải trí', 'tag': ['giai tri']},
                  'Phap-luat': {'name': 'Pháp luật', 'tag': ['phap luat']}
                  }
    
    def __init__(self, host='localhost', port=27017):
        """Open the MongoDB connection via the MongoModel base class."""
        MongoModel.__init__(self, host, port)
        
    def getLeftRootOfCategory(self, catName):
        """Look up a category by display name in the 'category' collection.

        Returns {'category': <lft>, 'root': <root_id>} when a matching
        document exists, otherwise the empty string '' (callers compare
        the result against '').
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result
    
    def standardizeTimeStr(self, timeStr):
        """Parse a 'dd/mm/YYYY HH:MM' local-time string into a UTC datetime.

        Timestamps in the future are clamped to now; adding time.timezone
        shifts local time to UTC. On any failure the current UTC time is
        returned instead.
        """
        try:
            ti = datetime.datetime.strptime(timeStr, "%d/%m/%Y %H:%M")
            now = datetime.datetime.now()
            if (now < ti): ti = now;
            return ti + datetime.timedelta(seconds=time.timezone)
        except:  # NOTE(review): bare except also hides unexpected errors
            return datetime.datetime.utcnow()
    
    def checkHasImage(self, node):
        """Return (has_image, image_count) counting <img> descendants of node."""
        if node == None: return False, 0
        f = False
        ic = 0
        for item in node.iterdescendants(tag='img'):
            ic += 1
            f = True
        return f, ic
    
    def parent(self, node, tag, maxLevel=3):
        """Walk up from node looking for an ancestor with the given tag.

        Ascends at most maxLevel parents. NOTE(review): the tag check runs
        before each ascent (so the starting node itself is tested first),
        and the last node visited is returned even when no ancestor with
        the requested tag was found.
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        """Map hash(absolute image src) -> caption text for contentNode.

        The caption is taken from the image's parent element when that
        parent has child elements; otherwise from the sibling following
        the image's enclosing <tr> (only if that sibling contains no
        images). Captions of 30 words or more are stored as ''.
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                pnode = img.getparent()
                captionText = ''
                if len(pnode) > 0:
                    captionText = commonlib.getElementText(pnode, descendant=1)
                if captionText == '':
                    # Fall back to the row right after the image's table row.
                    pnode = self.parent(img, 'tr', 3)
                    pnode = pnode.getnext()
                    if pnode != None:
                        fImg, icImg = self.checkHasImage(pnode)
                        if not fImg and icImg == 0:
                            captionText = commonlib.getElementText(pnode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 30 else '' 
        return data
    
    def getContent(self, output, contentNode, url, thumbnail=''):
        """Convert contentNode's HTML into typed items appended to output.

        Items come from html2dict.html2text(). Image items get a
        'caption' (matched against getImageAndCaption results, possibly
        consuming or trimming the following text item) and their 'data'
        is replaced by the uploaded path from downloadNUpload (via the
        module-global ssh); images that fail to upload are dropped. Text
        items matching the blacklist patterns are skipped. When thumbnail
        is '' the first uploaded image becomes the thumbnail — local
        effect only; the caller's string is not updated. Errors are
        logged and swallowed.
        """
        try:
            def loaibo(matchStr, blackListWord=[]):
                # "loaibo" ("exclude"): True when matchStr matches any
                # blacklist regex.
                for blw in blackListWord:
                    if re.search(blw, matchStr): return True
                return False
            # ----------------------------------------
            tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
            imageTable = self.getImageAndCaption(contentNode, url)
            stepOver = False  # True -> skip the next item (consumed as a caption)
            for i in range(len(tmpData)):
                if (stepOver): stepOver = False; continue
                item = tmpData[i]
                if item['type'] == 'image':
                    hashItem = item['hash']
                    try:
                        if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                            if tmpData[i+1]['data'] == imageTable[hashItem]:
                                # Next item is exactly the caption: absorb it.
                                stepOver = True
                                item['caption'] = imageTable[hashItem]
                            elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                                # Next item starts with the caption: strip it off.
                                tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                                item['caption'] = imageTable[hashItem]
                        del item['hash']
                    except: pass
                    src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                    if src != '': 
                        item['data'] = src
                        if thumbnail == '': thumbnail = src
                    else: continue
                else:
                    if loaibo(item['data'], ['>> Kỳ \d+.+', '^>>']): continue
                output.append(item)
        except:
            logger.exception(str(sys.exc_info()[1]))
    
    def processDetail(self, url, thumbnail=''):
        """Fetch and parse a single article page.

        Returns (data, postTime, author): data is the list of typed
        content items, postTime a datetime ('' if nothing was parsed)
        and author a string. The trailing author paragraph is removed
        from the content before extraction. The finally-return means
        partial results are returned even after an exception.
        """
        logger.debug('start processDetail(url={0})'.format(url))
        data = []
        postTime = ''
        author = ''
        try:
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            postTime = commonlib.getElementText(tree.xpath("//div[@class='article-header']//span[@class='date-line']"))
            postTime = self.standardizeTimeStr(postTime)
            cprint('Time : {0}'.format(postTime), 'yellow')
            contentNode = tree.xpath("//div[contains(@class, 'article-content')]")
            if len(contentNode) == 0: return
            authorNode = contentNode[0].xpath("./p[@style='text-align:right' and position()=last()]")
            if len(authorNode) > 0:
                author = commonlib.getElementText(authorNode[0], descendant=1)
                if authorNode[0].getparent() != None: authorNode[0].getparent().remove(authorNode[0])
            # ------
            # Special case: the article has one image outside the content div
            # ------
            firstImgOfNewsNode = tree.xpath("//div[@class='article-header']/following-sibling::*[1][@class='article-ds']/table")
            if len(firstImgOfNewsNode) > 0:
                fImg, icImg = self.checkHasImage(firstImgOfNewsNode[0])
                if fImg and icImg == 1:
                    src = commonlib.getAttribText(firstImgOfNewsNode[0].xpath(".//td[@class='img-news']//img"), 'src')
                    src = commonlib.urlJoin(url, src) if src != '' else ''
                    if src != '':
                        src = commonlib.downloadNUpload(ssh, src, DOWNLOAD_PATH, PREFIX)
                        if src != '':
                            text = commonlib.getElementText(firstImgOfNewsNode[0].xpath(".//td[@class='caption']"), descendant=1)
                            captionText = ''
                            if commonlib.wordCount(text) < 35: captionText = text
                            data.append({'type': 'image', 'data': src, 'caption': captionText})
            self.getContent(data, contentNode[0], url, thumbnail)
            # -------------------------------------------------------------------
            # Debug dump of the extracted items to stdout.
            for item in data:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
        except:
            cprint('ERROR: at url={0}'.format(url), 'red')
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data, postTime, author
        
    def processCategory(self, catId, url, currentProcessed=0, currentDuplicated=0):
        """Crawl one listing page of a category and store new articles.

        Skips articles already present in the 'article' collection (by
        MD5 of the link) and stops once either running counter exceeds
        its MAX_* limit. Returns the updated
        (currentProcessed, currentDuplicated) pair; the finally-return
        yields the counters even after an exception.
        """
        logger.debug('start processCategory(catId={0}, url={1}, currentProcessed={2}, currentDuplicated={3}'.format(catId, url, currentProcessed, currentDuplicated))
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                logger.info(unicode('chuyen muc: "%s" khong ton tai' % self.categories[catId]['name']), 'utf8')
                return
            tree = commonlib.getXMLTree(url)
            if tree == None or tree == '': return
            for item in tree.xpath("//div[@id='divtoptin']"):
                if currentDuplicated > MAX_DUPLICATE or currentProcessed > MAX_PROCESSED: return
                link = commonlib.getAttribText(item.xpath(".//div[@class='lvkd-title']/a"), 'href')
                if link == '': return
                link = commonlib.urlJoin(url, link)
                hashUrl = commonlib.getMD5Hash(link)
                if collection.find_one({'hashUrl': hashUrl}):
                    currentDuplicated += 1
                    cprint('Already existed in databse: {0}'.format(link), 'red')
                    continue
                title = commonlib.getElementText(item.xpath(".//div[@class='lvkd-title']/a"))
                cprint(title, 'magenta')
                description = commonlib.getElementText(item.xpath(".//div[@class='lvkd-nd']/div[@class='text-nt']"), descendant=1)
                cprint(description, 'green')
                thumbnail = commonlib.getAttribText(item.xpath(".//div[@class='lvkd-nd']//a/img"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                if thumbnail != '':
                    thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                cprint('thumbnail: {0}'.format(thumbnail), 'yellow')
                content, postTime, author = self.processDetail(link, thumbnail)
                if len(content) == 0: continue
                collection.save({'hashUrl': hashUrl,
                    'title': title,
                    'thumbnail': thumbnail,
                    'description': description,
                    'content': content,
                    'newsLink': link,
                    'update': postTime,
                    'source': 'thanhnien.com.vn',
                    'author': author,
                    'category': cat['category'],
                    'root': cat['root'],
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    'tag': self.categories[catId]['tag']
                })
                currentProcessed += 1
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return currentProcessed, currentDuplicated
    
    def process(self, catId):
        """Crawl listing pages 1..4 of one category until a limit trips.

        Accumulates results into the module globals totalNewsCrawlered /
        totalNewsDuplicated; intended to run as a workerpool job, one
        call per category slug.
        """
        logger.debug('start process(catId={0})'.format(catId))
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            currentProcessed = 0
            currentDuplicated = 0
            for i in range(1, 5):
                url = self.pageUrl.format(catId, i)
                currentProcessed, currentDuplicated = self.processCategory(catId, url, currentProcessed, currentDuplicated)
                if currentDuplicated > MAX_DUPLICATE or currentProcessed > MAX_PROCESSED:
                    if currentDuplicated > MAX_DUPLICATE: logger.info(unicode('số bài viết trùng quá ngưỡng {0} bài viết'.format(MAX_DUPLICATE), 'utf8'))
                    if currentProcessed > MAX_PROCESSED: logger.info(unicode('số bài viết quá giới hạn {0} cho phép'.format(MAX_PROCESSED), 'utf8'))
                    break
            totalNewsCrawlered += currentProcessed
            totalNewsDuplicated += currentDuplicated
        except:
            logger.error(str(sys.exc_info()[1]))
        finally:
            return

def quitIfTimeout():
    """Watchdog thread: kill the whole process after ~10 minutes.

    Every 10 seconds compares time.time() against the module-global
    'lastaction'; once the gap exceeds 600 seconds it logs a summary
    (critical when nothing was crawled at all) and issues 'kill -9' on
    our own pid. NOTE(review): 'lastaction' is assigned once at startup
    and never refreshed anywhere visible in this file, so this fires
    about 10 minutes after start regardless of crawler activity —
    confirm that is intended.
    """
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức thanhnien.com.vn không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    import argparse
    import threading
    lastaction = time.time()
    # -- Phần argument command line
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Phần khởi tạo kết nối ssh tới server mana.vn
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler thanhnien.com.vn at {0}'.format(datetime.datetime.now()))
    totalNewsCrawled = 0
    thanhnien = ThanhNien(MONGO_SERVER, MONGO_PORT)
    try:
        pool = workerpool.WorkerPool(size=2)
        pool.map(thanhnien.process, thanhnien.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    logger.info('total news crawled: {0}'.format(totalNewsCrawled))
    if ssh is not None: ssh.close()
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức thanhnien.com.vn không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler thanhnien.com.vn at {0}'.format(datetime.datetime.now()))
    forceQuit()
