# -*- coding: utf-8 -*-
import traceback, os, re, sys
import commonlib, datetime, time
import workerpool
import html2dict
import mechanize
import argparse
import logging
from MongoModel import MongoModel
from lxml import etree
from termcolor import cprint
from mongolog.handlers import MongoHandler

# Stop crawling a category after this many freshly processed articles per run.
MAX_PROCESSED = 15
# ...or after this many already-seen (duplicate) articles per run.
MAX_DUPLICATED = 10
# Target MongoDB database name.
DBNAME = 'tintuc_v2'
# Local directory where downloaded article images are staged.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Mana server (64bit)
MONGO_PORT = 27017
# Remote upload path prefix rewritten into the saved image URLs.
PREFIX = '/uploads/news/'
# Logger backed by MongoDB (mongolog MongoHandler), tagged 'vnexpress.net'.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'vnexpress.net')

def forceQuit():
    """Terminate the current process immediately, skipping all cleanup.

    Sends SIGKILL (kill -9 on linux) to our own pid, then falls back to
    os._exit(1) if the signal could not be delivered.

    Bug fixed: the original called os._exit(1) *before* os.kill(), so the
    kill line was unreachable dead code.
    """
    pid = os.getpid()
    try:
        os.kill(pid, 9)  # SIGKILL: cannot be caught or ignored by the process
    except OSError:
        pass
    os._exit(1)  # last resort: exit without running atexit/finally handlers
    
class NewsCrawler(MongoModel):
    """Crawler for news articles on vnexpress.net.

    Walks each category's listing pages, deduplicates articles by the MD5
    hash of their final (post-redirect) URL, downloads article detail
    pages (following 'Xem tiếp' continuation links), normalizes their
    text/image content, and saves the records into the 'article'
    collection of the `tintuc_v2` MongoDB database.
    """

    # Listing-page URL template; {0} is one of the category paths below.
    rootUrl = 'http://vnexpress.net/gl/{0}/'
    # vnexpress category path -> display name (matched against the
    # 'category' collection) and the tag list stored with each article.
    categories = {'xa-hoi': {'name': unicode('Văn hóa - Xã hội', 'utf8'), 'tag': ['xa hoi']},
        'the-gioi': {'name': unicode('Thế giới', 'utf8'), 'tag': ['the gioi']},
        'kinh-doanh': {'name': unicode('Kinh doanh', 'utf8'), 'tag': ['kinh doanh']},
        'phap-luat': {'name': unicode('Pháp luật', 'utf8'), 'tag': ['phap luat']},
        'the-thao': {'name': unicode('Thể thao', 'utf8'), 'tag': ['the thao']},
        'khoa-hoc': {'name': unicode('Khoa học', 'utf8'), 'tag': ['khoa hoc']},
        'vi-tinh': {'name': unicode('Công nghệ số', 'utf8'), 'tag': ['cong nghe so']},
        'xa-hoi/giao-duc': {'name': unicode('Giáo dục', 'utf8'), 'tag': ['giao duc']},
        'van-hoa': {'name': unicode('Giải trí', 'utf8'), 'tag': ['giai tri', 'van hoa']}
        }

    def __init__(self, host='localhost', port=27017):
        # MongoModel establishes self.connection to the given MongoDB server.
        MongoModel.__init__(self, host, port)

    def standardizeTimeStr(self, itemTime, itemDate):
        """Parse 'dd/mm/YYYY' + 'HH:MM' page strings into a UTC datetime.

        Adding time.timezone shifts the parsed wall-clock time by the
        host's UTC offset — NOTE(review): this assumes the page times are
        in the host's local timezone; confirm the deploy host runs in the
        site's timezone.  Falls back to the current UTC time on any
        parsing failure (logged).
        """
        # Strip everything except digits and '/' — the page decorates the
        # date label with extra characters/whitespace.
        itemDate = re.sub(r'[^0-9/]', '', itemDate).strip()
        timeStr = "{0} {1}".format(itemDate, itemTime)
        try:
            return datetime.datetime.strptime(timeStr, "%d/%m/%Y %H:%M") + datetime.timedelta(seconds=time.timezone)
        except:
            logger.error('error while standardize time string: ({0},{1})'.format(itemTime, itemDate))
            return datetime.datetime.utcnow()

    def getLeftRootOfCategory(self, catName):
        """Look up a category's nested-set position in the DB.

        Returns {'category': lft, 'root': root_id} from the 'category'
        collection, or the empty string '' (not None) when no document
        with data == catName exists — callers compare against ''.
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result

    def parent(self, node, tag, maxLevel=3):
        """Walk upward from `node` looking for an element with tag `tag`.

        Checks `node` itself first, then climbs at most `maxLevel`
        ancestors.  Returns the matching element, or the last element
        examined when nothing matched within the limit (never None).
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode

    def getImageAndCaption(self, contentNode, url):
        """Build a map of hash(absolute img src) -> caption text.

        The caption is taken from the element following the image's
        enclosing <tr> (vnexpress lays images out inside tables).
        Captions of 40+ words are assumed to be body text and replaced
        with ''.
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                pnode = self.parent(img, 'tr', 3)
                captionNode = pnode.getnext()
                captionText = ''
                if captionNode != None:
                    captionText = commonlib.getElementText(captionNode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else ''
        return data

    def getContent(self, output, contentNode, url, thumbnail=''):
        """Convert an article content node into typed items appended to `output`.

        Items come from html2dict.html2text; image items get a 'caption'
        matched via getImageAndCaption, are downloaded/uploaded through
        the module-global `ssh` session, and have 'data' rewritten to the
        uploaded path.  Text items matching the blacklist are skipped.

        NOTE(review): `thumbnail` is rebound locally when the first image
        uploads, but strings are immutable, so the caller never sees the
        new value — getDetailNews's data['thumbnail'] stays ''.
        """
        def loaibo(matchStr, blackListWord=[]):
            # "loaibo" ~ "exclude": True when matchStr starts with or
            # regex-matches any blacklisted word.
            for blw in blackListWord:
                if matchStr.startswith(blw): return True
                if re.search(blw, matchStr): return True
            return False
        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        stepOver = False  # True => skip the next item (consumed as a caption)
        for i in range(len(tmpData)):
            if (stepOver): stepOver = False; continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    # Attach the caption and drop/trim the duplicated
                    # caption text that immediately follows the image.
                    if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                        if tmpData[i+1]['data'] == imageTable[hashItem]:
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                            tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                except: pass  # missing caption entry or last item: keep image as-is
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                if src != '':
                    item['data'] = src
                    if thumbnail == '': thumbnail = src
                else: continue  # download/upload failed: drop this image item
            else:
                # Skip navigation leftovers such as '>Xem tiếp'.
                if loaibo(item['data'], ['>Xem']): continue
            output.append(item)

    def getUrlResponse(self, url):
        """Open `url` with mechanize and return the final (post-redirect) URL."""
        req = mechanize.Request(url)
        res = mechanize.urlopen(req)
        return res.geturl()

    def getDetailNews(self, url):
        """Fetch and parse one article, following 'Xem tiếp' continuation pages.

        Returns a dict with 'description', 'author', 'content' (list of
        typed items) and 'thumbnail'.

        NOTE(review): because of the `finally: return data`, the early
        `return` statements and any raised exception are overridden —
        callers ALWAYS receive the dict, possibly with empty fields.
        NOTE(review): 'thumbnail' is never filled in here (see getContent).
        """
        logger.debug('call getNewsDetail(url={0})'.format(commonlib.encodeUTF8Str(url)))
        data = {'description': '', 'author': '', 'content': [], 'thumbnail': ''}
        try:
            while True:
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: return
                # Continuation link for multi-page articles.
                nextNode = tree.xpath("//table//strong/a[contains(., 'Xem tiếp')]".decode('utf-8'))
                nextUrl = commonlib.getAttribText(nextNode, 'href')
                if nextNode != None:
                    # Remove the link so it doesn't appear in the content.
                    if len(nextNode) > 0: nextNode[0].getparent().remove(nextNode[0])
                nextUrl = commonlib.urlJoin(url, nextUrl) if nextUrl != '' else ''
                contentNode = tree.xpath("//div[@class='content']/div[@cpms_content='true']")
                if len(contentNode) == 0: return
                descNode = tree.xpath("//div[@id='content']//h2[@class='Lead']")
                commonlib.cleanElementWithTag(descNode, remove_tags=['a'])
                desc = commonlib.getElementText(descNode, descendant=1).replace('>', '')
                data['description'] = desc
                data['author'] = commonlib.getElementText(tree.xpath("//div[@id='content']//p[@class='Normal' and @align='right']"), descendant=1)
                # Strip title/lead/tags/author paragraphs already captured above.
                commonlib.cleanElementWithAttrib(contentNode[0], 'h1', [('class', 'Title')])
                commonlib.cleanElementWithAttrib(contentNode[0], 'h2', [('class', 'Lead')])
                commonlib.cleanElementWithAttrib(contentNode[0], 'div', [('class', 'tag-parent')])
                commonlib.cleanElementWithAttrib(contentNode[0], 'p', [('class', 'Normal'), ('align', 'right')])
                content = []
                self.getContent(content, contentNode[0], url, data['thumbnail'])
                data['content'] = content
                cprint('Author: {0}'.format(data['author']), 'green')
                cprint('Description: {0}'.format(desc))
                print '--------------------------------------------------'
                # -------------------------------------------------------------------
                # Debug dump of the parsed content items.
                for item in content:
                    if item['type'] == 'image':
                        print item['type'], ': ', item['data'], '-> ', item['caption']
                    else:
                        print item['type'], ': ', item['data']
                # -------------------------------------------------------------------
                if nextUrl == '': break
                url = nextUrl
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data

    def getListNews(self, catId):
        """Crawl one category: listing pages -> detail pages -> MongoDB.

        Stops after MAX_PROCESSED new or MAX_DUPLICATED already-seen
        articles; deduplicates by MD5 of the final article URL.  Updates
        the module-global counters read by the watchdog thread.
        """
        logger.debug('call getListNews(catId={0})'.format(catId))
        url = self.rootUrl.format(catId)
        global totalNewsCrawlered, totalNewsDuplicated
        currentProcessed = 0
        currentDuplicated = 0
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            url = self.rootUrl.format(catId)
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '': logger.warning('The category {0} doesn\'t exist'.format(commonlib.toAscii(self.categories[catId]['name']))); return
            while url != '':
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    logger.info('stop crawler category {0}; currentProcessed = {1}; currentDuplicated = {2}'.format(commonlib.toAscii(self.categories[catId]['name']), currentProcessed, currentDuplicated))
                    break
                itemQueue = {}
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: return
                # Featured article at the top of the listing page.
                # NOTE(review): xpath returns a list, so `!= None` is always
                # true; an empty result raises IndexError below, swallowed
                # by the outer broad except.
                firstItemNode = tree.xpath("//div[@class='folder-top']")
                if firstItemNode != None:
                    link = commonlib.getAttribText(firstItemNode[0].xpath('.//a'), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    resLink = self.getUrlResponse(link)
                    hashUrl = commonlib.getMD5Hash(resLink)
                    if collection.find_one({'hashUrl': hashUrl}):
                        currentDuplicated += 1
                        logger.info('This article already existed in database !')
                    else:
                        thumbnail = commonlib.getAttribText(firstItemNode[0].xpath(".//a/img[contains(@class, 'img-topsubject')]"), 'src')
                        thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
#                        if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                        # NOTE(review): identical to the commented line above.
                        if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                        title = commonlib.getElementText(firstItemNode[0].xpath(".//a[@class='link-topnews']"), descendant=1)
                        itemTime = commonlib.getElementText(firstItemNode[0].xpath(".//label[@class='item-time']"))
                        itemDate = commonlib.getElementText(firstItemNode[0].xpath(".//label[@class='item-date']"))
                        postDate = self.standardizeTimeStr(itemTime, itemDate)
                        itemQueue[hashUrl] = {'link': resLink, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'postDate': postDate}
                        currentProcessed += 1
                # Remaining listing entries (the last folder-news div is
                # the pager, hence position()<last()).
                for item in tree.xpath("//div[@class='folder-news' and position()<last()]"):
                    if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                        logger.info('stop crawler category {0}; currentProcessed = {1}; currentDuplicated = {2}'.format(commonlib.toAscii(self.categories[catId]['name']), currentProcessed, currentDuplicated))
                        break
                    title = commonlib.getElementText(item.xpath("./p/a[@class='link-title']"), descendant=1)
                    link = commonlib.getAttribText(item.xpath("./p/a[@class='link-title']"), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    if link == '': continue
                    resLink = self.getUrlResponse(link)
                    hashUrl = commonlib.getMD5Hash(resLink)
                    if collection.find_one({'hashUrl': hashUrl}):
                        currentDuplicated += 1
                        logger.info('This article already existed in database !')
                        continue
                    currentProcessed += 1
                    thumbnail = ''
                    thumbnailNode = item.xpath(".//a/img[contains(@class, 'img-subject')]")
                    if thumbnailNode != None:
                        thumbnail = commonlib.getAttribText(thumbnailNode, 'src')
                        thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                        if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    itemTime = commonlib.getElementText(item.xpath(".//label[@class='item-time']"))
                    itemDate = commonlib.getElementText(item.xpath(".//label[@class='item-date']"))
                    postDate = self.standardizeTimeStr(itemTime, itemDate)
                    itemQueue[hashUrl] = {'link': resLink, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'postDate': postDate}
                # Fetch and persist every queued article from this page.
                for item in itemQueue.values():
                    link = item['link']
                    title = item['title']
                    print '--------------------------------------------------'
                    print 'Title: {0}'.format(title)
                    cprint('Link: {0}'.format(link), 'cyan')
                    cprint('Postdate: {0}'.format(item['postDate']), 'yellow')
                    detail = self.getDetailNews(link)
                    # NOTE(review): the second condition is dead — detail is
                    # always a 4-key dict (see getDetailNews's finally).
                    if ((len(detail['content']) == 0) or (len(detail) == 0)): continue
                    collection.save({'hashUrl': item['hashUrl'],
                        'title': title,
                        # NOTE(review): detail['thumbnail'] is always '' (see
                        # getContent), so this fallback never triggers.
                        'thumbnail': item['thumbnail'] if item['thumbnail'] != '' else detail['thumbnail'],
                        'description': detail['description'],
                        'content': detail['content'],
                        'newsLink': link,
                        'update': item['postDate'],
                        'source': 'vnexpress.net',
                        'author': '',
                        'category': cat['category'],
                        'root': cat['root'],
                        'is_active': True,
                        'lastupdate': datetime.datetime.utcnow(),
                        'timestamp': time.time(),
                        'date': datetime.datetime.utcnow(),
                        'tag': self.categories[catId]['tag']
                        })
                    totalNewsCrawlered += 1
                itemQueue = {}
                # Advance to the next listing page via the 'Xem tiếp' pager.
                # NOTE(review): xpath never returns None, so the guard below
                # is always false; an empty result raises IndexError at
                # nextNode[0], swallowed by the outer except.
                nextNode = tree.xpath("//div[@class='folder-news']/div[@class='continue']/a[contains(., 'Xem tiếp')]".decode('utf-8'))
                if nextNode == None: url = ''
                urlNext = commonlib.getAttribText(nextNode[0], 'href')
                urlNext = commonlib.urlJoin(url, urlNext) if urlNext != '' else ''
                if urlNext == '': url = ''
                url = urlNext.replace(' ', '%20')
            totalNewsDuplicated += currentDuplicated
        except:
            logger.exception(str(sys.exc_info()[1]))
        logger.debug('finished getListNews(catId={0})'.format(catId))

def quitIfTimeout():
    """Watchdog loop: SIGKILL the whole process after 10 idle minutes.

    Runs in a background thread.  Every 10 seconds it compares the
    current time against the module-global `lastaction` timestamp; once
    the gap exceeds 600 seconds it logs final statistics (module-global
    counters) and kills the process — the worker pool offers no
    cooperative cancellation.

    Bug fixed: the original used os.system("kill -9 {pid}"), which
    spawns a shell, is linux-only in an avoidable way, and never raises,
    so the surrounding except ('could not kill') could never trigger on
    a failed kill.  os.kill() raises OSError on failure, making the
    handler meaningful.
    """
    logger.debug('call quitIfTimeout')
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức vnexpress.net không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.kill(pid, 9)  # SIGKILL, no shell needed (kill -9 pid)
        except Exception:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    # Progress counters shared (as module globals) with the watchdog
    # thread and NewsCrawler.getListNews.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    import threading
    lastaction = time.time()  # watchdog reference timestamp
    # -- Command-line argument parsing
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Open the SSH connection to the mana.vn upload server ('sftp' mode
    # only); otherwise `ssh` stays None and downloads happen server-side.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    pid = os.getpid()
    # Watchdog thread: kills the process after 10 minutes of inactivity.
    # NOTE(review): it is not marked daemon, so the forceQuit() at the end
    # of this script is what ultimately terminates the process.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler vnexpress.net with pid: {0}'.format(pid))
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Crawl the categories two at a time.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final run statistics (critical when nothing at all was crawled).
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức vnexpress.net không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler vnexpress.net with pid: {0} at {1}'.format(pid, datetime.datetime.now()))
    forceQuit()
