# -*- coding: utf-8 -*-

import traceback, os
import commonlib, datetime, time
import workerpool
import logging
import re
import html2dict
import sys
import threading
from MongoModel import MongoModel
from MongoDbLog import MongoDbLog
from termcolor import cprint
from lxml import etree
from readability.readability import Document

MAX_PROCESSED = 15    # per-category cap on newly saved articles before stopping
MAX_DUPLICATED = 10   # per-category cap on already-seen articles before stopping
DBNAME = 'tintuc_v2'  # MongoDB database holding 'article' and 'category' collections
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'  # local dir images are downloaded to
MONGO_SERVER = 'beta.mana.vn'    # Mana server, 64-bit
MONGO_PORT = 27017
PREFIX = '/uploads/news/'  # remote path prefix used when uploading images
# Logger that writes into MongoDB, tagged with this crawler's source name.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'news.zing.vn')
# NOTE(review): not referenced anywhere in this file — possibly used by commonlib callers elsewhere.
iphoneUserAgent = 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16'

def forceQuit():
    """Terminate this process immediately with exit status 1.

    Uses os._exit(), which bypasses atexit hooks, finally clauses and
    interpreter cleanup — intentional here, since the crawler may have
    non-daemon threads (the watchdog) that would otherwise keep it alive.

    Fix: the original body ran ``pid = os.getpid()`` and placed
    ``os.kill(pid, 9)`` AFTER ``os._exit(1)``; os._exit never returns, so
    the kill was unreachable dead code and the pid lookup unused — both
    removed. Observable behavior (immediate exit with status 1) is unchanged.
    """
    os._exit(1)

class CrawlerNews(MongoModel):
    """Crawler for news.zing.vn.

    Walks category listing pages, extracts each article's title, description,
    thumbnail and body, downloads/re-uploads the images, and saves the
    articles into the 'article' collection of the tintuc_v2 MongoDB database.

    NOTE(review): getListNews/getDetailNews read the module-level global
    ``ssh``, which is only assigned in the __main__ section of this file;
    using this class from another module would raise NameError there.
    """

    # Listing-page URL template: {0} = category slug, {1} = 1-based page number.
    rootUrl = 'http://www.zing.vn/news/{0}.html?p={1}'
    # Maps zing.vn category slugs to the local display name ('name') and the
    # tag list ('tag') stored with every article saved from that category.
    categories = {'xa-hoi': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi']},
        'the-gioi': {'name': 'Thế giới', 'tag': ['the gioi']},
        'nhip-song-tre/giao-duc': {'name': 'Giáo dục', 'tag': ['giao duc']},
        'am-nhac': {'name': 'Giải trí', 'tag': ['am nhac', 'giai tri']},
        'phim-anh': {'name': 'Giải trí', 'tag': ['phim anh', 'giai tri']},
        'giai-tri/choi-vui': {'name': 'Giải trí', 'tag': ['giai tri']},
        'sanh-dieu/mobile': {'name': 'Công nghệ số', 'tag': ['cong nghe so', 'mobile']},
        'sanh-dieu/cong-nghe': {'name': 'Công nghệ số', 'tag': ['cong nghe so']},
        'tinh-yeu-gioi-tinh': {'name': 'Tâm sự', 'tag': ['tinh yeu', 'gioi tinh', 'tam su']},
        'hinh-su': {'name': 'Pháp luật', 'tag': ['hinh su', 'phap luat']},
        'dep': {'name': 'Làm đẹp - Thời trang', 'tag': ['dep']},
        'giai-tri/an-ngon': {'name': 'Ẩm thực', 'tag': ['am thuc']}
        }

    def __init__(self, host='localhost', port=27017):
        """Open the MongoDB connection via MongoModel."""
        MongoModel.__init__(self, host, port)
        self.__alreadyDetected = False  # NOTE(review): assigned but never read in this file
        self.__step = 12                # NOTE(review): assigned but never read in this file

    def getLeftRootOfCategory(self, catName):
        """Look up a category by display name in the 'category' collection.

        Returns {'category': <lft>, 'root': <root_id>} on success, or the
        empty string '' when no matching document exists — callers must test
        for '' before indexing the result (see the note in getListNews).
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result

    def standardizeTimeString(self, timeStr):
        """Parse a zing.vn timestamp such as "Thứ sáu, 29/07/2011 | 06:26"
        (Friday, 29/07/2011 | 06:26) into a naive UTC datetime.

        Falls back to datetime.utcnow() when parsing fails.
        """
        try:
            # Rewrite any literal '/12' to '/<current year>' — presumably a
            # workaround for pages that print an abbreviated year.
            # NOTE(review): this would also corrupt December dates such as
            # '24/12/2011' -> '24/2012/2011'. TODO confirm intent.
            timeStr = re.sub(r'/12', '/{0}'.format(datetime.datetime.now().year), timeStr)
            pat = r'(\d+/\d+/\d+ \d+:\d+)'
            timeStr = commonlib.extractWithRegEx(pat, timeStr, 1)
            # Adding time.timezone shifts the site's local time to UTC.
            return datetime.datetime.strptime(timeStr, "%d/%m/%Y %H:%M") + datetime.timedelta(seconds=time.timezone)
        except:
            # Message: "Could not normalize the article's timestamp (...)"
            cprint('Không chuẩn hóa được thời gian của bài viết ({0})'.format(timeStr))
            return datetime.datetime.utcnow()

    def identifyId(self, url):
        '''Derive a stable article id from an article URL, e.g.
        http://www.zing.vn/news/xa-hoi/co-son-nu-si-tinh-lac-loi-me-hon-tran/a122690.html
        yields "zing_122690". Returns '' when the URL does not match the pattern.'''
        logger.debug(unicode('call identifyId from url={0}'.format(url), 'utf8'))
        pat = r'/[a-z]+(\d+)\.html'
        id = commonlib.extractWithRegEx(pat, url, 1)
        cprint('id of news {0}'.format(id), 'yellow')
        return "zing_{0}".format(id) if id != '' else ''

    def parent(self, node, tag, maxLevel=3):
        """Walk upward from ``node`` (the node itself is tested first) looking
        for an element whose tag equals ``tag``, climbing at most
        ``maxLevel`` + 1 loop iterations.

        Returns the matching element, or the last element examined when no
        match was found within the limit (callers re-check ``.tag``).
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode

    def getImageAndCaption(self, contentNode, url):
        """Collect captions for every <img> under ``contentNode``.

        Returns a dict mapping hash(absolute image src) -> caption text.
        A caption is only found when the image sits inside a <tr>: the text
        is taken from the '.pCaption' element of the row's next sibling.
        Captions of 40 words or more are replaced by '' (treated as body
        text rather than a caption).
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                pnode = self.parent(img, 'tr', 2)
                captionText = ''
                if pnode.tag == 'tr':
                    captionNode = pnode.xpath("./following-sibling::*[1]//*[@class='pCaption']")
                    if len(captionNode) > 0:
                        captionText = commonlib.getElementText(captionNode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else ''
        return data

    def getDetailNews(self, url, thumbnail=''):
        """Fetch one article page and return {'content': [...], 'postDate': ...}.

        Content items come from html2dict.html2text; image items get their
        caption attached (via getImageAndCaption) and their src replaced by
        the uploaded location from commonlib.downloadNUpload. Uses the
        module-level global ``ssh`` for the upload.

        NOTE(review): the ``finally: return data`` both swallows every
        exception raised in the try block and overrides the early bare
        ``return`` at the ``tree == None`` check — this method always
        returns the (possibly empty) data dict, never None.
        """
        logger.debug(unicode('start getDetailNews(url={0})'.format(url), 'utf8'))
        data = {'content': [], 'postDate': ''}
        try:
            tree = commonlib.getXMLTree(url)
            if tree == None: return
            postTime = commonlib.getElementText(tree.xpath("//div[@class='container']//div[@class='datetime']"))
            postTime = self.standardizeTimeString(postTime)
            print 'Post Time: {0}'.format(postTime)
            data['postDate'] = postTime
            contentNode = tree.xpath("//div[@id='content_document']")
            if contentNode == None: return data
            contentNode = contentNode[0]
            # Strip title / source / author elements so only the body remains.
            for item in [('h1', [('class', 'pTitle')]), ('p', [('class', 'pSource')]), ('p', [('class', 'pAuthor')])]:
                commonlib.cleanElementWithAttrib(contentNode, item[0], item[1])
            commonlib.dropTagWithIndex(contentNode, 'p', 1, ('class', 'pHead'))
            tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
            imageTable = self.getImageAndCaption(contentNode, url)
            stepOver = False  # True => next text item was consumed as an image caption
            content = []
            for i in range(len(tmpData)):
                if (stepOver): stepOver = False; continue
                item = tmpData[i]
                if item['type'] == 'image':
                    hashItem = item['hash']
                    try:
                        # Attach the caption if the following text item is (or
                        # starts with) the caption text found in the HTML.
                        if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                            if tmpData[i+1]['data'] == imageTable[hashItem]:
                                stepOver = True
                                item['caption'] = imageTable[hashItem]
                            elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                                tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                                item['caption'] = imageTable[hashItem]
                        del item['hash']
                    except: pass  # KeyError / IndexError at list end: leave item as-is
                    # Strip leading '../' sequences, then download + upload the image.
                    src = commonlib.downloadNUpload(ssh, re.sub(r'(\.\./)+', '', item['data']), DOWNLOAD_PATH, PREFIX)
                    if src != '':
                        item['data'] = src
                        # First successfully uploaded image becomes the thumbnail.
                        if thumbnail == '': thumbnail = src
                    else: continue  # drop images that failed to upload
                content.append(item)
            data['content'] = content
            # Debug dump of the extracted content items.
            # NOTE(review): image items that matched no caption never get a
            # 'caption' key, so this print can raise KeyError — swallowed by
            # the finally-return after data['content'] was already assigned.
            print '----------------------------------------'
            for i in content:
                if i['type'] == 'image': print i['type'], ' : ', i['data'], ' ( ' , i['caption'], ' ) '
                else:
                    print i['type'], ' : ', i['data']
            print '----------------------------------------'
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data

    def getListNews(self, catId, url, currentProcessed=0, currentDuplicated=0):
        """Scrape one listing page of category ``catId`` and persist its articles.

        Collects candidate articles from the 'dot_Hotnews' (hot) and
        'channelList' (regular) sections, skips those already present in the
        'article' collection (by MD5 of the zing article id), then fetches
        each remaining article via getDetailNews and saves it.

        Returns the updated (currentProcessed, currentDuplicated) counters.
        NOTE(review): the ``finally: return`` swallows any exception raised
        in the try block.
        """
        logger.debug(unicode('start getListNews(catId={0}, url={1}, currentProcessed={2}, currentDuplicated={3})'.format(catId, url, currentProcessed, currentDuplicated), 'utf8'))
        try:
            listOfNodes = []
            db = self.connection[DBNAME]
            collection = db['article']
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            # NOTE(review): ``cat`` is indexed here BEFORE the ``cat == ''``
            # guard below — a missing category raises TypeError on this line
            # (caught by the outer except), so the guard is effectively dead.
            cprint("root={0}, left={1}".format(cat['root'], cat['category']), 'red')
            if cat == '':
                # Message: 'Category "%s" does not exist !!!'
                # NOTE(review): the ``, 'utf8'`` sits OUTSIDE the unicode()
                # call — it is passed as a second argument to logger.info, and
                # unicode() is left without an encoding for a non-ASCII bytestring.
                logger.info(unicode('Chuyên mục "%s" không tồn tại !!!' % self.categories[catId]['name']), 'utf8')
                return
            tree = commonlib.getXMLTree(url)
            if tree == None or tree == '':
                # Message: "Error: could not build the tree"
                cprint('Error: Không build được tree', 'red'); return
            # --- Hot-news section ---
            for item in tree.xpath("//div[@class='dot_Hotnews']"):
                link = commonlib.getAttribText(item.xpath(".//div[@class='head01']/a"), 'href')
                if link == '': continue
                link = commonlib.urlJoin(url, link)
                thumbnail = commonlib.getAttribText(item.xpath(".//div[1]/a/img"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                title = commonlib.getElementText(item.xpath(".//div[@class='head01']/a"), descendant=1)
                description = commonlib.stringify(item.xpath("./p"))
                # --> duplicate check
                id = self.identifyId(link)
                hashUrl = commonlib.getMD5Hash(id)
                if collection.find_one({'hashUrl': hashUrl}):
                    currentDuplicated += 1
                    cprint('Already existed in database !!!', 'red')
                    continue
                listOfNodes.append({'title': title, 'link': link, 'thumbnail': thumbnail, 'description': description, 'hashUrl': hashUrl})
            # --- Regular channel list section ---
            for item in tree.xpath("//div[@id='channelList']/div"):
                link = commonlib.getAttribText(item.xpath(".//p[@class='cont09title']//a"), 'href')
                if link == '': continue
                link = commonlib.urlJoin(url, link)
                thumbnail = commonlib.getAttribText(item.xpath(".//div[@class='cont09im']/a/img"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                title = commonlib.getElementText(item.xpath(".//p[@class='cont09title']"), descendant=1)
                description = commonlib.getElementText(item.xpath(".//p[@class='cont09txt']"), descendant=1)
                # --> duplicate check
                id = self.identifyId(link)
                hashUrl = commonlib.getMD5Hash(id)
                if collection.find_one({'hashUrl': hashUrl}):
                    currentDuplicated += 1
                    logger.info(unicode('This news already existed in database with hashUrl={0}'.format(hashUrl), 'utf8'))
                    continue
                listOfNodes.append({'title': title, 'link': link, 'thumbnail': thumbnail, 'description': description, 'hashUrl': hashUrl})
            # --- Fetch details and persist each collected article ---
            for item in listOfNodes:
                hashUrl = item['hashUrl']
                if collection.find_one({'hashUrl': hashUrl}): continue
                # ------------------------------------------------
                print '----------------------------------------------------------'
                # Message: 'Category {0}'
                cprint('Chuyên mục {0}'.format(self.categories[catId]['name']), 'green')
                # NOTE(review): ``id`` here is stale — it is whatever value the
                # parsing loops above left behind, not this item's id.
                print 'ID: ', id
                print 'Title: ', item['title']
                print 'Description: ', item['description']
                print 'Link: ', item['link']
                print 'Thumbnail: ', item['thumbnail']
                print '----------------------------------------------------------'
                # ------------------------------------------------
                data = self.getDetailNews(item['link'], item['thumbnail'])
                currentProcessed += 1
                if len(data['content']) == 0: continue  # skip articles with empty bodies
                collection.save({'hashUrl': hashUrl,
                    'title': item['title'],
                    'thumbnail': item['thumbnail'],
                    'description': item['description'],
                    'content': data['content'],
                    'newsLink': item['link'],
                    'update': data['postDate'],
                    'source': 'news.zing.vn',
                    'author': '',
                    'category': cat['category'],
                    'root': cat['root'],
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    'tag': self.categories[catId]['tag']
                })
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return currentProcessed, currentDuplicated

    def process(self, catId):
        """Crawl up to two listing pages of one category, stopping early once
        MAX_PROCESSED new articles or MAX_DUPLICATED duplicates are exceeded,
        and add the per-category counts to the module-level totals.
        """
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            currentProcessed = 0
            currentDuplicated = 0
            for iPage in range(1, 3):
                cprint('***** GO TO PAGE {0}'.format(iPage))
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    # Messages: "Category {0} stopped: exceeded the allowed limit {1}"
                    # and "Category {0} stopped: duplicate count exceeded the allowed limit {1}"
                    if currentProcessed > MAX_PROCESSED: cprint('Chuyên mục {0} dừng vì vượt quá giới hạn {1} cho phép'.format(self.categories[catId]['name'], MAX_PROCESSED))
                    if currentDuplicated > MAX_DUPLICATED: cprint('Chuyên mục {0} dừng vì số tin trùng quá giới hạn {1} cho phép'.format(self.categories[catId]['name'], MAX_DUPLICATED))
                    break
                url = self.rootUrl.format(catId, iPage)
                currentProcessed, currentDuplicated = self.getListNews(catId, url, currentProcessed, currentDuplicated)
            totalNewsCrawlered += currentProcessed
            totalNewsDuplicated += currentDuplicated
        except:
            logger.error(str(sys.exc_info()[1]))
        return
        
def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức news.zing.vn không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    # Totals updated by CrawlerNews.process and read by the watchdog thread.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    # Watchdog reference time. NOTE(review): never refreshed after startup,
    # so quitIfTimeout measures total runtime (hard 600s limit), not idleness.
    lastaction = time.time()
    import argparse
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Open the SSH connection to the mana.vn server (sftp mode only);
    #    otherwise ``ssh`` stays None and is passed to commonlib.downloadNUpload.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            # Message: "Could not connect to the server, exiting crawler"
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    logger.info('start crawler news.zing.vn')
    crawler = CrawlerNews(MONGO_SERVER, MONGO_PORT)
    # Non-daemon watchdog thread; see quitIfTimeout for the 600s kill logic.
    threading.Thread(target=quitIfTimeout).start()
#    crawler.getDetailNews('http://www.zing.vn/news/giao-duc/ho-c-nghe-gi-co-thu-nha-p-cao/a127385.html')
    try:
        # NOTE(review): the worker-pool crawl below is commented out, so only
        # this single hard-coded article is fetched (result discarded); the
        # totals therefore stay 0 and the critical log below always fires.
        crawler.getDetailNews('http://www.zing.vn/news/the-gioi/nhung-cai-chet-oan-uong-dau-nam-moi-2012/a231721.html', '')
#        pool = workerpool.WorkerPool(size=2)
#        pool.map(crawler.process, crawler.categories.keys())
#        pool.shutdown()
#        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        # Message: "crawler news.zing.vn is not working"
        logger.critical(unicode("crawler tin tức news.zing.vn không hoạt động", 'utf8'))
    else:
        # Message: "Total articles crawled: {0}, total duplicates {1}"
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler news.zing.vn')
    forceQuit()
