# -*- coding: utf-8 -*-

from MongoModel import MongoModel
from termcolor import cprint
import commonlib
import datetime
import logging
import os
import re
import sys
import time
import traceback
import workerpool

# Local staging directory for images downloaded from articles before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
MONGO_PORT = 27017
DBNAME = 'tintuc_v2'
# Remote path prefix that uploaded images are served from.
PREFIX = '/uploads/news/'
# Per-category crawl limits: stop after this many newly saved articles ...
MAX_PROCESSED = 15
# ... or after this many articles already present in the database.
MAX_DUPLICATE = 10

# Logger that writes records to MongoDB, tagged with this crawler's site name.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'afamily.vn')

def forceQuit():
    """Hard-kill the current process immediately, skipping all cleanup.

    Used to stop the crawler even while worker/watchdog threads are still
    running (a plain sys.exit() would only raise SystemExit in one thread).
    """
    pid = os.getpid()
    # Bug fix: the original called os._exit(1) first, which terminates the
    # process immediately and made the os.kill() line unreachable dead code.
    # Send the signal first; fall through to os._exit(1) as a last resort.
    os.kill(pid, 9)  # force kill ~ kill -9 pid
    os._exit(1)

class CrawlerNews(MongoModel):
    """Crawler for news articles from afamily.vn.

    Walks the first listing pages of each configured category, queues the
    article links found there, extracts each article's structured content
    (text / bold text / images with captions) and saves it into the
    'article' collection of the ``tintuc_v2`` MongoDB database.

    NOTE(review): several methods reference the module-level ``ssh`` handle
    that is only assigned in the ``__main__`` section; this class is not
    usable stand-alone without that global being set first.
    """
    
    # NOTE(review): this class-level dict appears unused — processCategory()
    # builds its own local queue dict. Confirm before removing.
    queueUrl = {}
    
    # Listing-page URL template: {0} = category slug, {1} = 1-based page number.
    pageUrl = 'http://afamily.vn/{0}/trang-{1}.chn'
    
    # Maps afamily.vn category slugs to the local category display name (used
    # to look up lft/root_id in the 'category' collection) and the tag list
    # stored with each saved article. Commented-out entries are disabled.
    categories = {#'van-hoa': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi', 'chinh tri', 'van hoa']},
                  'suc-khoe': {'name': 'Sức khỏe - Giới tính', 'tag': ['suc khoe', 'gioi tinh']},
                  'du-lich': {'name': 'Văn hóa - Xã hội', 'tag': ['van hoa', 'du lich']},
                  'dep': {'name': 'Làm đẹp - Thời trang', 'tag': ['lam dep']},
                  'nha-dep': {'name': 'Làm đẹp - Thời trang', 'tag': ['lam dep']},
                  'phim-anh': {'name': 'Giải trí', 'tag': ['giai tri', 'phim-anh']},
                  'tinh-yeu-hon-nhan': {'name': 'Tâm sự', 'tag': ['tam su', 'tinh yeu', 'hon nhan']},
                  'tam-su': {'name': 'Tâm sự', 'tag': ['tam su', 'tinh yeu', 'hon nhan']},
                  'an-ngon': {'name': 'Ẩm thực', 'tag': ['am thuc', 'an ngon']},
                  #'mua-sam': {'name': 'Tiêu dùng', 'tag': ['tieu dung', 'mua sam']},
                  }
    
    def __init__(self, host='localhost', port=27017):
        """Open the MongoDB connection via the MongoModel base class."""
        MongoModel.__init__(self, host, port)
        
    def getLeftRootOfCategory(self, catName):
        """Look up a category by its display name in the 'category' collection.

        Returns {'category': <lft>, 'root': <root_id>} when found, or the
        empty string '' when no matching document exists.
        NOTE(review): mixed return types (dict vs '') — callers compare
        against '' explicitly.
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result
    
    def standardizeTimeStr(self, timeStr):
        """Parse a 'dd-mm-YYYY HH:MM:SS' string and shift it to UTC.

        Adding ``time.timezone`` seconds converts the parsed (assumed local)
        time to UTC. On any parse failure the current UTC time is returned
        instead; the printed message says "could not convert the article's
        time -> using current time".
        """
        try:
            return datetime.datetime.strptime(timeStr, "%d-%m-%Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except:
            cprint('Không convert được time của bài viết-> lấy time hiện tại', 'red')
            return datetime.datetime.utcnow()
    
    def addQueue(self, queueUrl, item={}):
        """Add ``item`` to ``queueUrl``, keyed by MD5 of its article id.

        Returns True when an item with the same id was already queued,
        False when the item was added, and None when the item is rejected
        (no 'link' key, or empty link/title).

        NOTE(review): mutable default ``item={}`` is shared across calls;
        harmless here since it is never mutated, but fragile.
        """
        if not item.has_key('link'): return
        if item['link'] == '' or item['title'] == '': return
        id = self.identityId(item['link'])
        cprint(id, 'red')
        hashUrl = commonlib.getMD5Hash(id)
        duplicate = False
        if not queueUrl.has_key(hashUrl):
            queueUrl[hashUrl] = item
        else:
            duplicate = True
        return duplicate
    
    def getQueue(self, queueUrl):
        """Pop and return an arbitrary queued item (KeyError when empty)."""
        k, v = queueUrl.popitem()
        cprint('getQueue key: {0}'.format(k), 'red')
        return v
    
    def getImgOfNode(self, node, baseUrl):
        """Download/upload the first <img> found under ``node``.

        The src is resolved against ``baseUrl``, then the image is fetched
        and re-uploaded via the module-level ``ssh`` handle. Returns the
        uploaded URL, '' when the first <img> has no usable src, or None
        when the subtree contains no <img> at all.
        """
        for item in node.xpath("./descendant-or-self::*"):
            if item.tag == 'img':
                src = commonlib.getAttribText(item, 'src')
                src = commonlib.urlJoin(baseUrl, src) if src != '' else ''
                if src != '':
                    src = commonlib.downloadNUpload(ssh, src, DOWNLOAD_PATH, PREFIX)
                return src
    
    def getDataOfTableNode(self, node, baseUrl):
        ''' Extract image/text pairs from a recipe-style <table>.

            Case of cooking articles laid out as a table where, per row:
            - td 1: holds the image
            - td 2: holds the cooking instructions

            Returns a list of {'type': ..., 'data': ...} dicts, or None when
            any row does not have exactly two cells (not recipe-shaped).
        '''
        data = []
        f = True
        for tr in node.xpath(".//tr"):
            if len(tr.getchildren()) != 2: f = False; break
        if not f: return None
        for tr in node.xpath(".//tr"):
            try:
                img = self.getImgOfNode(tr.xpath("./td[1]")[0], baseUrl)
                data.append({'type': 'image', 'data': img, 'caption': ''})
                text = commonlib.getElementText(tr.xpath("./td[2]"), descendant=1)
                if text != '':
                    data.append({'type': 'text', 'data': text})
            except: pass  # best-effort: silently skip rows that fail to parse
        return data
    
    def identityId(self, url):
        """Extract the numeric article id from a '/<digits>/' URL segment."""
        pat = r'.+/(\d+)/.+'
        return commonlib.extractWithRegEx(pat, url, 1)
    
    def processDetail(self, url):
        """Fetch one article page and extract its structured content.

        Returns a dict {'content': [...], 'description': str,
        'timeOfPost': datetime or '', 'thumbnail': str}.

        NOTE(review): ``finally: return data`` means the (possibly
        partially filled) dict is always returned — exceptions are logged
        and suppressed, and the early ``return`` statements inside ``try``
        also fall through to this final return.
        """
        logger.debug(unicode('start processDetail(url={0})'.format(url), 'utf8'))
        data = {'content': [], 'description': '', 'timeOfPost': '', 'thumbnail': ''}
        try:
            content = []
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            # Article header: description and post time live in div.box11.
            description = commonlib.getElementText(tree.xpath("//div[@class='box11']/h2[@class='title']"), descendant=1)
            timeOfPost = commonlib.getElementText(tree.xpath("//div[@class='box11']/p[@class='time']"), descendant=1)
            timeOfPost = self.standardizeTimeStr(timeOfPost)
            chitietNode = tree.xpath("//div[@id='divChiTiet']")
            if len(chitietNode) == 0: return
            text = commonlib.getElementText(chitietNode[0])
            if text != '': content.append({'type': 'text', 'data': text})
            stepOver = False
            # Skip articles whose body has fewer than 4 child elements.
            if len(chitietNode[0].getchildren()) < 4: return
            for item in tree.xpath("//div[@id='divChiTiet']/*"):
                # Recipe-style tables become alternating image/text entries.
                # NOTE(review): getDataOfTableNode() may return None, which
                # raises TypeError here — swallowed by the outer except.
                if item.tag == 'table':
                    for ii in self.getDataOfTableNode(item, url): content.append(ii)
                    continue
                fimg = False; fbold = False
                
                # Total <img> descendants (the element itself counts when it
                # is an <img>) — used to find the last image for captioning.
                cimg = len([x for x in item.iterdescendants(tag='img')]) if item.tag != 'img' else 1
                
                ic_img = 0
                for iitem in item.xpath("./descendant-or-self::*"):
                    istyle = iitem.get('style', '').lower()
                    if re.search(r'bold', istyle) or iitem.tag in ['b', 'strong']:
                        fbold = True
                    if iitem.tag == 'img':
                        ic_img += 1
                        fimg = True
                        src = commonlib.getAttribText(iitem, 'src')
                        src = commonlib.urlJoin(url, src) if src != '' else ''
                        if src != '':
                            # First usable image becomes the article thumbnail.
                            if data['thumbnail'] == '': data['thumbnail'] = src
                            src = commonlib.downloadNUpload(ssh, src, DOWNLOAD_PATH, PREFIX)
                            caption = ''
                            if ic_img == cimg:      # --> only attach a caption to the last image
                                if len(item.getchildren()) == 1:
                                    # Caption candidate: next sibling is a short centered <div>.
                                    nextNode = item.xpath("./following-sibling::*[1]")
                                    if len(nextNode) > 0:
                                        nextNode = nextNode[0]
                                        if nextNode.tag == 'div' and nextNode.get('align') == 'center':
                                            text = commonlib.getElementText(nextNode, descendant=1)
                                            wc = commonlib.wordCount(text)
                                            if wc > 0 and wc < 18:
                                                caption = text
                                                stepOver = True
                                else:
                                    # Short text inside the same element is the caption;
                                    # longer text is a body paragraph instead.
                                    text = commonlib.getElementText(item, descendant=1)
                                    if commonlib.wordCount(text) > 20:
                                        content.append({'type': 'text', 'data': text})
                                    else:
                                        caption = text
                            content.append({'type': 'image', 'data': src, 'caption': caption})
                if item.tail != None:
                    text = commonlib.getElementText(item, tail=1, text=0)
                    if text != '': content.append({'type': 'text', 'data': text})
                # Elements that contained an image already contributed content.
                if fimg: fimg = False; continue
                # stepOver marks a sibling already consumed as an image caption.
                if stepOver:
                    stepOver = False; continue
                text = commonlib.getElementText(item, descendant=1)
                # Lines starting with '>>' look like related-article links — skipped.
                if text != '' and not text.startswith('>>'):
                    # Short bold runs are treated as sub-headings ('textbold').
                    if fbold and commonlib.wordCount(text) <= 25:
                        content.append({'type': 'textbold', 'data': text})
                    else:
                        content.append({'type': 'text', 'data': text})
                if item.tail != None:
                    text = commonlib.getElementText(item, tail=1, text=0)
                    if text != '' and re.search(r'\w+', text):
                        content.append({'type': 'text', 'data': text})
            
            # Debug dump of the extracted content lines.
            for line in content:
                if line['type'] == 'image':
                    print line['caption'], '->', line['data']
                else:
                    print line['type'], '->', line['data']
            if len(content) == 0: return
            else:
                data['description'] = description
                data['timeOfPost'] = timeOfPost
                data['content'] = content
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data
    
    def processCategory(self, catId, url, currentProcessed=0, currentDuplicated=0):
        """Crawl one listing page: queue its article links, then persist
        each not-yet-seen article into the 'article' collection.

        Returns the updated (currentProcessed, currentDuplicated) counters.
        NOTE(review): ``finally: return`` means they are returned even when
        an exception was raised and logged mid-way; also the debug format
        string below is missing its closing ')'.
        """
        logger.debug(unicode('start processCategory(catId={0}, url={1}, currentProcessed={2}, currentDuplicated={3}'.format(catId, url, currentProcessed, currentDuplicated), 'utf8'))
        queue = {}
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                # Logged message: 'category "<name>" does not exist'.
                logger.info(unicode('chuyen muc: "%s" khong ton tai' % self.categories[catId]['name'], 'utf8'))
                return
            tree = commonlib.getXMLTree(url)
            if tree == None or tree == '': return
            # --> grab the featured/top story section of the page
            box6Div = tree.xpath("//div[@class='box6']/div[1]")
            if len(box6Div) > 0:
                title = commonlib.getElementText(box6Div[0].xpath("./h1/a"))
                link = commonlib.getAttribText(box6Div[0].xpath("./h1/a"), 'href')
                link = commonlib.urlJoin(url, link) if link != '' else ''
                thumbnail = commonlib.getAttribText(box6Div[0].xpath(".//a/img[@class='b8img']"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                if link != '':
                    self.addQueue(queue, {'title': title, 'link': link, 'thumbnail': thumbnail, 'catId': catId})
                
            # Secondary headlines listed beside the featured story.
            for item in tree.xpath("//div[@class='box6']/ul/li/a"):
                title = commonlib.getElementText(item, descendant=1)
                link = commonlib.getAttribText(item, 'href')
                link = commonlib.urlJoin(url, link) if link != '' else ''
                if link == '': continue
                thumbnail = commonlib.getAttribText(item.xpath("./img"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                self.addQueue(queue, {'title': title, 'link': link, 'thumbnail': thumbnail, 'catId': catId})
                    
            # --> grab the article list section of the page
            
            for item in tree.xpath("//div[@class='fl w700']/div[@class='box8']/p[@class='time']"):
                link = commonlib.getAttribText(item.xpath("./following-sibling::*[1]"), 'href')
                link = commonlib.urlJoin(url, link) if link != '' else ''
                if link == '': continue 
                title = commonlib.getElementText(item.xpath("./following-sibling::*[1]"), descendant=1)
                thumbnail = commonlib.getAttribText(item.xpath("./following-sibling::*[1]/img"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                self.addQueue(queue, {'title': title, 'link': link, 'thumbnail': thumbnail, 'catId': catId})
                # Related links nested under the same entry (no thumbnails).
                for iitem in item.xpath("./following-sibling::*[2]/li/a"):
                    title = commonlib.getElementText(iitem, descendant=1)
                    link = commonlib.getAttribText(iitem, 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    if link == '': continue
                    thumbnail = ''
                    self.addQueue(queue, {'title': title, 'link': link, 'thumbnail': thumbnail, 'catId': catId})
                    
            
            # Drain the queue: skip articles already stored (by hashUrl),
            # otherwise fetch the detail page and save the document.
            while len(queue) > 0:
                item = self.getQueue(queue)
                id = self.identityId(item['link'])
                hashUrl = commonlib.getMD5Hash(id)
                if collection.find_one({'hashUrl': hashUrl}):
                    currentDuplicated += 1
                    cprint('Already existed in databse: {0}'.format(item['link']), 'red')
                else:
                    data = self.processDetail(item['link'])
                    if len(data['content']) == 0: continue
                    collection.save({'hashUrl': hashUrl,
                        'title': item['title'],
                        'thumbnail': item['thumbnail'] if item['thumbnail'] != '' else data['thumbnail'],
                        'description': data['description'],
                        'content': data['content'],
                        'newsLink': item['link'],
                        'update': data['timeOfPost'],
                        'source': 'afamily.vn',
                        'author': '',
                        'category': cat['category'],
                        'root': cat['root'],
                        'is_active': True,
                        'lastupdate': datetime.datetime.utcnow(),
                        'timestamp': time.time(),
                        'date': datetime.datetime.utcnow(),
                        'tag': self.categories[catId]['tag']
                    })
                    currentProcessed += 1
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return currentProcessed, currentDuplicated
    
    def process(self, catId):
        """Crawl listing pages 1..2 of one category slug.

        Stops early once more than MAX_DUPLICATE duplicates or more than
        MAX_PROCESSED new articles are seen, then adds the counters to the
        module-level totals (totalNewsCrawlered / totalNewsDuplicated,
        defined in the __main__ section).
        """
        logger.debug(unicode('start process(catId={0})'.format(catId), 'utf8'))
        global totalNewsCrawlered, totalNewsDuplicated
        try:
            currentProcessed = 0
            currentDuplicated = 0
            # range(1, 3): only the first two listing pages are crawled.
            for i in range(1, 3):
                url = self.pageUrl.format(catId, i)
                currentProcessed, currentDuplicated = self.processCategory(catId, url, currentProcessed, currentDuplicated)
                if currentDuplicated > MAX_DUPLICATE or currentProcessed > MAX_PROCESSED:
                    # Logged messages: "duplicate count exceeded threshold" /
                    # "article count exceeded the allowed limit".
                    if currentDuplicated > MAX_DUPLICATE: logger.info(unicode('số bài viết trùng quá ngưỡng {0} bài viết'.format(MAX_DUPLICATE), 'utf8'))
                    if currentProcessed > MAX_PROCESSED: logger.info(unicode('số bài viết quá giới hạn {0} cho phép'.format(MAX_PROCESSED), 'utf8'))
                    break
            totalNewsCrawlered += currentProcessed
            totalNewsDuplicated += currentDuplicated
        except:
            logger.error(str(sys.exc_info()[1]))
        finally:
            return
        
def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức afamily.vn không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)
    
if __name__ == '__main__':
    # Module-level counters shared with CrawlerNews.process() and quitIfTimeout().
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    import argparse
    import threading
    # Watchdog reference time — NOTE(review): never updated after start, so
    # quitIfTimeout() fires 10 minutes after launch regardless of activity.
    lastaction = time.time()
    # -- Command-line argument section
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Initialize the SSH connection to the mana.vn server (sftp mode only)
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            # "Could not connect to the server, exiting crawler"
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    # Start the watchdog thread before any crawling begins.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler afamily.vn at {0}'.format(datetime.datetime.now()))
    crawler = CrawlerNews(MONGO_SERVER, MONGO_PORT)
#    crawler.process('van-hoa')
    try:
        # Crawl all configured categories with two worker threads.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.process, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final summary: critical when nothing was crawled at all, info otherwise.
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức afamily.vn không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler afamily.vn at {0}'.format(datetime.datetime.now()))
    # Hard exit: the watchdog thread would otherwise keep the process alive.
    forceQuit()
