# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re, os
import threading
from lxml import etree
from pymongo import Connection
from termcolor  import cprint

# --- Crawler configuration -------------------------------------------------
LOCAL_PATH          = '/home/hoangnamhai/HarvestedData/tintuc/news'
MONGO_SERVER        = 'beta.mana.vn'
MONGO_PORT          = 27017
DATABASE            = 'tintuc_v2'
PREFIX              = '/uploads/news'
SITE_URL            = 'http://cand.com.vn'
MAX_COUNT           = 15    # max duplicate hits per category before stopping
MAX_ARTICLE         = 40    # max articles processed per category
MAX_PAGE            = 15    # max listing pages crawled per category
CONNECT             = Connection(MONGO_SERVER, MONGO_PORT)
DB                  = CONNECT[DATABASE]
ARTICLE_COLLECTION  = DB['article']
CATEGORY_COLLECTION = DB['category']
USER_COLLECTION     = DB['backend_user']
IMG_END             = '/Images/reddot.gif'    # decorative end-of-article marker image; skipped
os.umask(0000)

logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'cand.com.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
flgCopy             = pyclib.getArgs()   # non-None => upload images over SCP
ssh = None; sftp = None
if flgCopy != None:
    ssh = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # Fix: the original called ssh.open_sftp() BEFORE checking ssh for None,
    # so a failed connection raised AttributeError instead of reaching the
    # handled error path below.
    if ssh == None:
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức cand.com.vn không hoạt động", 'utf8'))
            pyclib.forceQuit()
    else:
        sftp = ssh.open_sftp()
start = 0   # overwritten with time.time() in __main__; read by the timeOut() watchdog

# Site sections to crawl. Each entry maps a section slug to:
#   'link'     - path fragment appended to SITE_URL when building listing URLs
#   'category' - display name looked up in the local category collection
#   'tag'      - unaccented tags attached to every article from this section
CATEGORIES = {   
   'toipham':  {'link' : 'vi-VN/toiphama-z',   'category': unicode('Pháp luật', 'utf-8'),  'tag': ['toi pham', 'phap luat']},    
   'quocte':   {'link' : 'vi-VN/quocte',   'category': unicode('Thế giới', 'utf-8'),        'tag': ['the gioi', 'quoc te']},       
   'phapluat': {'link' : 'vi-VN/phapluat', 'category': unicode('Pháp luật', 'utf-8'),      'tag': ['phap luat']}, 
   'kinhte':   {'link' : 'vi-VN/kinhte',   'category': unicode('Kinh doanh', 'utf-8'),     'tag': ['kinh te']},
   'vanhoa':   {'link' : 'vi-VN/vanhoa',   'category': unicode('Văn hóa - Xã hội', 'utf-8'),     'tag': ['van hoa', 'xa hoi']},
   # NOTE(review): the leading '?' below looks like a typo (no other entry has
   # one) and builds 'http://cand.com.vn/?vi-VN/nhanai/Trang-N.cand' — verify.
   'nhanai':   {'link' : '?vi-VN/nhanai',  'category': unicode('Sức khỏe - Giới tính', 'utf-8'), 'tag': ['nhan ai', 'nhan dao']},
   'xahoi':    {'link' : 'vi-VN/xahoi',    'category': unicode('Văn hóa - Xã hội', 'utf-8'),     'tag': ['xa hoi']}, 
   'thoisu':   {'link' : 'vi-VN/thoisu',   'category': unicode('Văn hóa - Xã hội', 'utf-8'),     'tag': ['thoi su']},
   'catronglongdan': {'link' : 'vi-VN/trongmatdan',   'category': unicode('Pháp luật', 'utf-8'), 'tag': ['cong an', 'quan chung']},
   'nguoinoitieng':  {'link' : 'vi-VN/nguoinoitieng', 'category': unicode('Văn hóa - Xã hội', 'utf-8'),     'tag': ['nguoi noi tieng']}, 
}

def getRootLftCategory(name):
    ''' Resolve a category display name to its (root_id, lft) pair in the
        category tree, preferring the subtree rooted at 'Tin tức' when that
        root exists. Returns (None, None) when the name is empty, the
        category is unknown, or a DB error occurs.
    '''
    try:
        if name == None or name == '': return None, None
        root = CATEGORY_COLLECTION.find_one({'data': 'Tin tức'}, {'root_id': 1})
        if root == None:
            result = CATEGORY_COLLECTION.find_one({'data': name}, {'root_id': 1, 'lft': 1})
        else:
            result = CATEGORY_COLLECTION.find_one({'data': name, 'root_id': root['root_id']}, {'root_id': 1, 'lft': 1})
        if result == None:
            cprint('Category chưa tồn tại !', 'red')
            return None, None
        return result['root_id'], result['lft']
    except:
        traceback.print_exc()
        # Fix: callers unpack the result into two variables, so the error
        # path must also yield a 2-tuple instead of an implicit None.
        return None, None

def checkArticleDuplicate(link):
    ''' Check whether an article already exists in the DB.
        Returns (1, aId) for a duplicate, (0, aId) for a new article, and
        (None, None) for an empty link or on error.
    '''
    try:
        if link == None or link == '': return None, None
        # Fix: raw string and escaped dot so '.cand' is matched literally
        # (the unescaped '.' previously matched any character).
        m = pyclib.regexString(r'/(\d+)/(\d+)/(\d+)\.cand', link)
        if m:
            # Stable id derived from the numeric date/id parts of the URL.
            aId = '{0}-{1}-{2}'.format(m.group(1), m.group(2), m.group(3))
        else:
            # Links without the numeric pattern fall back to an MD5 of the URL.
            aId = pyclib.getMd5(link)
        result = ARTICLE_COLLECTION.find_one({'hashUrl' : aId, 'source': 'cand.com.vn'})
        if result != None:
            cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red')
            return 1, aId
        return 0, aId 
    except:
        traceback.print_exc()
        return None, None

def getAuthor(name='crawler'):
    ''' Return the Mongo _id of the backend user to credit as the author,
        falling back to the 'Hindua88' account when *name* is unknown.
        Returns None when neither account exists or on error.
    '''
    try:
        user = USER_COLLECTION.find_one({'username': name}, {})
        if user == None:
            user = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        if user != None:
            return user['_id']
    except:
        traceback.print_exc()

def getDateArticle(text):
    ''' Parse an article timestamp of the form "HH:MM:SS dd/mm/yyyy" and
        shift it from local time toward UTC via time.timezone.
        NOTE(review): time.timezone ignores DST — confirm that is acceptable.
        Falls back to the current UTC time when parsing fails or errors.
    '''
    try:
        m = pyclib.regexString(r'(\d+):(\d+):(\d+) (\d+)/(\d+)/(\d+)', text)
        if m:
            # Groups: 1=hour 2=minute 3=second 4=day 5=month 6=year
            hour, minute, second = int(m.group(1)), int(m.group(2)), int(m.group(3))
            day, month, year = int(m.group(4)), int(m.group(5)), int(m.group(6))
            return datetime.datetime(year, month, day, hour, minute, second) + datetime.timedelta(seconds=time.timezone)
        return datetime.datetime.utcnow()
    except:
        traceback.print_exc()
        # Fix: return a usable timestamp instead of an implicit None, which
        # would otherwise be stored as the article's 'update' field.
        return datetime.datetime.utcnow()

def processArticle(link, root_id, lft, tag):
    ''' Process one article page in detail and store it in Mongo.
        Returns 1 if the article already exists in the DB, 0 after crawling
        it, or None for an empty link / on error.
        (Docstring translated from Vietnamese.)
    '''
    try:
        global totalNewsCrawlered, totalNewsDuplicated
        if link==None or link=='': return None
        check_exists, aId    =   checkArticleDuplicate(link)
        if check_exists==1: totalNewsDuplicated += 1; return 1
        print ('----------------------------------------------------------------------')
        cprint('Process article: ' + link, 'yellow')
        # flgImg: becomes True once the first image is saved (used as thumbnail).
        # flgCaption/source_img: set when an image has no inline text, so the
        # NEXT text node is treated as its caption.
        title = thumbnail = description = ''; data = []; flgImg = False; flagError = 'success'
        flgCaption = False; source_img = ''; flagIntro = False; authorArticle = ''
        tree        =   pyclib.getXMLTree(link)
        # Author: first non-trivial string found in the lbAuthor2 span(s).
        authorNode  =   tree.xpath('//table[@id="Table10"]//span[@id="lbAuthor2"]')
        if len(authorNode)>0:
            for author in authorNode:
                acontent    =   pyclib.getStringWithNode(author)
                if acontent!=None and len(acontent)>1: authorArticle = acontent; break
        # NOTE(review): assumes lbDate is always present — dateNode[0] raises
        # IndexError otherwise (caught by the outer except).
        dateNode    =   tree.xpath('//table[@id="Table12"]//span[@id="lbDate"]')
        postedDate  =   getDateArticle(pyclib.getStringWithNode(dateNode[0]))
        titleNode   =   tree.xpath('//table[@id="Table12"]//span[@id="lbHeadline"]')
        if len(titleNode) > 0: title    =   pyclib.getStringWithNode(titleNode[0])
        # Intro/description: abstract text with any inline <a> links removed.
        descNode    =   tree.xpath('//table[@id="Table12"]//span[@class="main_abstract"]')
        if len(descNode) > 0:
            aNode   = descNode[0].xpath('.//a')
            if len(aNode) > 0:
                for an in aNode: an.getparent().remove(an)  
            description = pyclib.getStringWithNode(descNode[0])
        # Lead image table: save the image (via SCP or locally depending on
        # flgCopy) and record it with the table's text as its caption.
        thumbNode    = tree.xpath('//table[@id="Table12"]//div[1]//table')
        if len(thumbNode) > 0:
            thumbImg    = thumbNode[0].xpath('.//img')
            if len(thumbImg) > 0:
                linkImage = thumbImg[0].get('src')
                result = None; source = file_name = ''; size = 0
                if flgCopy!=None:
                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                else:
                    result, source, img_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                if flgImg==False: thumbnail = source; flgImg = True
                ctext   = pyclib.getStringWithNode(thumbNode[0])
                if ctext==None: ctext = ''  
                ctext = pyclib.toUnicodeDungSan(ctext)
                data.append({'data': source, 'type': 'image', 'caption': ctext})
                cprint(source, 'green'); print pyclib.toAscii('Caption : ' + ctext)
        # Article body: children of lbBody; if there is a single wrapper
        # element, descend one level into it.
        listNode    =   tree.xpath('//table[@id="Table12"]//span[@id="lbBody"]/*')
        if len(listNode)==1:
            strXPath    =   '//table[@id="Table12"]//span[@id="lbBody"]/{0}/*'.format(listNode[0].tag)
            listNode    = tree.xpath(strXPath)
        for node in listNode:
            if node.tag in ['style', 'script']: continue
            imgNode = node.xpath('.//img')
            if len(imgNode) > 0:
                # Node contains an image: save it unless it is the decorative
                # end-of-article marker (IMG_END).
                text        =   pyclib.getStringWithNode(node)
                linkImage    =   imgNode[0].get('src')
                if linkImage!= IMG_END:   
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, img_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                    if result!=None:
                        if flgImg==False: thumbnail = source; flgImg = True
                        # No inline text: defer caption to the next text node.
                        if text==None or len(text)<2: flgCaption = True; source_img = source; continue
                        # Bold descendants suggest the text is a standalone
                        # paragraph, not a caption.
                        split = False; descendants = node.xpath('.//*')
                        for des in descendants:
                            if des.tag in ['b', 'strong']: split = True; break
                        if split:
                            data.append({'data': source, 'type': 'image', 'caption': ''})
                            cprint('Source image : ' +  source, 'yellow'); print 'Caption :'
                            text = pyclib.toUnicodeDungSan(text)
                            data.append({'data': text, 'type': 'text'})
                            print pyclib.toAscii(text)
                        else:
                            text = pyclib.toUnicodeDungSan(text)    
                            data.append({'data': source, 'type': 'image', 'caption': text})
                            cprint('Source image : ' +  source, 'yellow'); print pyclib.toAscii('Caption :' + text)
                    # Dead code below: a bare triple-quoted string that
                    # disables an older caption-length heuristic (no-op).
                    '''
                    if len(text) < 120:
                        data.append({'data': source, 'type': 'image', 'caption': text})
                        cprint('Source image : ' +  source, 'yellow'); print 'Caption :', text
                    else:
                        data.append({'data': source, 'type': 'image', 'caption': ''})
                        cprint('Source image : ' +  source, 'yellow'); print 'Caption :'
                        data.append({'data': text, 'type': 'text'})
                        cprint(text, 'yellow')
                   '''
                else:
                    # End-marker image: keep only the surrounding text, if any.
                    if text!=None and len(text)>1: 
                        text = pyclib.toUnicodeDungSan(text)
                        data.append({'data': text, 'type': 'text'}); print pyclib.toAscii(text)
            else:
                # Plain text node.
                content     =   pyclib.getStringWithNode(node)
                if flgCaption: 
                    # This text follows a caption-less image: short text is
                    # used as that image's caption, long text stays a paragraph.
                    flgCaption = False
                    if content!=None and len(content)>1:
                        if len(content) < 120: 
                            content = pyclib.toUnicodeDungSan(content)
                            data.append({'data': source_img, 'type': 'image', 'caption': content})
                            cprint('Source image : ' +  source, 'yellow'); print pyclib.toAscii('Caption :' + content)
                        else:
                            data.append({'data': source_img, 'type': 'image', 'caption': ''})
                            cprint('Source image : ' +  source, 'yellow'); print 'Caption :'
                            content = pyclib.toUnicodeDungSan(content)
                            data.append({'data': content, 'type': 'text'}); print  pyclib.toAscii(content)
                    else: 
                        data.append({'data': source_img, 'type': 'image', 'caption': ''})
                        cprint('Source image : ' +  source, 'yellow'); print 'Caption :' 
                    continue 
                if content!=None and content!='':
                    if len(content)>1:        
                        # Short text with bold descendants is stored as a
                        # 'textbold' section heading.
                        textBold = False; descendants = node.xpath('.//*')
                        if len(descendants) > 0:
                            for des in descendants: 
                                if des.tag in ['b', 'strong']: textBold = True; break
                        if textBold and len(content)>120: textBold = False
                        if textBold: 
                            content = pyclib.toUnicodeDungSan(content)
                            data.append({'data': content, 'type': 'textbold'}); print pyclib.toAscii(content); #print(content, 'yellow')  
                        else: 
                            content = pyclib.toUnicodeDungSan(content)
                            data.append({'data': content, 'type': 'text'}); print pyclib.toAscii(content); # cprint(content, 'green') 
                    
                # Trailing text after the element (lxml .tail) is a separate
                # paragraph.
                if node.tail and node.tail!='':
                    contentTail   = node.tail.strip()
                    contentTail   = ' '.join(contentTail.split())
                    if len(contentTail) > 1:
                        contentTail = pyclib.toUnicodeDungSan(contentTail)
                        data.append({'data': contentTail, 'type': 'text'}); print pyclib.toAscii(contentTail)
        # Only the first matched element is used; the second would be the footer.
        title = pyclib.toUnicodeDungSan(title)
        # Assemble the Mongo document for this article.
        doc= ({ 'hashUrl'       :   aId,
                'title'         :   title,
                'thumbnail'     :   thumbnail,
                'description'   :   description,
                'content'       :   data,
                'newsLink'      :   link,
                'update'        :   postedDate,
                'source'        :   'cand.com.vn',
                'category'      :   lft,
                'author'        :   authorArticle,
                'root'          :   root_id,
                'is_active'     :   True,
                'lastupdate'    :   datetime.datetime.utcnow(),
                'timestamp'     :   time.time(),
                'date'          :   datetime.datetime.utcnow(),
                'tags'          :   tag, })
        # Only save articles that produced at least one content section.
        if len(data) > 0: 
            totalNewsCrawlered += 1; ARTICLE_COLLECTION.save(doc)
        print postedDate
        print aId
        print pyclib.toAscii('Title: ' + title)
        cprint('Thumbnail: ' + thumbnail, 'green')
        print pyclib.toAscii('Intro: ' + description)
        print pyclib.toAscii('Author: ' + authorArticle)
        return 0
    except:
        traceback.print_exc()                
        
def processCategory(cat, link=None, page=None, gbcount = 0, loop = 0):
    ''' Crawl one site section: the featured article plus every listed
        article on the current page, then recurse into the next page until
        MAX_PAGE pages, MAX_COUNT duplicate hits, or MAX_ARTICLE articles.

        cat     -- key into CATEGORIES
        link    -- explicit page URL (built from cat/page when None)
        page    -- 1-based page number (defaults to 1)
        gbcount -- running count of duplicate articles seen
        loop    -- running count of articles processed
    '''
    try:
        # Category lookup happens on every page of the recursion.
        root_id, lft = getRootLftCategory(CATEGORIES[cat]['category'])
        if root_id==None: return
        if page==None:  page = 1
        else: 
            if page>MAX_PAGE: return
        if link==None:  lurl = '{0}/{1}/Trang-{2}.cand'.format(SITE_URL, CATEGORIES[cat]['link'], page)
        else:           lurl = link
        cprint('Process page : ' + lurl, 'yellow')
        tree        =   pyclib.getXMLTree(lurl)
        # Featured ("hot") article at the top of the listing page.
        firstNode   =   tree.xpath('//div[@id="lstZC_Noibatchuyenmuc1_panelMain"]/div[2]/a')
        if len(firstNode) > 0:
            c = processArticle(firstNode[0].get('href'), root_id, lft, CATEGORIES[cat]['tag'])
            if c!=None: gbcount += c; loop += 1
        # Regular article list; an empty list terminates the recursion.
        listNode    =   tree.xpath('//table[@id="Table5"]//a[@class="news_title"]')
        if len(listNode) <= 0: return   
        for node in listNode:
            print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
            if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.', 'red')
                return
            # processArticle returns 1 for a duplicate, 0 for a new article.
            c = processArticle(node.get('href'), root_id, lft, CATEGORIES[cat]['tag'])
            if c!=None: gbcount += c; loop += 1
        # Recurse into the next listing page, carrying the counters forward.
        nextPage    = page + 1
        linkNextPage    =  '{0}/{1}/Trang-{2}.cand'.format(SITE_URL, CATEGORIES[cat]['link'], nextPage)   
        processCategory(cat, linkNextPage, nextPage, gbcount, loop)
    except:
        traceback.print_exc()

def forceQuit():
    try:
        print 'Finished.', datetime.datetime.now()
        pid = os.getpid(); os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
        
def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức cand.com.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        cprint('start crawler cand.com.vn', 'yellow')
        totalNewsCrawlered = 0; totalNewsDuplicated = 0
        # Start the wall-clock used by the timeOut() watchdog.
        start = time.time() 
        # NOTE(review): Thread.start() returns None, so 'timeout' is always
        # None; the watchdog thread runs regardless. It is non-daemon, which
        # is why forceQuit()/os._exit() is used to terminate the process.
        timeout = threading.Thread(target=timeOut).start()
        # Single worker: categories are processed sequentially through the pool.
        pool = workerpool.WorkerPool(size=1)
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        # Summary logging: no activity at all is treated as a crawler failure.
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức cand.com.vn không hoạt động", 'utf8'))
        else:
            logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
        logger.info('finished crawler cand.com.vn')
        if flgCopy!=None: ssh.close()
        forceQuit()
    except:
        traceback.print_exc()
