# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re, os
import threading

import html2textlib
from lxml import    etree
from pymongo import Connection
from termcolor  import cprint

# --- Configuration -----------------------------------------------------------
LOCAL_PATH          = '/home/hoangnamhai/HarvestedData/tintuc/news'   # local dir for downloaded images
MONGO_SERVER        = 'beta.mana.vn'
MONGO_PORT          = 27017
DATABASE            = 'tintuc_v2'
PREFIX              = '/uploads/news'       # public URL prefix for saved images
SITE_URL            = 'http://hoahoctro.vn'
MAX_COUNT           = 15    # stop a category once this many duplicate articles were seen
MAX_ARTICLE         = 30    # stop a category once this many articles were processed
MAX_PAGE            = 10    # NOTE(review): appears unused in this file — confirm before removing
os.umask(0000)              # files/dirs created by the crawler get full permissions

logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'hoahoctro.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
# flgCopy != None means "upload images to a remote host over SCP" instead of
# saving them under LOCAL_PATH.
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # SECURITY: hard-coded credentials with an empty password — move to config.
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # Fix: the original called ssh.open_sftp() BEFORE testing ssh for None, so
    # a failed connection crashed on open_sftp() and the critical log /
    # forceQuit below was unreachable. Test first, then open the channel.
    # (The extra counter check was dropped: both counters are always 0 here.)
    if ssh==None:
        logger.critical(unicode("crawler tin tức hoahoctro.vn không hoạt động", 'utf8'))
        pyclib.forceQuit()
    sftp    = ssh.open_sftp()
start = 0   # crawl start timestamp; set in __main__, read by the timeOut() watchdog

class Hoahoctro():
    ''' Crawler for news articles from hoahoctro.vn.

        Walks each category's listing pages, extracts every article's
        title / intro / body / images, downloads the images (locally, or over
        SCP when the module-level flgCopy is set), and saves the result into
        the 'article' collection of the configured MongoDB database.
    '''
    # Map of category slug -> site path, DB category name and tag list.
    CATEGORIES = { 
       'tamsu':      {'link' : 'tamsu.hht',         'category': unicode('Tâm sự', 'utf-8'),     'tags': ['tam su']},
       'duhoc':      {'link' : 'duhoc.hht',         'category': unicode('Giáo dục', 'utf-8'),   'tags': ['du hoc', 'giao duc']},
       'vqanh':      {'link' : 'vuongquocanh.hht',  'category': unicode('Giáo dục', 'utf-8'),   'tags': ['du hoc', 'giao duc']},
       'nhip-song':  {'link' : 'nhipsong.hht',      'category': unicode('Công nghệ số', 'utf-8'),           'tags': ['cntt', 'cong nghe so']},
       'ngoilang':   {'link' : 'ngoilang.hht',      'category': unicode('Sức khỏe - Giới tính', 'utf-8'),   'tags': ['ngoi lang', 'tinh yeu', 'gioi tinh']},
       'la':         {'link' : 'la.hht',            'category': unicode('Chuyện lạ', 'utf-8'),              'tags': ['chuyen la', 'la', 'fun']},
       # NOTE(review): duplicate key 'ngoilang' — identical to the entry above,
       # so the dict literal silently keeps a single entry; this line is dead.
       'ngoilang':   {'link' : 'ngoilang.hht',      'category': unicode('Sức khỏe - Giới tính', 'utf-8'),   'tags': ['ngoi lang', 'tinh yeu', 'gioi tinh']},
       'trasua':     {'link' : 'trasua.hht',        'category': unicode('Sức khỏe - Giới tính', 'utf-8'),   'tags': ['tra sua', 'tinh yeu', 'gioi tinh']},
       'cafe7':      {'link' : 'cafe7.hht',         'category': unicode('Sức khỏe - Giới tính', 'utf-8'),   'tags': ['cafe 7', 'tinh yeu', 'gioi tinh']},
       'h2tconcert': {'link' : 'h2teenconcert.hht', 'category': unicode('Giải trí', 'utf-8'),   'tags': ['concert', 'dien anh', 'giai tri']},
       'hhticon':    {'link' : 'hhticon.hht',       'category': unicode('Giải trí', 'utf-8'),   'tags': ['am nhac', 'dien anh', 'giai tri']},
       'am-thuc':    {'link' : 'esheep.hht',        'category': unicode('Ẩm thực', 'utf-8'),    'tags': ['am thuc', 'mon an']},
       'media':      {'link' : 'media.hht',         'category': unicode('Giải trí', 'utf-8'),   'tags': ['am nhac', 'dien anh', 'giai tri']},
       'megazia':    {'link' : 'megazia.hht',       'category': unicode('Giải trí', 'utf-8'),   'tags': ['am nhac', 'dien anh', 'giai tri']},
    }
    def __init__(self, server, port, database):
        ''' Connect to MongoDB and keep a handle on `database` in self.DB. '''
        CONNECT             = Connection(server, port)
        self.DB             = CONNECT[database]
    
    def getRootLftCategory(self, name):
        ''' Look up the category named `name` in the 'category' collection.

            Searches within the subtree rooted at the 'Tin tức' root category
            when that root exists, otherwise by name alone.
            Returns (root_id, lft) on success and (None, None) when `name` is
            empty or not found.

            NOTE(review): the bare except prints the traceback and implicitly
            returns None (not a tuple); callers that unpack the result would
            then raise — confirm this path is acceptable.
        '''
        try:
            collection = self.DB['category']
            if name==None or name=='': return None, None 
            root        =   collection.find_one({'data': 'Tin tức'}, {'root_id': 1})
            if root==None: 
                result  =   collection.find_one({'data': name}, {'root_id': 1, 'lft': 1})
            else:  
                result  =   collection.find_one({'data': name, 'root_id': root['root_id']}, {'root_id': 1, 'lft': 1})
            if result==None: cprint('Category chưa tồn tại !', 'red'); return None, None
            else: return result['root_id'], result['lft']
        except:
            traceback.print_exc()
    
    def getDatetime(self, text):
       ''' Parse a 'dd/mm/yyyy' date out of `text` and return it as a
           datetime shifted by the local UTC offset (time.timezone).
           Falls back to utcnow() when `text` is empty or has no date.
       '''
       try:
           result = datetime.datetime.utcnow()
           if text==None or text=='': return result
           m        =   pyclib.regexString('(\d+)/(\d+)/(\d+)', text)
           if m:
               year = int(float(m.group(3))); month  = int(float(m.group(2))); day    = int(float(m.group(1))) 
               return datetime.datetime(year, month, day) + datetime.timedelta(seconds=time.timezone)
           return result
       except:
           traceback.print_exc()
    
    def checkArticleDuplicate(self, link, cat):
        ''' Check whether the article at `link` already exists in the DB.
            Returns (1, article_id) if it is a duplicate, (0, article_id)
            otherwise, and (None, None) on empty input or DB error.
        ''' 
        try:
            collection  =  self.DB['article']
            if link==None or link=='': return None, None
            # Stable id: '<cat>-<n1>-<n2>' when the URL matches the site's
            # '-N-N.hht' pattern, otherwise an MD5 hash of the full URL.
            m           =   pyclib.regexString('-(\d+)-(\d+).hht', link)
            if m: aId   =   '{0}-{1}-{2}'.format(cat, m.group(1), m.group(2)) 
            else: aId   =   pyclib.getMd5(link)   
            result      =   collection.find_one({'hashUrl' : aId, 'source': 'hoahoctro.vn'})
            if result!=None:  cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red'); return 1, aId
            return 0, aId 
        except:
            traceback.print_exc()
            return None, None
    
    def processArticle(self, link, cat):
        ''' Process one article in full and save it to the 'article'
            collection. Returns 1 if the article already exists in the DB,
            0 when it was processed, and None on bad input / error paths.
        '''
        try:
            global totalNewsCrawlered, totalNewsDuplicated
            if link==None or link=='': return
            # Resolve DB category and bail out early on a duplicate.
            root_id, lft    = self.getRootLftCategory(self.CATEGORIES[cat]['category'])
            if root_id==None: return
            tags = self.CATEGORIES[cat]['tags']
            check_exists, aId    =   self.checkArticleDuplicate(link, cat)
            if check_exists==1: totalNewsDuplicated += 1; return 1
            print ('----------------------------------------------------------------------')
            cprint('Process article: ' + link, 'yellow')
            title = thumbnail = description = ''; data = []; flgImg = False; flgStop = False
            postedDate = datetime.datetime.utcnow(); dateText = ''; cCaption = ''; listKeys = []
    
            tree            =   pyclib.getXMLTree(link)
            # remove all tags a class = newstitle
            aNode           =   tree.xpath('//div[@class="dvNoidungtin"]//a[@class="newstitle" or @id="ctl00_content_lnkTitle"]')
            if len(aNode) > 0: 
                for an in aNode: an.getparent().remove(an)
            contentNode     =   tree.xpath('//div[@class="dvNoidungtin"]')
            if len(contentNode) > 0: primaryNode =   contentNode[0]
            else: cprint('Sai XPath không thể lấy được nội dung.', 'red'); return
            chtml =  etree.tounicode(primaryNode, method='html')    
            data, imgs   = html2textlib.getContent(chtml, SITE_URL)
            # Pull the posted-date line ('... : dd/mm/yyyy') out of the body,
            # scanning from the end. NOTE(review): the range stops at 1, so
            # index 0 is never examined — confirm that is intentional.
            for i in range(len(data)-1, 0, -1):
                if data[i].has_key('data'):
                    if pyclib.regexString(': (\d+)/(\d+)/(\d+)$', data[i]['data']): dateText = data[i]['data']; del data[i]; break    
            titleNode   = tree.xpath('//div[@class="newsdetail"]//div[@class="dvXemtin"]//span[@class="newstitle"]')
            if len(titleNode) > 0: title  = pyclib.getStringWithNode(titleNode[0])
            introNode   = tree.xpath('//div[@class="newsdetail"]//div[@class="dvXemtin"]//span[@class="newsdesc"]')
            if len(introNode) > 0: description = pyclib.getStringWithNode(introNode[0])
            postedDate  = self.getDatetime(dateText)
            # Drill down to the element list that actually holds the article
            # body; the site wraps it in one or two container levels.
            numberNode    = tree.xpath('//div[@class="dvNoidungtin"]/*')
            queryXPath  = ''
            if len(numberNode)>0 and len(numberNode)<=2:
                queryXPath  = '{0}/{1}'.format('//div[@class="dvNoidungtin"]', numberNode[0].tag)
            else: cprint('Không lấy được nội dung tin do tin có cấu trúc khác.', 'red'); return
            if queryXPath=='': return
            listNode    = tree.xpath('{0}/*'.format(queryXPath))
            if len(listNode)==1:
                queryXPath = '{0}/{1}/*'.format(queryXPath, listNode[0].tag)
                listNode    = tree.xpath(queryXPath)
            elif len(listNode)==2: 
                queryCXPath  =  '{0}{1}'.format(queryXPath, '/div/*')
                listNode = tree.xpath(queryCXPath) 
                if len(listNode) < 1: queryCXPath  =  '{0}{1}'.format(queryXPath, '/span/*') 
                listNode = tree.xpath(queryCXPath) 
            # Download every image in the body; map original src -> saved copy
            # so the matching entries in `data` can be rewritten below.
            imageOfArticle = {}
            for node in listNode:
                if node.tag == "img":
                    linkImage   = node.get('src')
                    if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                    # Make the URL absolute before downloading.
                    if linkImage[:4]!='http':
                        if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                        else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                    linkSourceImage   = re.sub(r'\s', '', linkImage)
                    linkSourceImage   = re.sub(r'%20', '', linkSourceImage)
                    linkImage         = linkImage.replace('../', '')
                    result = None; source = file_name = ''; size = 0
                    # NOTE(review): in the SCP branch (flgCopy set) the result
                    # is never checked and imageOfArticle/thumbnail are never
                    # updated — this looks like an indentation bug; the
                    # 'if result!=None' blocks only run in the local-save
                    # branch. Confirm against the SCP deployment.
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                        if result!=None:
                            if flgImg==False: flgImg = True; thumbnail = source
                            imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': ''}
                        else:
                            # First attempt failed: retry with the
                            # whitespace-stripped URL.
                            linkImage       = linkSourceImage
                            linkImage       = linkImage.replace('../', '')
                            result = None; source = file_name = ''; size = 0
                            if flgCopy!=None:
                                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                            else:
                                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                                if result!=None:
                                    if flgImg==False: flgImg = True; thumbnail = source
                                    imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': ''}
                    continue
                # Non-<img> node: look for nested images and try to recover a
                # caption from table/i/em wrappers around them.
                imgNode  = node.xpath('.//img')
                if len(imgNode) > 0:
                    flgRun = False
                    tableNode   = node.xpath('.//table')
                    caption = ''
                    if len(tableNode) > 0: flgRun = True
                    iNode       = node.xpath('.//i')
                    if len(iNode) > 0: flgRun = True 
                    emNode      = node.xpath('.//em')
                    if len(emNode) > 0: flgRun = True
                    if node.tag=='table': flgRun = True
                    if flgRun:
                        caption     = pyclib.getStringWithNode(node)
                        if caption==None or len(caption)<2 or len(caption)>200: caption = ''
                    # A shared caption is ambiguous with 2+ images: drop it.
                    if len(imgNode)>=2: caption = ''
                    for img in imgNode:
                        linkImage   = img.get('src')
                        if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                        if linkImage[:4]!='http':
                            if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                            else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                        linkSourceImage   = re.sub(r'\s', '', linkImage)
                        linkSourceImage   = re.sub(r'%20', '', linkSourceImage)
                        linkImage         = linkImage.replace('../', '')
                        result = None; source = file_name = ''; size = 0
                        # NOTE(review): same suspect SCP-branch indentation as
                        # above — saved images are only recorded in the
                        # local-save branch.
                        if flgCopy!=None:
                            result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                        else:
                            result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                            if result!=None:
                                if flgImg==False: flgImg = True; thumbnail = source
                                imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': caption}
                            else:
                                linkImage  = linkSourceImage
                                linkImage  = linkImage.replace('../', '')
                                result = None; source = file_name = ''; size = 0
                                if flgCopy!=None:
                                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                                else:
                                    result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                                    if result!=None:
                                        if flgImg==False: flgImg = True; thumbnail = source
                                        imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': caption}          
            # Rewrite `data` in place: swap image entries for their saved
            # copies, strip caption text that was duplicated into following
            # text entries, and queue unusable entries for deletion (listKeys).
            flgLength = 0
            for i in range(0, len(data)):
                if data[i].has_key('src'): 
                    src_img =  data[i]['src'];
                    if imageOfArticle.has_key(src_img):
                        data[i] = imageOfArticle[src_img]
                        cCaption = data[i]['caption'];  cprint(data[i]['data'], 'yellow')
                        print 'Caption: ', cCaption
                        flgLength = len(cCaption)
                    else: listKeys.append(i)
                else:
                    if len(cCaption)>1: 
                        # Remove the caption of the preceding image from this
                        # text entry if it was duplicated there.
                        cData = data[i]['data']; 
                        if pyclib.getDatatypeName(cData)=='unicode': cData = cData.encode('utf-8')
                        cData = unicode(cData, 'utf-8')
                        if len(cCaption) <= len(cData): cData = cData.replace(cCaption, '')
                        else: flgLength -= len(cData); listKeys.append(i); continue
                        if len(cData)>1: data[i]['data'] = cData; print pyclib.toAscii(cData)
                        else: listKeys.append(i)
                        cCaption = ''
                    else:
                        # Once the site's footer marker is seen, discard
                        # everything after it.
                        if flgStop: listKeys.append(i); continue 
                        cData = data[i]['data']; 
                        if pyclib.getDatatypeName(cData)=='unicode': cData = cData.encode('utf-8')
                        cData = unicode(cData, 'utf-8')
                        if flgLength > 0: 
                            if len(cData)<=(flgLength+2): listKeys.append(i); flgLength = 0; continue
                            flgLength = 0
                        if len(cData)<2: listKeys.append(i); continue
                        if cData==unicode('Hoa Học Trò Online', 'utf-8'): flgStop = True
                        if data[i]['type']=='textbold': print pyclib.toAscii(cData)
                        else: print pyclib.toAscii(cData)
                print '-----------------'
            # Delete queued indices back-to-front so positions stay valid.
            for key in range(len(listKeys)-1, -1, -1): del data[listKeys[key]]
            # Fallback thumbnail: first image inside the article detail div.
            if len(thumbnail) < 2:
                thumbNode   =   tree.xpath('//div[@class="newsdetail"]//div[@class="dvXemtin"]//div[@class="dvChitiettin"]//img')
                if len(thumbNode) > 0:
                    linkImage   = thumbNode[0].get('src')
                    if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                    if linkImage[:4]!='http':
                        if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                        else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                    linkImage = linkImage.replace('/..', '')
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                        if result!=None: thumbnail = source
                        else: 
                            linkImage   = re.sub('\s', '', linkImage)
                            if flgCopy!=None:
                                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                            else:
                                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                                if result!=None: thumbnail = source
	    '''	
            print imgs
            for item in imageOfArticle: print item 
	    '''
            # Assemble the article document and save it (only when at least
            # one content entry survived the filtering above).
            doc = ({ 'hashUrl'       :   aId,
                    'title'         :   title,
                    'thumbnail'     :   thumbnail,
                    'description'   :   description,
                    'content'       :   data,
                    'newsLink'      :   link,
                    'update'        :   postedDate,
                    'source'        :   'hoahoctro.vn',
                    'category'      :   lft,
                    'root'          :   root_id,
                    'is_active'     :   True,
                    'lastupdate'    :   datetime.datetime.utcnow(),
                    'timestamp'     :   time.time(),
                    'date'          :   datetime.datetime.utcnow(),
                    'tags'          :   tags, })
            print postedDate
            print aId
            print pyclib.toAscii('Title: ' + title)
            cprint('Thumbnail: ' + thumbnail, 'green')
            print pyclib.toAscii('Intro: ' + description)
            collection = self.DB['article']
            if len(data) > 0: totalNewsCrawlered += 1; collection.save(doc)
            else: cprint('XPath không đúng, không thể lấy được nội dung của tin.', 'red')
            return 0
        except:
            traceback.print_exc()
                    
    def getInfoPage(self, url):
        ''' Return the number of listing pages for a category index `url`:
            the page number in the last pager link plus one, defaulting to
            100 when no pager is found.

            NOTE(review): returns the tuple (None, None) when `url` is empty
            but a plain int otherwise; processCategory passes the result to
            range(), which would raise on the tuple — confirm intent.
        '''
        try:
            if url==None or url=='': return None, None
            maxPage     =   100
            tree        =   pyclib.getXMLTree(url)
            pageNode    =   tree.xpath('//div[@class="newsdetail"]//div/ul[@class="pager"]/li/a')
            if len(pageNode) > 1:
                href    =   pageNode[len(pageNode) -1].get('href')                
                m       =   pyclib.regexString('.hht/p(\d+)$', href)
                if m:   maxPage = int(float(m.group(1))) + 1
            return maxPage 
        except:
            traceback.print_exc()
                    
    def processPage(self, page, cat):
        ''' Crawl one listing page of category `cat`.

            Returns (duplicates, processed) for the page — processArticle
            returns 1 for a duplicate and 0 for a new article — or
            (None, None) when the page has no article list or on error.
        '''
        try:
            gbcount     = loop = 0 
            lurl        =   '{0}/{1}/p{2}'.format(SITE_URL, self.CATEGORIES[cat]['link'], page)
            cprint('Process page : ' + lurl, 'yellow')
            tree        =   pyclib.getXMLTree(lurl)
            # The page has one featured article followed by a table of links.
            firstNode   =   tree.xpath('//div[@class="newsdetail"]/div[@class="firstArticle"]/a')
            if len(firstNode) > 0:
                count = self.processArticle('{0}{1}'.format(SITE_URL, firstNode[0].get('href')), cat)
                if count!=None: gbcount += count; loop += 1
            listNode    =   tree.xpath('//div[@class="newsdetail"]/table[@class="newslist"]//tr/td[2]/a')
            if len(listNode) < 1: return None, None
            for node in listNode:  
                count = self.processArticle('{0}{1}'.format(SITE_URL, node.get('href')), cat)
                if count!=None: gbcount += count; loop += 1
            return gbcount, loop
        except:
            traceback.print_exc(); return None, None
            
    def processCategory(self, cat):
        ''' Crawl all listing pages of category `cat`, stopping early once
            more than MAX_COUNT duplicates have been seen or more than
            MAX_ARTICLE articles have been processed.
        '''
        try:
            gbcount = 0; loop = 0
            cprint('Process category : ' + self.CATEGORIES[cat]['link'], 'yellow')
            lurl = '{0}/{1}'.format(SITE_URL, self.CATEGORIES[cat]['link'])
            maxPage = self.getInfoPage(lurl)
            for page in range(0, maxPage):
                print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
                if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                    cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                    return 
                c, l = self.processPage(page, cat)
                if c!=None: gbcount += c; loop += l
        except:
            traceback.print_exc()

def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức hoahoctro.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

def forceQuit():
    try:
        print 'Finished.', datetime.datetime.now()
        pid = os.getpid(); os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
        
if __name__ == '__main__':
    # Entry point: start the timeout watchdog, crawl every category through a
    # (single-worker) pool, log a run summary, then hard-exit.
    try:
        cprint('start crawler hoahoctro.vn', 'yellow')
        totalNewsCrawlered = 0; totalNewsDuplicated = 0
        start = time.time() 
        # Fix: Thread.start() returns None, so the original
        # 'timeout = threading.Thread(...).start()' stored nothing useful.
        # Keep the thread object instead.
        watchdog = threading.Thread(target=timeOut)
        watchdog.start()
        crawler = Hoahoctro(MONGO_SERVER, MONGO_PORT, DATABASE)
        pool = workerpool.WorkerPool(size=1)
        pool.map(crawler.processCategory, crawler.CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        # Zero activity of any kind means the site layout or network broke.
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức hoahoctro.vn không hoạt động", 'utf8'))
        else:
            logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
        logger.info('finished crawler hoahoctro.vn')
        if flgCopy!=None: ssh.close()
        forceQuit()
    except:
        traceback.print_exc()