# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re, os
import html2textlib
import threading

from pymongo    import Connection
from termcolor  import cprint
from lxml       import etree

# --- configuration ----------------------------------------------------------
LOCAL_PATH          = '/home/hoangnamhai/HarvestedData/tintuc/news'
MONGO_SERVER        = '27.0.12.106'   
#MONGO_SERVER        = 'beta.mana.vn'   
MONGO_PORT          = 27017
DATABASE            = 'tintuc_v2'
PREFIX              = '/uploads/news' 
SITE_URL            = 'http://kenh14.vn'
MAX_COUNT           = 15    # stop a category after this many duplicate hits
MAX_ARTICLE         = 30    # max articles to process per category
MAX_PAGE            = 20    # max listing pages to walk per category
os.umask(0000)              # saved image files/dirs get unrestricted perms

# --- runtime state ----------------------------------------------------------
logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'kenh14.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
flgCopy             = pyclib.getArgs()   # non-None => upload images over SCP
ssh = None; sftp = None
if flgCopy!=None:
    # NOTE(review): empty password — presumably key-based auth; confirm.
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # Bug fix: check the connection BEFORE calling open_sftp(). The original
    # checked ssh==None only after open_sftp(), so a failed connection raised
    # an AttributeError instead of logging the critical alert and quitting.
    if ssh==None:
        logger.critical(unicode("crawler tin tức kenh14.vn không hoạt động", 'utf8'))
        pyclib.forceQuit()
    sftp    = ssh.open_sftp()
start = 0   # crawl start timestamp; set properly in __main__, read by timeOut()

class Kenh14():
    CATEGORIES = { 
       'star':       {'link' : 'ngoi-sao',   'category': unicode('Giải trí', 'utf-8'), 'tags': ['giai tri']},
       'phim-anh':   {'link' : 'phim-anh',   'category': unicode('Giải trí', 'utf-8'), 'tags': ['phim anh', 'dien anh', 'giai tri']},
       'am-nhac':    {'link' : 'am-nhac',    'category': unicode('Giải trí', 'utf-8'), 'tags': ['am nhac', 'dien anh', 'giai tri']},
       'hoc-duong':  {'link' : 'hoc-duong',  'category': unicode('Giáo dục', 'utf-8'), 'tags': ['giao duc']},
       'the-thao':   {'link' : 'sport',      'category': unicode('Thể thao', 'utf-8'),  'tags': ['the thao']},
       'chuyen-la':  {'link' : 'chuyen-la',  'category': unicode('Chuyện lạ', 'utf-8'),        'tags': ['chuyen la', 'fun']},
       'doi-song':   {'link' : 'doi-song',   'category': unicode('Văn hóa - Xã hội', 'utf-8'), 'tags': ['doi song', 'xa hoi']},
       'cong-nghe':  {'link' : 'cong-nghe',  'category': unicode('Công nghệ số', 'utf-8'),     'tags': ['cntt', 'cong nghe so']},
       'thoi-trang': {'link' : 'thoi-trang', 'category': unicode('Làm đẹp - Thời trang', 'utf-8'), 'tags': ['thoi trang', 'fashion']},
       'qua-tang':   {'link' : 'qua-tang',   'category': unicode('Sức khỏe - Giới tính', 'utf-8'), 'tags': ['kheo tay']},       
       'gioi-tinh':  {'link' : 'gioi-tinh',  'category': unicode('Sức khỏe - Giới tính', 'utf-8'), 'tags': ['gioi tinh', 'tinh yeu']}, 
       'teeniscover':   {'link' : 'teeniscover',    'category': unicode('Giải trí', 'utf-8'), 'tags': ['teen ', 'giai tri']},
       'made-by-teens': {'link' : 'made-by-teens',  'category': unicode('Làm đẹp - Thời trang', 'utf-8'), 'tags': ['qua tang', 'thoi trang']},
    }

    def __init__(self, server, port, database):
        CONNECT             = Connection(server, port)
        self.DB             = CONNECT[database]
    
    def getRootLftCategory(self, name):
        try:
            collection = self.DB['category']
            if name==None or name=='': return None, None 
            root        =   collection.find_one({'data': 'Tin tức'}, {'root_id': 1})
            if root==None: 
                result  =   collection.find_one({'data': name}, {'root_id': 1, 'lft': 1})
            else:  
                result  =   collection.find_one({'data': name, 'root_id': root['root_id']}, {'root_id': 1, 'lft': 1})
            if result==None: cprint('Category chưa tồn tại !', 'red'); return None, None
            else: return result['root_id'], result['lft']
        except:
            traceback.print_exc()
    
    def getDatetime(self, text):
       try:
           result = datetime.datetime.utcnow()
           if text==None or text=='': return result
           m        =   pyclib.regexString('(\d+):(\d+):(\d+) (\d+)/(\d+)/(\d+)', text)
           if m:
               year = int(float(m.group(6))); month  = int(float(m.group(5))); day    = int(float(m.group(4)))
               hour = int(float(m.group(1))); minute = int(float(m.group(2))); second = int(float(m.group(3))) 
               return datetime.datetime(year, month, day, hour, minute, second) + datetime.timedelta(seconds=time.timezone)
           return result
       except:
           traceback.print_exc()
   
    def checkLinkImage(self, link):
        try:
            if link==None or link=='': return False
            m   = pyclib.regexString('/EmoticonOng/', link)
            if m: return True
            return False
        except:
            traceback.print_exc()

    def checkArticleDuplicate(self, link):
        ''' Kiểm tra trùng tin tức trong DB, trả vê 1 nếu trùng, 0 trong trường hợp ngược lại
        ''' 
        try:
            collection  =  self.DB['article']
            if link==None or link=='': return None, None
            m           =   pyclib.regexString('/([a-z0-9]{1,3})/(\d+)/', link)
            if m: aId   =   '{0}-{1}'.format(m.group(1), m.group(2)) 
            else: aId   =   pyclib.getMd5(link)   
            result      =   collection.find_one({'hashUrl' : aId, 'source': 'kenh14.vn'})
            if result!=None:  cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red'); return 1, aId
            return 0, aId 
        except:
            traceback.print_exc()
            return None, None
    
    def processArticle(self, link, cat):
        ''' Hàm xử lý chi tiết một tin tức, nếu tin đã tồn tại trong DB thì trả về: 1, ngược lại: 0
        '''
        try:
            global totalNewsCrawlered, totalNewsDuplicated
            if link==None or link=='': return
            root_id, lft    = self.getRootLftCategory(self.CATEGORIES[cat]['category'])
            if root_id==None: return
            tags = self.CATEGORIES[cat]['tags']
            check_exists, aId    = self.checkArticleDuplicate(link)
            if check_exists==1: totalNewsDuplicated += 1; return 1
            print ('########################################################################################')
            print pyclib.toAscii('Process article: ' + link)

            title = thumbnail = description = ''; data = []; flgImg = False; imageOfArticle = {}; flgStop = False
            postedDate = datetime.datetime.utcnow(); cCaption = ''; listKeys = []; caption=''
            tree            =   pyclib.getXMLTree(link)
            contentNode     =   tree.xpath('//div[@class="wrapper"]//div[@class="postpadding"]') 
            if len(contentNode) <= 0: cprint('Tin có cấu trúc khác, không thể lấy được nội dung', 'red'); return
            primaryNode     =   contentNode[0]
            titleNode       =   primaryNode.xpath('.//h1')
            if len(titleNode) > 0:  title = pyclib.getStringWithNode(titleNode[0]); titleNode[0].getparent().remove(titleNode[0])
            dateNode        =   primaryNode.xpath('.//div[@class="meta"]/span[@class="date"]')
            if len(dateNode) > 0:   
                dateArticle = pyclib.getStringWithNode(dateNode[0]); dateNode[0].getparent().remove(dateNode[0])
            timeNode        =   primaryNode.xpath('.//div[@class="meta"]/span[@class="time"]')
            if len(timeNode) > 0:   
                timeArticle = pyclib.getStringWithNode(timeNode[0]); timeNode[0].getparent().remove(timeNode[0])
            
            postedDate  =  self.getDatetime('{0} {1}'.format(timeArticle, dateArticle))
            introNode       =   primaryNode.xpath('.//p[@class="sapo"]')
            if len(introNode) > 0:  description = pyclib.getStringWithNode(introNode[0]); introNode[0].getparent().remove(introNode[0])
            # remove all tag style and script
            listStyle       =   primaryNode.xpath('.//style')
            if len(listStyle) > 0:
                for style in listStyle: style.getparent().remove(style)
            listScript      =   primaryNode.xpath('.//script')
            if len(listScript) > 0:
                for script in listScript: script.getparent().remove(script)
            contentNode        =   primaryNode.xpath('.//div[@class="content"]')
            if len(contentNode) < 1: return
            chtml =  etree.tounicode(contentNode[0], method='html')    
            data, imgs   = html2textlib.getContent(chtml, SITE_URL, output=False, stdOut=False)

            # Lấy ảnh trước và remove node không cần thiết
            listNode        =   primaryNode.xpath('.//div[@class="content"]/*')
            if len(listNode)==1: 
                if listNode[0].tag == 'table':
                    listNode        =   primaryNode.xpath('.//div[@class="content"]/table//tr')
                else:
                    listNode        =   primaryNode.xpath('.//div[@class="content"]/div/*')
                    if len(listNode)<1:
                        listNode        =   primaryNode.xpath('.//div[@class="content"]/span/*')
            for node in listNode:
                if node.tag == "img":
                    linkImage   = node.get('src')
                    if self.checkLinkImage(linkImage): continue
                    if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                    if linkImage[:4]!='http':
                        if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                        else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                    linkSourceImage   = re.sub(r'\s', '', linkImage)
                    linkSourceImage   = re.sub(r'%20', '', linkSourceImage)
                    linkImage         = linkImage.replace('../', '')
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                    if result!=None:
                        if flgImg==False: flgImg = True; thumbnail = source
                        imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': ''}
                    continue
                imgNode  = node.xpath('.//img')
                if len(imgNode) > 0:
                    caption     = pyclib.getStringWithNode(node)
                    if caption==None or len(caption)<2: captien = ''
                    childNode   = node.xpath('./*')
                    flgCImg = False
                    for child in childNode:
                        if child.tag == "img": flgCImg = True; break
                        strNode  = pyclib.getStringWithNode(child)  
                        if len(strNode)>1: break;       

                    if flgCImg==False: caption = ''
                    if len(imgNode)>=2: caption = ''
                    for img in imgNode:
                        linkImage   = img.get('src')
                        cNode       = node.xpath('.//span')   
                        if len(cNode) < 1: caption = ''
                        if self.checkLinkImage(linkImage): continue
                        if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                        if linkImage[:4]!='http':
                            if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                            else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                        linkSourceImage   = re.sub(r'\s', '', linkImage)
                        linkSourceImage   = re.sub(r'%20', '', linkSourceImage)
                        linkImage         = linkImage.replace('../', '')
                        result = None; source = file_name = ''; size = 0
                        if flgCopy!=None:
                            result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                        else:
                            result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                        if result!=None:
                            if flgImg==False: flgImg = True; thumbnail = source
                            imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': caption}

            flgLength = 0
            for i in range(0, len(data)):
                if data[i].has_key('src'): 
                    src_img =  data[i]['src'];
                    if imageOfArticle.has_key(src_img):
                        data[i] = imageOfArticle[src_img]
                        cCaption = data[i]['caption'];  print pyclib.toAscii(data[i]['data'])
                        print 'Caption: ', pyclib.toAscii(cCaption)
                        if pyclib.getDatatypeName(cCaption) == 'unicode': cCaption = cCaption.encode('utf-8')
                        flgLength = len(cCaption)
                    else: listKeys.append(i)
                else:
                    if len(cCaption)>1: 
                        cData = data[i]['data']; 
                        if pyclib.getDatatypeName(cData) == 'unicode': cData = cData.encode('utf-8')
                        if len(cCaption) <= len(cData): cData = cData.replace(cCaption, '')
                        else: flgLength -= len(cData); listKeys.append(i); continue
                        if len(cData)>1: data[i]['data'] = cData;  print pyclib.toAscii(data[i]['data'])
                        else: listKeys.append(i)
                        cCaption = ''
                    else:
                        if flgStop: listKeys.append(i); continue 
                        if flgLength > 0: 
                            if len(data[i]['data'])<=(flgLength+2): listKeys.append(i); flgLength = 0; continue
                            flgLength = 0
                        if len(data[i]['data'])<2: listKeys.append(i); continue
                        if data[i]['type']=='textbold': print pyclib.toAscii(data[i]['data'])
                        else: print pyclib.toAscii(data[i]['data'])
                print '-----------------'
            for key in range(len(listKeys)-1, -1, -1): del data[listKeys[key]]
            #In ra màn hình để kiểm tra ảnh
            #print imgs
            #for item in imageOfArticle: print item 
            title = pyclib.toUnicodeDungSan(title)
            doc = ({ 'hashUrl'       :   aId,
                    'title'         :   title,
                    'thumbnail'     :   thumbnail,
                    'description'   :   description,
                    'content'       :   data,
                    'newsLink'      :   link,
                    'update'        :   postedDate,
                    'source'        :   'kenh14.vn',
                    'category'      :   lft,
                    'root'          :   root_id,
                    'is_active'     :   True,
                    'lastupdate'    :   datetime.datetime.utcnow(),
                    'timestamp'     :   time.time(),
                    'date'          :   datetime.datetime.utcnow(),
                    'tags'          :   tags, })
            collection  = self.DB['article']
            if len(data) > 0: totalNewsCrawlered += 1; collection.save(doc)
            else: cprint('XPath không đúng, không thể lấy được nội dung của tin.', 'red')
            print postedDate
            print aId
            if pyclib.getDatatypeName(title)=='unicode': title = title.encode('utf-8')
            print pyclib.toAscii('Title: ' + title)
            cprint('Thumbnail: ' + thumbnail, 'green')
            print pyclib.toAscii('Intro: ' + description)
            return 0
        except:
            traceback.print_exc()        
            
    def processPage(self, page, cat):
        try:
            gbcount     = loop = 0 
            lurl        =   '{0}/{1}/trang-{2}.chn'.format(SITE_URL, self.CATEGORIES[cat]['link'], page)
            print pyclib.toAscii('Process page : ' + lurl)
            tree        =   pyclib.getXMLTree(lurl)
            listNode    =   tree.xpath('//div[@class="wrapper"]//div[@class="listnews"]//div[@class="title"]/a')
            if len(listNode) < 1: return
            for node in listNode:  
                count = self.processArticle('{0}{1}'.format(SITE_URL, node.get('href')), cat)
                if count!=None: gbcount += count; loop += 1
            return gbcount, loop
        except:
            traceback.print_exc(); return None, None
            
    def processCategory(self, cat):
        try:
            gbcount = 0; loop = 0
            cprint('Process category : ' + self.CATEGORIES[cat]['link'], 'yellow')
            lurl = '{0}/{1}.chn'.format(SITE_URL, self.CATEGORIES[cat]['link'])
            for page in range(1, MAX_PAGE):
                print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
                if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                    cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                    return 
                c, l = self.processPage(page, cat)
                if c!=None: gbcount += c; loop += l
        except:
            traceback.print_exc()

def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức kenh14.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)
        
if __name__ == '__main__':
    try:
        # Entry point: start the watchdog, then crawl every configured
        # category through a single-worker pool, log the totals, and quit.
        cprint('start crawler kenh14.vn', 'yellow')
        totalNewsCrawlered = 0; totalNewsDuplicated = 0
        start = time.time() 
        # Bug fix: Thread.start() returns None, so the original assignment
        # kept no reference to the thread; keep the Thread object instead.
        timeout = threading.Thread(target=timeOut)
        timeout.start()
        crawler = Kenh14(MONGO_SERVER, MONGO_PORT, DATABASE)
        pool = workerpool.WorkerPool(size=1)
        pool.map(crawler.processCategory, crawler.CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức kenh14.vn không hoạt động", 'utf8'))
        else:
            logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
        logger.info('finished crawler kenh14.vn')
        if flgCopy!=None: ssh.close()
        pyclib.forceQuit()
    except:
        traceback.print_exc()
