# -*- coding: utf-8 -*-
import re, sys, os
import datetime, time
import traceback
import workerpool
import hashlib
import urlparse
import pycommonlib as pyclib
import html2textlib
import threading
import urllib

from pymongo    import Connection
from urlparse   import urljoin
from termcolor  import cprint 
from lxml       import etree

# --- Scraper configuration: vietnamnet.vn -> `tintuc_v2` MongoDB ---
LOCAL_PATH      = '/home/hoangnamhai/HarvestedData/tintuc/news'   # local dir for downloaded images
MONGO_SERVER    = 'beta.mana.vn'   
MONGO_PORT      = 27017
TINTUC_DB       = 'tintuc_v2'
PREFIX          = '/uploads/news' 
SITE_URL        = 'http://vietnamnet.vn'
MAX_COUNT       = 15    # stop a category once this many duplicates were seen
MAX_ARTICLE     = 50    # stop a category after this many articles were visited
MAX_PAGE        = 10    # range(1, MAX_PAGE): listing pages 1..9 per category
CONNECT         = Connection(MONGO_SERVER, MONGO_PORT)   # opened at import time
DB              = CONNECT[TINTUC_DB]
COLLECTION_ARTICLE  = 'article'
COLLECTION_CATEGORY = 'category'
os.umask(0000)   # make files created by image downloads world-writable
flagIntro = False   # NOTE(review): appears unused — processArticle uses a local flgIntro instead
IMG_LOGO        = 'http://img.vietnamnet.vn/logo.gif'   # site logo, filtered out of article text
# flgCopy != None switches image saving to SCP upload via the SSH session below
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # NOTE(review): SSH password is empty here — confirm key-based auth is intended
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    sftp    = ssh.open_sftp()
    # NOTE(review): this check can never trigger — ssh was already dereferenced above
    if ssh==None: pyclib.forceQuit()
start = 0   # run start timestamp, set in __main__ and read by the timeOut() watchdog

# Map of vietnamnet.vn URL slugs -> ingestion metadata:
#   'tags'     : keyword tags stored on every saved article of this category
#   'flag'     : placeholder; never read anywhere in this file
#   'category' : display name matched against the `data` field of the Mongo
#                category collection (see getRootLft)
CATEGORIES = {
    'xa-hoi'   : {'tags': ['xa hoi'], 'flag': False, 'category': unicode('Văn hóa - Xã hội', 'utf-8')},
    'giao-duc' : {'tags': ['giao duc'], 'flag': False,  'category': unicode('Giáo dục', 'utf-8')},
    'chinh-tri': {'tags': ['chinh tri'], 'flag': False, 'category': unicode('Văn hóa - Xã hội', 'utf-8')},
    'quoc-te'  : {'tags': ['quoc te'], 'flag': False, 'category': unicode('Thế giới', 'utf-8')},
    'van-hoa'  : {'tags': ['van-hoa'], 'flag': False, 'category': unicode('Văn hóa - Xã hội', 'utf-8')},
    'khoa-hoc' : {'tags': ['khoa-hoc'], 'flag': False,  'category': unicode('Khoa học', 'utf-8')},
    # NOTE(review): tag 'phat luat' looks like a typo for 'phap luat' — confirm before changing
    'ban-doc-phap-luat' : {'tags': ['phat luat'], 'flag': False, 'category': unicode('Tâm sự', 'utf-8')},
    'chuyen-dong-tre'   : {'tags': ['chuyen dong tre'], 'flag': False, 'category': unicode('Văn hóa - Xã hội', 'utf-8')},
    'cong-nghe-thong-tin-vien-thong' : {'tags': ['cntt vien thong'], 'flag': False, 'category': unicode('Công nghệ số', 'utf-8')},
#    'kinh-te'  : {'tags': ['kinh te'], 'flag': False, 'category': unicode('Kinh doanh', 'utf-8')},
}

# Boilerplate "related news" headings that are stripped from article body
# text during extraction (processArticle replaces each occurrence with '').
REMOVETEXT = [
              unicode('Tin bài khác', 'utf-8'),
              unicode('TIN BÀI KHÁC', 'utf-8'),
              unicode('Tin bài liên quan', 'utf-8'),
              unicode('TIN BÀI LIÊN QUAN', 'utf-8'), 
              unicode('Tin liên quan', 'utf-8'), 
              unicode('TIN LIÊN QUAN', 'utf-8'), 
              unicode('Tin bài cùng chuyên mục', 'utf-8'),
              unicode('TIN BÀI CÙNG CHUYÊN MỤC', 'utf-8'),
          ]  

def getRootLft(cat):
    '''Look up the Mongo (root_id, lft) pair for a configured category.

    cat: key into CATEGORIES; its unicode 'category' display name is matched
    against the `data` field of the category collection.

    Returns (root_id, lft) on success, (None, None) when the category is not
    present in the DB or any error occurs (the error is printed, not raised).
    '''
    try:
        data = CATEGORIES[cat]['category']
        collection = DB[COLLECTION_CATEGORY]
        result = collection.find_one({'data': data})
        # `is None` instead of `== None`: identity test is the correct idiom
        if result is None:
            cprint('Category chưa tồn tại !', 'red')
            return None, None
        return result['root_id'], result['lft']
    except:
        traceback.print_exc()
        return None, None

# Resolve the Mongo (root_id, lft) pair for every configured category once,
# at import time. Categories that cannot be resolved are simply left out of
# ROOTLEFT; processArticle later refuses articles whose category is missing.
ROOTLEFT = {}
for key, value in CATEGORIES.iteritems():
    root_id, lft = getRootLft(key)
    print key, root_id, lft
    if root_id!=None: ROOTLEFT[key] = {'root_id': root_id, 'lft': lft}

def getDateFromArticle(t):
    '''Parse an article timestamp string into a UTC-adjusted datetime.

    Example: "Ngày 08/07/2011 09:12:33 AM (GMT+7)" -> 2011-07-08 09:12:33,
    shifted by the local `time.timezone` offset (input order is day/month/year).

    Returns:
        None                      when `t` is None/empty or an error occurs,
        parsed datetime + offset  when a d/m/Y H:M:S timestamp is found,
        datetime.utcnow()         when `t` contains no timestamp.
    '''
    try:
        if t is None or t == '':
            return None
        preg = re.compile(r'(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)')
        m = preg.search(t)
        # BUGFIX: re.search returns None (never '') on failure. The old test
        # `m != ''` was always true, so a non-matching string crashed on
        # m.group(...) and the utcnow() fallback below was unreachable.
        if m is not None:
            day, month, year = int(m.group(1)), int(m.group(2)), int(m.group(3))
            hh, mm, ss = int(m.group(4)), int(m.group(5)), int(m.group(6))
            return datetime.timedelta(seconds=time.timezone) + datetime.datetime(year, month, day, hh, mm, ss)
        return datetime.datetime.utcnow()
    except:
        traceback.print_exc()
        return None
    
def checkArticleDuplicate(link):
    '''Check whether an article URL was already ingested.

    Returns:
        None    when `link` is None/empty or an unexpected error occurs,
        1       when a document with this URL hash already exists,
        str     the md5 hex digest of `link` otherwise (to store with the doc).
    '''
    try:
        if link is None or link == '':
            return
        digest = hashlib.md5(link).hexdigest()
        articles = DB[COLLECTION_ARTICLE]
        existing = articles.find_one({'hashUrl' : digest})
        if existing is not None:
            cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red')
            return 1
        return digest
    except:
        traceback.print_exc()

def getArticleId(link):
    '''Return the numeric article id embedded as "/<digits>/" in `link`,
    or None when no id is present or an error occurs.'''
    try:
        match = pyclib.regexString('/(\d+)/', link)
        return match.group(1) if match else None
    except:
        traceback.print_exc()
                
def processArticle(link, cat):
    ''' Process a single news article URL.

        - Checks whether the article already exists in Mongo.
        - Extracts title, posted date, description, images (with and without
          captions) and the body text, downloads the images, and saves one
          document into the `article` collection.

        Returns:
            1    when the article was saved or already existed,
            0    when the category is unknown or the page layout did not match,
            None when the content node is missing or an exception occurred.
    '''
    try:
        # Duplicate check: None = bad link/error, 1 = already stored,
        # otherwise the md5 hash of the URL to store with the document.
        hashUrl = checkArticleDuplicate(link)
        if hashUrl==None or hashUrl==1: return hashUrl
        root_id = lft = 0; result = 0
        if ROOTLEFT.has_key(cat): root_id = ROOTLEFT[cat]['root_id']; lft = ROOTLEFT[cat]['lft']
        else: cprint('Chưa tồn tại category.', 'red'); return result
        print ('########################################################################################')
        cprint('URL Article: ' + pyclib.toAscii(link), 'yellow')

        # Extraction state: thumbnail = first saved image, imageOfArticle maps
        # image src -> saved-image content item, listKeys collects indices of
        # content items to drop at the end.
        title = thumbnail = description = ''; data = []; flgImg = False; imageOfArticle = {}; flgStop = False
        postedDate = datetime.datetime.utcnow(); cCaption = ''; listKeys = []; caption = ''; flgIntro = False

        tree        = pyclib.getXMLTree(link)
        oneNode     = tree.xpath('//div[@class="container"]//div[contains(@class, "columnsPage")]//div[@class="columnsPageLeft"]')
        if len(oneNode)<1: cprint('Sai xpath không thể lấy được nội dung của tin.', 'red'); return result
        primaryNode = oneNode[0]
        dateNode    = primaryNode.xpath('./div[@class="clearfix"]') 
        if len(dateNode): postedDate = getDateFromArticle(pyclib.getStringWithNode(dateNode[0]))
        titleNode   = primaryNode.xpath('./div[@class="articleDetailBox"]/h1')
        if len(titleNode)>0: 
            title   = pyclib.getStringWithNode(titleNode[0])
            title   = pyclib.toUnicodeDungSan(title)

        # Remove "quote" tables from the article body
        tableNode    = primaryNode.xpath('./div[@class="articleDetailBox"]/div[@class="article_content"]//table[@class="quote center"]')
        for table in tableNode: table.getparent().remove(table)
        # Remove every <a> tag inside the article content
        removeNode  = primaryNode.xpath('./div[@class="articleDetailBox"]/div[@class="article_content"]//a')
        for taga in removeNode:
            taga.getparent().remove(taga)
            '''
            linka  = taga.get('href')
            checka = getArticleId(linka)
            if checka!=None: taga.getparent().remove(taga) 
            '''
        contentNode  = primaryNode.xpath('./div[@class="articleDetailBox"]/div[@class="article_content"]')
        if len(contentNode) < 1: return
        chtml =  etree.tounicode(contentNode[0], method='html')    
        # Flatten the article HTML into a list of content items
        data, imgs   = html2textlib.getContent(chtml, SITE_URL, explode='\n', output=False, stdOut=False)
        
        # Collect captioned images ("image center" tables): download each one
        # (over SCP when flgCopy is set) and remember its caption text.
        imageNode    = primaryNode.xpath('./div[@class="articleDetailBox"]/div[@class="article_content"]//table[@class="image center"]')
        for node in imageNode:
            imgNode  = node.xpath('.//img') 
            if len(imgNode)<1: continue
            text_caption = ''
            linkImage   = imgNode[0].get('src')
            if type(linkImage).__name__=='unicode': linkImage = linkImage.encode('utf-8')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
            else:
                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
            if result!=None:
                # First successfully saved image becomes the article thumbnail
                if flgImg==False: flgImg = True; thumbnail = source
                text_caption = pyclib.getStringWithNode(node)
                text_caption = pyclib.toUnicodeDungSan(text_caption)
                imageOfArticle[linkImage] = {'data': source, 'type': 'image', 'caption': text_caption}
                # Also index the URL-quoted variant of the src so either form matches
                tmp = 'http:' + urllib.quote(linkImage[5:])
                imageOfArticle[tmp] = {'data': source, 'type': 'image', 'caption': text_caption}
        # Collect the remaining images that have no caption table
        imageNode    = primaryNode.xpath('./div[@class="articleDetailBox"]/div[@class="article_content"]//img')
        for node in imageNode:
            linkImage   = node.get('src')
            if type(linkImage).__name__=='unicode': linkImage = linkImage.encode('utf-8')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
            else:
                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
            if result!=None:
                if flgImg==False: flgImg = True; thumbnail = source
                text_caption = pyclib.getStringWithNode(node)
                text_caption = pyclib.toUnicodeDungSan(text_caption)
                if not imageOfArticle.has_key(linkImage):
                    imageOfArticle[linkImage] = {'data': source, 'type': 'image', 'caption': ''}
                tmp = 'http:' + urllib.quote(linkImage[5:])
                if not imageOfArticle.has_key(tmp):
                    imageOfArticle[tmp] = {'data': source, 'type': 'image', 'caption': ''}

        # Walk the flattened content: substitute saved-image entries for raw
        # image items, drop text that merely repeats an image caption or is
        # boilerplate, and take the first surviving text as the description.
        flgLength = 0
        for i in range(0, len(data)):
            if data[i].has_key('src'): 
                src_img =  data[i]['src'];
                if imageOfArticle.has_key(src_img):
                    data[i] = imageOfArticle[src_img]
                    cCaption = data[i]['caption'];  print pyclib.toAscii(data[i]['data'])
                    if type(cCaption).__name__ == 'unicode': cCaption = cCaption.encode('utf-8')
                    print 'Caption: ', pyclib.toAscii(cCaption)
                    flgLength = len(cCaption)
                else: listKeys.append(i)
            else:
                if len(cCaption)>1: 
                    # Strip the caption text duplicated into the text node that
                    # follows the image
                    cData = data[i]['data']; 
                    if type(cData).__name__ == 'unicode': cData = cData.encode('utf-8')
                    if len(cCaption) <= len(cData): 
                        cData = cData.replace(cCaption, '')
                    else: flgLength -= len(cData); listKeys.append(i); continue
                    if len(cData)>1: data[i]['data'] = cData; print pyclib.toAscii(data[i]['data'])
                    else: listKeys.append(i)
                    cCaption = ''
                else:
                    if flgStop: listKeys.append(i); continue 
                    cData = data[i]['data']
                    if type(cData).__name__ == 'unicode': cData = cData.encode('utf-8')
                    cData = unicode(cData, 'utf-8')
                    # Drop short leftovers right after a caption was consumed
                    if flgLength > 0: 
                        if len(cData)<=(flgLength+2): listKeys.append(i); flgLength = 0; continue
                        flgLength = 0
                    if len(cData)<2: listKeys.append(i); continue
                    if cData==IMG_LOGO: listKeys.append(i); continue
                    # Strip boilerplate "related news" headings
                    for remove in REMOVETEXT:
                        cData = cData.replace(remove, '')
                    if len(cData)<2:  listKeys.append(i); continue
                    # Get description
                    if flgIntro==False: flgIntro = True; description = cData; listKeys.append(i); continue 
                    if data[i]['type']=='textbold': print pyclib.toAscii(cData)
                    else: print pyclib.toAscii(cData)
            print '-----------------'
        # Delete collected indices back-to-front so they remain valid
        for key in range(len(listKeys)-1, -1, -1): del data[listKeys[key]]
       
        #print imgs, imageOfArticle
        # Print the extracted result to the console
        print 'Title: ', pyclib.toAscii(title)
        print 'Date: ', postedDate
        print 'Intro: ', pyclib.toAscii(description)
        print 'Thumbnail: ', thumbnail
        collection = DB[COLLECTION_ARTICLE]
        doc = {
            'hashUrl': hashUrl,
            'title': title,
            'root': root_id,
            'category': lft,
            'thumbnail': thumbnail,
            'description': description,
            'content': data,
            'newsLink': link,
            'timestamp': time.time(),
            'lastupdate': datetime.datetime.utcnow(),
            'date': datetime.datetime.utcnow(),
            'update': postedDate,
            'is_active': True,
            'source': 'vietnamnet.vn',
            'tags': CATEGORIES[cat]['tags']
        }
        if len(data) > 0: collection.save(doc); result = 1 
        else: cprint('Xpath sai không lấy được nội dung của tin.', 'red')
        return result
    except:
        traceback.print_exc()
        
def processCategory(cat):
    try:
        count = loop = 0
        for page in range(1, MAX_PAGE):
            link = '{0}/{1}/trang{2}/{3}'.format('http://vietnamnet.vn/vn', cat, page, 'index.html')
            tree = pyclib.getXMLTree(link)
            # Tất cả các link trong trang
            allNode = tree.xpath('//div[@class="container"]//div[contains(@class, "columnsPage")]//div[@class="columnsPageLeft"]//a')
            for i in range(1, len(allNode)-1):
                node = allNode[i]
                content = pyclib.getStringWithNode(node)
                if len(content)<1: continue
                link = urljoin(SITE_URL, node.get('href'))
                print cat, 'COUNT, LOOP :(', count, ', ', loop, ')'
                if count > MAX_COUNT or loop > MAX_ARTICLE:
                    cprint('Dừng xử lý vì trùng lặp vượt quá giới hạn hoặc quá số lượng tin cần lấy.', 'red'); return
                c = processArticle(link, cat)
                if c!=None: count += c; loop += 1
                else: 
                    retry = 0
                    while(retry < 3):        
                        try:
                            cprint('Lưu tin lần thứ ' + str(retry), 'red')
                            c = processArticle(link, cat)
                            if c!=None: count += c; loop += 1
                            retry += 1
                        except:
                            traceback.print_exc()
                            retry += 1 
    except:
        traceback.print_exc()

def timeOut():        
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        # Record the run start for the timeOut() watchdog
        start = time.time() 
        # Thread.start() returns None, so `timeout` is always None; the
        # daemonless watchdog thread itself keeps running in the background.
        timeout = threading.Thread(target=timeOut).start()
        # Single worker: categories are processed sequentially
        pool = workerpool.WorkerPool(size=1)
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown()
        pool.wait()
        print 'Finished.', datetime.datetime.now()
        if flgCopy!=None: ssh.close()
        # Hard-exit so the non-daemon watchdog thread cannot keep the
        # process alive; the os.kill after _exit never runs.
        pid = os.getpid(); os._exit(1); os.kill(pid, 9)  # force kill ~ kill -9 pid
    except:
        traceback.print_exc()

