# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import  workerpool
import  traceback
import  datetime, time
import  re, os

import  pycommonlib as pyclib
import  html2textlib
import  HTMLParser
import  cStringIO as StringIO
import  urllib
import  threading

from    pymongo     import  Connection
from    termcolor   import  cprint
from    urlparse    import  urlparse
from    lxml        import  etree

# --- crawler configuration -------------------------------------------------
LOCAL_PATH          = '/home/hoangnamhai/HarvestedData/tintuc/dantri'   # local image store
MONGO_SERVER        = 'beta.mana.vn'
MONGO_PORT          = 27017
DATABASE            = 'tintuc_v2'
PREFIX              = '/uploads/dantri'     # public URL prefix for saved images
SITE_URL            = 'http://dantri.com.vn'
MAX_COUNT           = 15    # duplicate articles tolerated per category before stopping
MAX_ARTICLE         = 30    # articles crawled per category before stopping
CONNECT             = Connection(MONGO_SERVER, MONGO_PORT)
DB                  = CONNECT[DATABASE]
MAX_PAGE            = 20    # listing pages scanned per category
ARTICLE_COLLECTION  = DB['article']
CATEGORY_COLLECTION = DB['category']
USER_COLLECTION     = DB['backend_user']
os.umask(0000)  # saved files/directories keep the mode they were created with

logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'dantri.com.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
flgCopy             = pyclib.getArgs()  # non-None => ship images to the remote host over SCP
ssh = None; sftp = None
if flgCopy!=None:
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # BUGFIX: validate the SSH handle BEFORE using it.  The original called
    # ssh.open_sftp() first, so a failed connection raised AttributeError and
    # the None-check below never executed.  (The original also re-tested
    # totalNewsCrawlered/totalNewsDuplicated == 0 here, which is always true
    # two lines after both were set to 0, so that check is dropped.)
    if ssh==None:
        logger.critical(unicode("crawler tin tức dantri.com.vn không hoạt động", 'utf8'))
        pyclib.forceQuit()
    sftp    = ssh.open_sftp()
start = 0

# Category map: dantri channel code -> {'link': listing slug, 'category':
# canonical CMS category name (unicode), 'tag': tags attached to articles}.
_CATEGORY_ROWS = (
    ('c20s20',   'xa-hoi',           'Văn hóa - Xã hội',     ['xa hoi']),
    ('c36s36',   'thegioi',          'Thế giới',             ['the gioi', 'quoc te']),
    ('c26s26',   'thethao',          'Thể thao',             ['the thao']),
    ('c25s25',   'giaoduc',          'Giáo dục',             ['giao duc']),
    ('c23s23',   'giaitri',          'Giải trí',             ['giai tri']),
    ('c119s119', 'sucmanhso',        'Công nghệ số',         ['cong nghe so']),
    ('c76s76',   'kinhdoanh',        'Kinh doanh',           ['kinh doanh']),
    ('c111s111', 'otoxemay',         'Tiêu dùng',            ['oto xe may']),
    ('c132s132', 'chuyenla',         'Chuyện lạ',            ['chuyen la']),
    ('c7s7',     'suckhoe',          'Sức khỏe - Giới tính', ['suc khoe']),
    ('c135s135', 'nhipsongtre',      'Văn hóa - Xã hội',     ['nhip song tre']),
    ('c130s130', 'tinhyeu-gioitinh', 'Sức khỏe - Giới tính', ['tinh yeu gioi tinh']),
)
CATEGORIES = dict((code, {'link': link,
                          'category': unicode(name, 'utf-8'),
                          'tag': tags})
                  for code, link, name, tags in _CATEGORY_ROWS)


def getRootLftCategory(name):
    ''' Resolve a category display name to its (root_id, lft) pair in the
        category tree.  The lookup is scoped under the 'Tin tức' root when
        that root exists; otherwise it searches by name alone.

        Returns (None, None) when the name is empty, unknown, or the query
        fails.
    '''
    try:
        if name==None or name=='': return None, None
        root    = CATEGORY_COLLECTION.find_one({'data': 'Tin tức'}, {'root_id': 1})
        if root==None:
            result  = CATEGORY_COLLECTION.find_one({'data': name}, {'root_id': 1, 'lft': 1})
        else:
            result  = CATEGORY_COLLECTION.find_one({'data': name, 'root_id': root['root_id']}, {'root_id': 1, 'lft': 1})
        if result==None:
            logger.info(unicode('Category {0} chưa tồn tại !'.format(name), 'utf-8'))
            cprint('Category chưa tồn tại !', 'red'); return None, None
        return result['root_id'], result['lft']
    except:
        traceback.print_exc()
        # BUGFIX: keep the two-value contract on error -- the original fell
        # through and returned a bare None, which raised TypeError at the
        # unpacking call sites.
        return None, None

def getDateTime(text):
    ''' Parse a dantri timestamp of the form "dd/mm/yyyy - HH:MM" and
        return it shifted to UTC.  Falls back to the current UTC time when
        the text is empty, does not match, or parsing raises.
    '''
    fallback = datetime.datetime.utcnow()
    try:
        if text==None or text=='': return fallback
        m = pyclib.regexString('(\d+)/(\d+)/(\d+) - (\d+):(\d+)', text)
        if m:
            # groups are pure digit strings, so int() alone suffices
            # (the original int(float(...)) round-trip was redundant)
            year = int(m.group(3)); month  = int(m.group(2)); day = int(m.group(1))
            hour = int(m.group(4)); minute = int(m.group(5))
            # site times are local to the host; add the timezone offset to get UTC
            return datetime.datetime(year, month, day, hour, minute) + datetime.timedelta(seconds=time.timezone)
        return fallback
    except:
        traceback.print_exc()
        # BUGFIX: the original returned None from this path, breaking
        # callers that expect a datetime (e.g. the article document).
        return fallback

def checkArticleDuplicate(link):
    ''' Check whether an article URL is already stored in the DB.

        Returns (1, article_id) when a duplicate exists, (0, article_id)
        when the article is new, and (None, None) on empty input or error.
    '''
    try:
        if link==None or link=='':
            return None, None
        # Prefer the slug-number id embedded in the URL; fall back to an MD5
        # of the whole link when the URL has no recognizable id.
        match = pyclib.regexString('/(\w+)-(\d+)/', link)
        aId = '{0}-{1}'.format(match.group(1), match.group(2)) if match else pyclib.getMd5(link)
        existing = ARTICLE_COLLECTION.find_one({'hashUrl' : aId, 'source': 'dantri.com.vn'})
        if existing==None:
            return 0, aId
        logger.warning('Already existed in database !!!')
        cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red')
        return 1, aId
    except:
        traceback.print_exc()
        return None, None

def getAuthor(name='crawler'):
    ''' Look up the backend user id for *name*, falling back to the
        'Hindua88' account.  Returns None when neither user exists or the
        lookup raises.
    '''
    try:
        author = USER_COLLECTION.find_one({'username': name}, {})
        if author==None:
            author = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        return author['_id'] if author!=None else None
    except:
        traceback.print_exc()

def processArticle(link, cat):
    ''' Hàm xử lý chi tiết một tin tức, nếu tin đã tồn tại trong DB thì trả về: 1, ngược lại: 0
    '''
    try:
        global totalNewsCrawlered, totalNewsDuplicated
        if link==None or link=='': return
        check_exists, aId    = checkArticleDuplicate(link)
        if check_exists==1: totalNewsDuplicated += 1; return 1
        print '#####################################################################################'
        print pyclib.toAscii('Process article: ' + link)
        root_id, lft = getRootLftCategory(CATEGORIES[cat]['category'])
        print 'Root_id, lft: ', root_id, lft
        if root_id==None: return
        tags                = CATEGORIES[cat]['tag']  

        title = thumbnail = description = ''; data = []; postedDate = datetime.datetime.utcnow() 
        flgImg = False; imageOfArticle = {}; cCaption = ''; listKeys = []; caption=''; flgStop = False

        tree                = pyclib.getXMLTree(link)
        contentNode         = tree.xpath('//div[@id="ctl00_IDContent_ctl00_divContent"]')
        if len(contentNode)==0: cprint('Sai xpath => không lấy được nội dung của tin.', 'red'); return
        primaryNode         = contentNode[0]
        dateNode            = primaryNode.xpath('.//div[@class="box26"]')
        if len(dateNode)>0:
            dtNode              = dateNode[0].xpath('./span')
            if len(dtNode)>0:
                postedDate      = getDateTime(pyclib.getStringWithNode(dtNode[0]))
            dateNode[0].getparent().remove(dateNode[0])
        titleNode           = primaryNode.xpath('.//div[@class="fon31 mt1"]')
        if len(titleNode) > 0:  title = pyclib.getStringWithNode(titleNode[0])
        introNode   = tree.xpath('.//div[@class="fon33 mt1"]')
        if len(introNode) > 0:
            aNode   = introNode[0].xpath('.//a')
            if len(aNode) > 0: 
                for an in aNode: an.getparent().remove(an)
            description = pyclib.getStringWithNode(introNode[0])

        cNode        = primaryNode.xpath('.//div[@class="fon34 mt3 mr2 fon43"]')
        if len(cNode) < 1: return
        chtml = etree.tounicode(cNode[0], method='html')    
        data, imgs   = html2textlib.getContent(chtml, SITE_URL, output=False, stdOut=False)
        
        listNode     = cNode[0].xpath('./*')
        for node in listNode:
            if node.tag == "img":
                linkImage   = node.get('src')
                result = None; source = file_name = ''; size = 0
                if flgCopy!=None:
                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                else:
                    result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                if result!=None:
                    if flgImg==False: flgImg = True; thumbnail = source
                    imageOfArticle[linkImage] = {'data': source, 'type': 'image', 'caption': ''}
                continue
            imgNode = node.xpath('.//img')  
            if len(imgNode)>0:
                if len(imgNode)==1:
                    caption     = pyclib.getStringWithNode(node) 
                    if caption==None or len(caption)>200: caption = ''
                    linkImage   = imgNode[0].get('src')
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                    if result!=None:
                        if flgImg==False: flgImg = True; thumbnail = source
                        imageOfArticle[linkImage] = {'data': source, 'type': 'image', 'caption': caption}
                else:
                    for img in imgNode:
                        linkImage   = img.get('src')
                        result = None; source = file_name = ''; size = 0
                        if flgCopy!=None:
                            result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                        else:
                            result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                        if result!=None:
                            if flgImg==False: flgImg = True; thumbnail = source
                            imageOfArticle[linkImage] = {'data': source, 'type': 'image', 'caption': ''}

        flgLength = 0
        for i in range(0, len(data)):
            if data[i].has_key('src'): 
                src_img = data[i]['src'];
                if imageOfArticle.has_key(src_img):
                    data[i] = imageOfArticle[src_img]
                    cCaption = data[i]['caption'];  print pyclib.toAscii(data[i]['data'])
                    if type(cCaption).__name__ == 'unicode': cCaption = cCaption.encode('utf-8')
                    print 'Caption: ', cCaption
                    flgLength = len(cCaption)
                else: listKeys.append(i)
            else:
                if len(cCaption)>1: 
                    cData = data[i]['data']; 
                    if type(cData).__name__ == 'unicode': cData = cData.encode('utf-8')
                    if len(cCaption) <= len(cData): 
                        cData = cData.replace(cCaption, '')
                    else: flgLength -= len(cData); listKeys.append(i); continue
                    if len(cData)>1: data[i]['data'] = cData; print pyclib.toAscii(data[i]['data'])
                    else: listKeys.append(i)
                    cCaption = ''
                else:
                    if flgStop: listKeys.append(i); continue 
                    if flgLength > 0: 
                        if len(data[i]['data'])<=(flgLength+2): listKeys.append(i); flgLength = 0; continue
                        flgLength = 0
                    if len(data[i]['data'])<2: listKeys.append(i); continue
                    if data[i]['type']=='textbold': print pyclib.toAscii(data[i]['data'])
                    else: print pyclib.toAscii(data[i]['data'])
            print '-----------------'
        for key in range(len(listKeys)-1, -1, -1): del data[listKeys[key]]
        title = pyclib.toUnicodeDungSan(title)
        doc= ({ 'hashUrl'       :   aId,
                'title'         :   title,
                'thumbnail'     :   thumbnail,
                'description'   :   description,
                'content'       :   data,
                'newsLink'      :   link,
                'update'        :   postedDate,
                'source'        :   'dantri.com.vn',
                'category'      :   lft,
                'root'          :   root_id,
                'is_active'     :   True,
                'lastupdate'    :   datetime.datetime.utcnow(),
                'timestamp'     :   time.time(),
                'date'          :   datetime.datetime.utcnow(),
                'tags'          :   tags, })


        if len(data) > 0: totalNewsCrawlered += 1; ARTICLE_COLLECTION.save(doc)
        else: cprint('Không lấy được nội dung của tin.', 'red')

        print postedDate
        print aId
        print pyclib.toAscii('Title: ' + title)
        cprint('Thumbnail: ' + thumbnail, 'green')
        print pyclib.toAscii('Intro: ' + description)
        return 0
    except:
        traceback.print_exc()        
        
def processPage(page, cat):
    ''' Crawl one listing page of a category.

        Returns (duplicate_count, processed_count); (None, None) when the
        page yields no article links or the crawl fails.
    '''
    try:
        gbcount = loop = 0
        lurl        = '{0}/{1}/{2}/{3}-{4}.htm'.format(SITE_URL, cat,  CATEGORIES[cat]['link'], 'trang' , page)
        cprint('Process page : ' + lurl, 'yellow')
        tree        = pyclib.getXMLTree(lurl)
        listNode    = tree.xpath('//div[@class="wrapper"]//div[@class="mt3 clearfix"]/div/a[1]')
        # BUGFIX: the original returned a bare None here, which broke the
        # `c, l = processPage(...)` unpacking in processCategory with a
        # TypeError and aborted the whole category via its except clause.
        if len(listNode) < 1: return None, None
        for node in listNode:
            count = processArticle('{0}{1}'.format(SITE_URL, node.get('href')), cat)
            if count!=None: gbcount += count; loop += 1
        return gbcount, loop
    except:
        traceback.print_exc(); return None, None
        
def processCategory(cat):
    try:
        gbcount = 0; loop = 0
        cprint('Process category : ' + CATEGORIES[cat]['link'], 'yellow')
        lurl = '{0}/{1}/{2}.htm'.format(SITE_URL, cat, CATEGORIES[cat]['link'])
        for page in range(1, MAX_PAGE):
            print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
            if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                return 
            c, l = processPage(page, cat)
            if c!=None: gbcount += c; loop += l
    except:
        traceback.print_exc()

def forceQuit():
    try:
        print 'Finished.', datetime.datetime.now()
        pid = os.getpid(); os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
        
def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức dantri.com.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        cprint('start crawler dantri.com.vn', 'yellow')
        logger.info('start crawler dantri.com.vn')
        totalNewsCrawlered = 0; totalNewsDuplicated = 0
        start = time.time()
        # BUGFIX: Thread.start() returns None, so the original
        # `timeout = threading.Thread(...).start()` captured nothing;
        # keep the thread object itself instead.
        watchdog = threading.Thread(target=timeOut)
        watchdog.start()
        pool = workerpool.WorkerPool(size=1)  # size=1 => categories crawled sequentially
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        if flgCopy!=None: ssh.close()
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức dantri.com.vn không hoạt động", 'utf8'))
        else:
            logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
        logger.info('finished crawler dantri.com.vn')
        forceQuit()
    except:
        traceback.print_exc()