# -*- coding: utf-8 -*-

import traceback, os, re
import commonlib, datetime, time
import workerpool
from MongoDbLog import MongoDbLog
from MongoModel import MongoModel
from termcolor import cprint

# MongoDB-backed logger; writes debug-level crawler logs.
LOG = MongoDbLog('news.m.zing.vn', 'debug')
# Per-category run limits: stop after this many newly saved articles ...
MAX_PROCESSED = 75
# ... or after this many already-known (duplicate) articles were seen.
MAX_DUPLICATED = 30
# Target MongoDB database name.
DBNAME = 'tintuc_v2'
# Local filesystem directory where article images are downloaded.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # Mana 64-bit server
MONGO_PORT = 27017
# Passed to commonlib.downloadImage together with DOWNLOAD_PATH --
# presumably the public URL prefix stored for downloaded images; confirm
# against commonlib.downloadImage.
PREFIX = '/uploads/news/'

# Spoof an iPhone so vnexpress.net serves its lightweight mobile/PDA markup.
iphoneUserAgent = 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16'


def forceQuit():
    """Terminate the process immediately with exit status 1.

    os._exit() ends the process without running atexit handlers or
    Python-level cleanup. NOTE(review): presumably used so worker threads
    started by workerpool cannot keep the interpreter alive -- confirm.
    """
    # The original body also called os.kill(os.getpid(), 9) AFTER
    # os._exit(1); that statement was unreachable (_exit never returns),
    # so the dead code has been removed. Observable behavior is unchanged.
    os._exit(1)
    
class CrawlerNews(MongoModel):
    """Crawl article listings and detail pages from vnexpress.net (mobile
    markup, fetched with an iPhone user agent) and store each article in
    the 'article' collection of the configured MongoDB database.
    """

    # NOTE(review): rootUrl appears unused in this file -- getListNews
    # builds its own URL string. Confirm before removing.
    rootUrl = 'http://vnexpress.net/gl/{0}/?cboGuidPDA=1'
    # Category slug (URL path segment) -> {'name': display name used to look
    # up the category document in Mongo, 'tag': tags saved with each article}.
    categories = {'xa-hoi': {'name': 'Văn hóa - Xã hội', 'tag': ['xa hoi']},
        'the-gioi': {'name': 'Thế giới', 'tag': ['the gioi']},
        'kinh-doanh': {'name': 'Kinh doanh', 'tag': ['kinh doanh']},
        'phap-luat': {'name': 'Pháp luật', 'tag': ['phap luat']},
        'the-thao': {'name': 'Thể thao', 'tag': ['the thao']},
#        'ban-doc-viet/tam-su': {'name': 'Tâm sự', 'tag': ['tam su']},
        'khoa-hoc': {'name': 'Khoa học', 'tag': ['khoa hoc']},
        'vi-tinh': {'name': 'Công nghệ số', 'tag': ['cong nghe so']},
        'xa-hoi/giao-duc': {'name': 'Giáo dục', 'tag': ['giao duc']}
        }

    def __init__(self, host, port=27017):
        """Open the MongoDB connection via the MongoModel base class."""
        MongoModel.__init__(self, host, port)
        # NOTE(review): neither private attribute below is read anywhere in
        # this file; they may be used outside this view -- confirm.
        self.__alreadyDetected = False
        self.__step = 12

    def getLeftRootOfCategory(self, catName):
        """Look up a category by its display name in the 'category'
        collection and return {'category': <lft>, 'root': <root_id>},
        or the empty string '' when no document matches."""
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result

    def standardizeTimeString(self, timeStr):
        """Parse an 'HH:MM | dd/mm/YYYY' timestamp and shift it toward UTC
        by adding time.timezone (the local UTC offset); on any parse
        failure, print a warning and fall back to the current UTC time."""
        # Collapse whitespace runs; strptime treats whitespace in the format
        # flexibly, so the double spaces around '|' still match.
        timeStr = re.sub(r'\s+', ' ', timeStr).strip()
        try:
            return datetime.datetime.strptime(timeStr, '%H:%M  |  %d/%m/%Y') + datetime.timedelta(seconds=time.timezone)
        except:
            # Message (Vietnamese): "Error while normalizing the time!!!"
            cprint('Có lỗi trong quá trình chuẩn hóa thời gian !!!', 'red')
            return datetime.datetime.utcnow()

    def identifyId(self, url):
        '''Extract the numeric article id from a zing-style detail URL,
        e.g. http://news.m.zing.vn/detail/view/cat/dep/id/a122371?ver=t
        (delegates to commonlib.extractWithRegEx with group index 1).'''
        pat = r'/id/[a-z]+(\d+)\?'
        return commonlib.extractWithRegEx(pat, url, 1)

    def hasImg(self, node):
        '''Return (has_image, image_count) over all <img> descendants of
        the given lxml node.'''
        ret = False
        ic = 0
        for item in node.iterdescendants(tag='img'):
            if not ret: ret = True
            ic += 1
        return ret, ic

    def checkTextBold(self, node):
        """Return True when the node itself is a bold/heading tag, or when
        its only child is such a tag with no trailing tail text."""
        if node.tag in ['b', 'strong', 'h1', 'h2', 'h3', 'h4']: return True
        f = False
        childNodes = node.getchildren()
        if len(childNodes) == 1:
            if childNodes[0].tag in ['b', 'strong', 'h1', 'h2', 'h3', 'h4'] and childNodes[0].tail == None: f = True
        return f

    def parent(self, node, tag, maxLevel=3):
        """Walk up from `node` looking for an ancestor with the given tag,
        climbing at most maxLevel+1 steps. The tag test runs before each
        step, so `node` itself can match. Returns the node where the walk
        stopped -- which may NOT carry the requested tag if none was found
        within range."""
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode

    def getDetailNews(self, url):
        """Fetch an article detail page, following 'Xem tiếp' ("read more")
        pagination links, and return {'content': [...], 'author': str}.

        Each content entry is one of:
            {'type': 'image', 'data': <downloaded src>, 'caption': str}
            {'type': 'textbold', 'data': str}
            {'type': 'text', 'data': str}

        Errors are logged and swallowed. Note the ``finally: return data``
        also overrides the early bare ``return`` taken when the page tree
        cannot be built -- callers therefore always receive a dict.
        """
        data = {'content': [], 'author': ''}
        try:
            content = []
            while url != '':
                tree = commonlib.getXMLTree(url, userAgent=iphoneUserAgent)
                if tree == None or tree == '': return
                # Find the 'Xem tiếp' (continue reading) link to the next page
                # of this article; .decode('utf-8') makes the XPath unicode
                # for the non-ASCII text match (Python 2).
                nextNode = tree.xpath("//font[contains(., 'Xem tiếp')]".decode('utf-8'))
                nextLink = ''
                if len(nextNode) > 0:
                    aNode = self.parent(nextNode[0], 'a')
                    link = commonlib.getAttribText(aNode, 'href')
                    nextLink = commonlib.urlJoin(url, link) if link != '' else ''
                    # Clear the node so it is not re-emitted as body text.
                    nextNode[0].clear()
                # Article body: children of td#tdbody after the first two.
                for item in tree.xpath("//td[@id='tdbody']/*[position()>2]"):
                    commonlib.cleanElementWithTag(item, ['script', 'style'])
                    if item.tag in ['script']: continue
                    # These footer / related-links boxes mark the end of body.
                    if item.tag == 'div' and item.get('class', '') == 'fd' and item.get('style', '') == 'margin-top:10px;': break
                    if item.tag == 'div' and item.get('class', '') == 'box-item': break
                    # Right-aligned "Normal" paragraph carries the byline.
                    if item.tag == 'p' and item.get('align', '') == 'right' and item.get('class', '') == 'Normal':
                        data['author'] = commonlib.getElementText(item, descendant=1); continue
                    hasCaptionText = False
                    if item.tag in ['table', 'div', 'p']:
                        fimg, cimg = self.hasImg(item)
                        # A table without images is layout chrome -- skip it.
                        if fimg == False and item.tag == 'table': continue
                        if cimg > 1 or item.tag in ['div', 'p']:
                            for ichild in item.xpath(".//img"):
                                imgSrc = commonlib.getAttribText(ichild, 'src')
                                imgSrc = commonlib.urlJoin(url, imgSrc) if imgSrc != '' else ''
                                imgSrc = commonlib.downloadImage(imgSrc, DOWNLOAD_PATH, PREFIX)
                                caption = ''
                                if imgSrc != '':
                                    # Caption is expected in the <tr> sibling
                                    # following the image's enclosing row.
                                    pnode = self.parent(ichild, 'tr', 4)
                                    pnode = pnode.getnext()
                                    fimg, icimg = self.hasImg(pnode)
                                    if fimg == False and icimg == 0:
                                        text = commonlib.stringify(pnode)
                                        caption = text
                                        if text != '': hasCaptionText = True
                                    content.append({'type': 'image', 'data': imgSrc, 'caption': caption})
                        elif cimg == 1 and item.tag == 'table':
                            # Single-image table: image row, caption in row 2.
                            imgSrc = commonlib.getAttribText(item.xpath(".//img"), 'src')
                            imgSrc = commonlib.urlJoin(url, imgSrc) if imgSrc != '' else ''
                            imgSrc = commonlib.downloadImage(imgSrc, DOWNLOAD_PATH, PREFIX)
                            if imgSrc != '':
                                text = commonlib.getElementText(item.xpath(".//tr[2]/td"), descendant=1)
                                if text != '': hasCaptionText = True
                                content.append({'type': 'image', 'data': imgSrc, 'caption': text})
                    # Caption text was already stored with its image above --
                    # skip so it is not duplicated as a plain text entry.
                    if hasCaptionText: hasCaptionText = False; continue
                    text = commonlib.getElementText(item, descendant=1)
                    # Lines starting with '>>' are "see also" cross links.
                    if text != '' and not text.startswith('>>'):
                        if self.checkTextBold(item):
                            content.append({'type': 'textbold', 'data': text})
                        else:
                            content.append({'type': 'text', 'data': text})
                    if item.tail != None:
                        text = commonlib.getElementText(item, text=0, tail=1)
                        if text != '' and not text.startswith('>>'): content.append({'type': 'text', 'data': text})
                # Loop continues with the next pagination page, if any.
                url = nextLink if nextLink != '' else ''
            data['content'] = content
            # -------------------------------------------------------------------------
            # Debug dump of the parsed content to stdout.
            for line in content:
                if line['type'] == 'image':
                    print line['data'], '(', line['caption'], ')'
                else:
                    print line['type'], ': ', line['data']
            # -------------------------------------------------------------------------
        except:
            # NOTE(review): bare except -- any failure is logged, then the
            # finally block still returns the (possibly partial) data.
            LOG.error(traceback.format_exc())
        finally:
            return data


    def getListNews(self, catId):
        """Crawl the listing pages of one category slug (a key of
        ``categories``), saving each previously-unseen article into the
        'article' collection via getDetailNews. Stops when MAX_PROCESSED
        new articles were handled, MAX_DUPLICATED known ones were seen, or
        no further listing page exists. All errors are logged and swallowed.
        """
        LOG.debug('start getListNews(catId={0})'.format(catId))
        url = "http://vnexpress.net/gl/{0}/".format(catId)
        currentProcessed = 0
        currentDuplicated = 0
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            # NOTE(review): cat['root'] is read here BEFORE the cat == ''
            # guard below; a missing category raises TypeError on this line
            # instead of reaching the guard. Likely a bug -- confirm.
            cprint("root={0}, left={1}".format(cat['root'], cat['category']), 'red')
            if cat == '':
                # Message (Vietnamese): 'Category "%s" does not exist!!!'
                LOG.info('Chuyên mục "%s" không tồn tại !!!' % self.categories[catId]['name'])
                return
            tree = commonlib.getXMLTree(url, userAgent=iphoneUserAgent)
            stopProcess = False
            while True:
                if stopProcess == True: break
                if tree == None or tree == '':
                    # Message (Vietnamese): could not build the tree.
                    cprint('Error: Không build được tree', 'red'); break
                # Each article teaser is a div.fd; the trailing divs (paging
                # controls) are excluded by position()<last()-1.
                for item in tree.xpath("//td[@id='tdbody']/div[@class='fd'][position()<last()-1]"):
                    if (currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED):
                        # Messages (Vietnamese): category stopped because the
                        # processed/duplicate limit was exceeded.
                        if currentProcessed > MAX_PROCESSED: cprint('Chuyên mục {0} dừng vì vượt quá giới hạn {1} cho phép'.format(self.categories[catId]['name'], MAX_PROCESSED))
                        if currentDuplicated > MAX_DUPLICATED: cprint('Chuyên mục {0} dừng vì số tin trùng quá giới hạn {1} cho phép'.format(self.categories[catId]['name'], MAX_DUPLICATED))
                        stopProcess = True
                        break
                    title = commonlib.getElementText(item.xpath(".//a[@class='link-title']"))
                    link = commonlib.getAttribText(item.xpath(".//a[@class='link-title']"), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    if link == '': continue
                    # Dedupe by URL hash or by exact title.
                    hashUrl = commonlib.getMD5Hash(link)
                    if collection.find_one({'hashUrl': hashUrl}) or collection.find_one({'title': title}):
                        currentDuplicated += 1
                        cprint('INFO: already existed in database !', 'yellow')
                        continue
                    thumbnail = commonlib.getAttribText(item.xpath(".//img"), 'src')
                    thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                    if thumbnail != '': thumbnail = commonlib.downloadImage(thumbnail, DOWNLOAD_PATH, PREFIX)
                    postDate = commonlib.getElementText(item.xpath(".//span[@class='dateTime']"), descendant=1)
                    postDate = self.standardizeTimeString(postDate)
                    description = commonlib.getElementText(item.xpath(".//p[@class='pLead']"))
                    # ------------------------------------------------
                    # Debug dump of the teaser fields to stdout.
                    print '----------------------------------------------------------'
                    cprint('Chuyên mục {0}'.format(self.categories[catId]['name']), 'green')
                    print 'Title: ', title
                    print 'Description: ', description
                    print 'postDate: ', postDate
                    print 'Link: ', link
                    print 'Thumbnail: ', thumbnail
                    print '----------------------------------------------------------'
                    # ------------------------------------------------
                    currentProcessed += 1
                    data = self.getDetailNews(link)
                    # Skip articles whose body could not be parsed.
                    if len(data['content']) == 0: continue
                    collection.save({'hashUrl': hashUrl,
                        'title': title,
                        'thumbnail': thumbnail,
                        'description': description,
                        'content': data['content'],
                        'newsLink': link,
                        'update': postDate,
                        'source': 'vnexpress.net',
                        'author': data['author'],
                        'category': cat['category'],
                        'root': cat['root'],
                        'is_active': True,
                        'lastupdate': datetime.datetime.utcnow(),
                        'timestamp': time.time(),
                        'date': datetime.datetime.utcnow(),
                        'tag': self.categories[catId]['tag']
                    })
                # The final div.fd holds the "next page" link; its href embeds
                # a ShowNextFolderItem('<timestamp>...') call whose timestamp
                # is passed back as the 'd' query parameter.
                continueNode = tree.xpath("//td[@id='tdbody']/div[@class='fd'][position()=last()]//a")
                if len(continueNode) > 0:
                    qstr = commonlib.getAttribText(continueNode[0], 'href')
                    qstr = commonlib.extractWithRegEx(r"ShowNextFolderItem\('(\d+/\d+/\d{4} \d+:\d+:\d+).+'\)", qstr, 1)
                    if qstr != '':
                        # NOTE(review): this fetch omits userAgent, unlike the
                        # first page fetch above -- confirm that is intended.
                        tree = commonlib.getXMLTree(url, {'cboGuidPDA': 1, 'd': qstr})
                        print '*****************************************'
                        cprint('DEBUG: goto next page url={0}'.format(url))
                        print '*****************************************'
                    else:
                        tree = None
        except:
            # NOTE(review): bare except -- logged at info level (getDetailNews
            # uses error level for the same situation); inconsistent severity.
            LOG.info(traceback.format_exc())
        
if __name__ == '__main__':

    LOG.info('start crawler vnexpress.net')
    crawler = CrawlerNews(MONGO_SERVER, MONGO_PORT)
#    crawler.getDetailNews('http://vnexpress.net/gl/the-thao/bong-da/2011/08/top-20-ban-hop-dong-tu-dau-he-2011/')
#    crawler.getListNews('xa-hoi')
    try:
        # Crawl all categories concurrently with 8 worker threads.
        pool = workerpool.WorkerPool(size=8)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        LOG.error(traceback.format_exc())
    LOG.info('finished crawler vnexpress.net')
    # Hard exit via os._exit -- presumably so lingering pool threads cannot
    # keep the process alive; confirm.
    forceQuit()