# -*- coding: utf-8 -*-

import traceback, os, re
import commonlib, datetime, time
import workerpool
import html2dict
import mechanize
import logging
from MongoModel import MongoModel
from lxml import etree
from termcolor import cprint

# Crawl limits -- NOTE(review): neither constant is referenced in this file;
# presumably used by sibling crawlers sharing this config style -- confirm.
MAX_PROCESSED = 15
MAX_DUPLICATED = 10
# Mongo database holding the crawled articles and categories.
DBNAME = 'VOA_News'
# Local directory media files are downloaded to before being uploaded.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/VOA/SpecialEnglish/'
#DOWNLOAD_PATH = '/home/tuandv/VOA/SpecialEnglish/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
#MONGO_SERVER = 'localhost'    # Localhost
MONGO_PORT = 27017
# URL path prefix stored in the DB for uploaded media files.
PREFIX = '/uploads/VOA/SpecialEnglish/'
logging.basicConfig()
logger = logging.getLogger('voa_news')
logger.setLevel(logging.DEBUG)

def forceQuit():
    """Terminate the interpreter immediately.

    os._exit() skips atexit handlers, finally blocks and thread joins,
    which is required here because the watchdog thread started in
    __main__ is non-daemon and would otherwise keep the process alive.
    """
    # Bug fix: the original called os._exit(1) and THEN os.kill(pid, 9);
    # since os._exit() never returns, the kill line was unreachable dead
    # code, so it is removed. _exit(1) alone ends the process.
    os._exit(1)
    
class NewsCrawler(MongoModel):
    """Crawler for the VOA "Learning English" (Special English) site.

    Scrapes each category listing page, then every article's body text,
    images and MP3 audio, and stores the result in the 'special_english'
    collection of the VOA_News Mongo database.

    NOTE(review): the download/upload helpers use the module-level ``ssh``
    handle that is only created in the __main__ section -- this class is
    not usable stand-alone without it.
    """

    # Category slug is substituted into {0} to form a listing-page URL.
    rootUrl = 'http://www.voanews.com/learningenglish/home/{0}'
    # Pages to crawl. 'order' is persisted on the category document; the
    # three 'American Life' sub-pages share one category name and order.
    categories = [
        {'id':'usa', 'name': 'U.S.A', 'tag': ['usa'], 'order':1},
        {'id':'world', 'name': 'World', 'tag': ['world'], 'order':2},
        {'id':'us-history', 'name': 'U.S History', 'tag': ['us-history'], 'order':3},
        {'id':'american-life', 'name': 'American Life', 'tag': ['american-life'], 'order':4},
        {'id':'american-life/people/', 'name': 'American Life', 'tag': ['american-life','people'], 'order':4},
        {'id':'american-life/places/', 'name': 'American Life', 'tag': ['american-life','places',], 'order':4},
        {'id':'american-life/literature/', 'name': 'American Life', 'tag': ['american-life','literature'], 'order':4},
        {'id':'arts-entertainment', 'name': 'Arts and Entertainment', 'tag': ['arts-entertainment'], 'order':5},
        {'id':'health', 'name': 'Health', 'tag': ['health'], 'order':6},
        {'id':'education', 'name': 'Education', 'tag': ['education'], 'order':7},
        {'id':'business', 'name': 'Business', 'tag': ['business'], 'order':8},
        {'id':'agriculture', 'name': 'Agriculture', 'tag': ['agriculture'], 'order':9},
        {'id':'science-technology', 'name': 'Science and Technology', 'tag': ['science-technology'], 'order':10},
    ]

    def __init__(self, host='localhost', port=27017):
        # Delegates to MongoModel, which presumably opens self.connection
        # used by getCatId/getListNews -- verify against MongoModel.
        MongoModel.__init__(self, host, port)
        
    def standardizeTimeStr(self, timeStr):
        """Parse a VOA date stamp (e.g. '01 September 2011') into a datetime.

        Appends ' 0:0:0' so the '%H:%M:%S' part always matches, strips a
        ' | ' separator if present, then adds time.timezone seconds --
        i.e. assumes the page time is in this machine's local zone and
        shifts it toward UTC (TODO confirm the site's zone).
        Falls back to utcnow() on any parse error.
        """
        try:
            timeStr = re.sub(r' \| ', ' ', timeStr+' 0:0:0')
            return datetime.datetime.strptime(timeStr, "%d %B %Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except:
            cprint('ERROR: an error occured while standardizeTimeStr', 'red')
            return datetime.datetime.utcnow()
    
    def getCatId(self, cat):
        """Return the Mongo _id of this category, creating the document in
        'special_english_cat' on first sight (find-or-create)."""
        catName = cat['name']
        db = self.connection[DBNAME]
        collection = db['special_english_cat']
        q = collection.find_one({'name': catName}, {'_id':1})
        if q:
            result = q['_id']
        else:
            # Not seen before: insert, then re-query to learn the new _id.
            collection.save({'name':catName, 'order':cat['order']})
            q = collection.find_one({'name': catName}, {'_id':1})
            result = q['_id']
        return result
    
    def parent(self, node, tag, maxLevel=3):
        """Walk up from `node` looking for an element with tag `tag`.

        The node itself is tested first, then up to maxLevel ancestors.
        Returns the matching node, or the last node examined when no
        match is found -- callers must re-check the tag themselves.
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        """Build a map of hash(absolute img src) -> caption text for every
        <img> inside contentNode.

        The caption is read from the element two siblings after the image
        (presumably VOA's img / spacer / caption markup -- verify).
        Captions of 40+ words are assumed to be body text and stored as ''.
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                captionText = ''
                try:
                    # Two .getnext() hops skip the node right after the img.
                    cnode = img.getnext().getnext()
                    captionText = commonlib.getElementText(cnode, descendant=1)
                except: pass
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else '' 
        return data
    
    def identifyId(self, url):
        '''Return the unique id used for de-duplication of articles.

        Currently the full URL itself is the id: the early ``return url``
        makes the 'voa_se_<number>' extraction below unreachable.
        NOTE(review): could be a deliberate switch or a debugging
        leftover -- confirm before removing the dead code, since changing
        the id changes the stored hashUrl and breaks de-duplication.

        Example URL:
        http://www.voanews.com/learningenglish/home/american-life/New-York-Exhibit-Shows-Great-Artists-as-Jewelers-131290624.html'''
        return url
        try:
            return "voa_se_{0}".format(commonlib.extractWithRegEx(r'-(\d+)\.html', url, 1))
        except:
            return ''
    
    def getContent(self, output, contentNode, url):
        """Convert contentNode's HTML into html2dict items appended to
        `output`, attaching captions to image items and mirroring each
        image to the upload server via the module-global `ssh` handle.
        """
        def loaibo(matchStr, blackListWord=['']):
            # "loai bo" is Vietnamese for "exclude": True when the text
            # starts with / regex-matches any blacklist entry. It is
            # called below with an empty list, so nothing is filtered
            # at present.
            for blw in blackListWord:
                if matchStr.startswith(blw): return True
                if re.search(blw, matchStr): return True
            return False
        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        print imageTable
        # stepOver skips the text item that duplicated the previous
        # image's caption.
        stepOver = False
        for i in range(len(tmpData)):
            if (stepOver): stepOver = False; continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                        if tmpData[i+1]['data'] == imageTable[hashItem]:
                            # Next item IS exactly the caption: absorb it.
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                            # Caption fused onto the start of the next
                            # paragraph: strip it off and keep the rest.
                            tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                except: pass
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                if src != '': item['data'] = src
                else: continue  # drop images that failed to mirror
            else:
                if loaibo(item['data'], []): continue
            output.append(item)
    
    def getUrlResponse(self, url):
        """Follow HTTP redirects and return the final URL.
        NOTE(review): not called anywhere in this file's visible code."""
        req = mechanize.Request(url)
        res = mechanize.urlopen(req)
        return res.geturl()
    
    def getDetailNews(self, url):
        """Fetch one article page and return a dict with keys
        'postTime', 'content' (list of html2dict items) and 'audio'.

        The ``finally: return data`` overrides the bare ``return``
        statements inside the try block, so callers ALWAYS receive the
        dict -- on any failure it simply has empty/default fields.
        """
        #url = 'http://www.voanews.com/learningenglish/home/american-life/New-York-Exhibit-Shows-Great-Artists-as-Jewelers-131290624.html'
        logger.debug(unicode('start getNewsDetail(url={0})'.format(url), 'utf8'))
        data = {'postTime': '', 'content': [], 'audio':''}
        try:
            tree = commonlib.getXMLTree(url, outputHTML=False)
            if tree == '' or tree == None: return
            contentNode = tree.xpath("//div[@id='mainContent']")
            articleNode = tree.xpath("//div[@class='articleBody']")
            if len(contentNode) == 0: return
            postTime = commonlib.getElementText(tree.xpath("//div[@class='dateStamp']"), descendant=1)
            postTime = self.standardizeTimeStr(postTime)
            data['postTime'] = postTime
            content = []
            # Lead photo (if any) becomes the first content item.
            imageNode = tree.xpath("//div[@class='photo480px']/img")
            if len(imageNode) > 0:
                img = commonlib.getAttribText(imageNode[0], 'src')
                img = commonlib.urlJoin(url, img) if img != '' else ''
                if img != '':
                    img = commonlib.downloadNUpload(ssh, img, DOWNLOAD_PATH, PREFIX)
                    if img != '':
                        # NOTE(review): this xpath uses @id='photo480px'
                        # while the one above uses @class='photo480px' --
                        # looks inconsistent; the caption may never match.
                        captionText = commonlib.getElementText(tree.xpath("//div[@id='photo480px']//div[@class='caption']"))
                        content.append({'type': 'image', 'data': img, 'caption': captionText})
            self.getContent(content, articleNode[0], url)
            data['content'] = content
            # MP3 link is embedded in the onclick attribute; re-extract
            # the bare http://...Mp3 URL, then download and convert it.
            audioNode = tree.xpath("//div[@id='sideContent']//li[@class='audio']/a[contains(., 'MP3')]")
            audio = commonlib.getAttribText(audioNode[0], 'onclick')
            audio = 'http://{0}.Mp3'.format(commonlib.extractWithRegEx(r'http:\/\/(.+)\.Mp3', audio, 1))
            urlAudio = commonlib.downloadMp3NConvert(audio, DOWNLOAD_PATH, PREFIX)
            data['audio']=urlAudio
            cprint('Audio: {0}'.format(data['audio']), 'green')
            cprint('PostTime: {0}'.format(data['postTime']))
            print '--------------------------------------------------'
            # -------------------------------------------------------------------
            # Debug dump of the extracted content items.
            for item in content:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
        except:
            logger.error(traceback.format_exc())
        finally:
            return data
    
    def getListNews(self, category):
        """Crawl one category: collect article links from the listing
        page (two alternative xpath recipes), skip links already in the
        DB (by MD5 of identifyId), then fetch details and save each new
        article into 'special_english'.
        """
        catId = category['id']
        logger.debug(unicode('start getListNews(catId={0})'.format(catId), 'utf8'))
        url = self.rootUrl.format(catId)
        try:
            db = self.connection[DBNAME]
            collection = db['special_english']
            url = self.rootUrl.format(catId)
            cat = self.getCatId(category)
            # Keyed by hashUrl so the same article found by both xpath
            # recipes is only processed once.
            itemQueue = {}
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            # Recipe 1: the "top story" box; recipe 2: teaser list items.
            listXpath = [{'bound': "//div[@id='mainContent']//div[@class='mainHead']", 
                'link': ".//div[@class='topStory']/a",
                'thumbnail': ".//div[@class='topStory']/a/img", 
                'title': ".//div[@class='moreTopNews']//h2", 
                'description': ".//div[@class='moreTopNews']//p[@class='headlineSummary']"},
                {'bound': "//div[contains(@class,'listBoxTeaser')]", 
                 'link': "./h3/a",
                'thumbnail': "./a/img", 
                'title': "./h3/a", 
                'description': "./p"}]
            for xp in listXpath:
                for item in tree.xpath(xp['bound']):
                    title = commonlib.getElementText(item.xpath(xp['title']), descendant=1)
                    description = commonlib.getElementText(item.xpath(xp['description']), descendant=1)
                    link = commonlib.getAttribText(item.xpath(xp['link']), 'href')
                    link = commonlib.urlJoin(url, link) if link != '' else ''
                    id = self.identifyId(link)
                    print 'ID: {0}'.format(id)
                    if id == '' or link == '': continue
                    hashUrl = commonlib.getMD5Hash(id)
                    # De-duplication against previously-saved articles.
                    if collection.find_one({'hashUrl': hashUrl}):
                        cprint("WARNING: already existed in database !")
                        continue
                    thumbnail = ''
                    thumbnailNode = item.xpath(xp['thumbnail'])
                    if thumbnailNode != None:
                        thumbnail = commonlib.getAttribText(thumbnailNode, 'src')
                        thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                        if thumbnail != '': thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    itemQueue[hashUrl] = {'link': link, 'hashUrl': hashUrl, 'thumbnail': thumbnail, 'title': title, 'description': description}
                
            for item in itemQueue.values():
                link = item['link']
                title = item['title']
                print '--------------------------------------------------'
                print 'Title: {0}'.format(title)
                cprint('Link: {0}'.format(link), 'cyan')
                print 'Thumbnail: {0}'.format(item['thumbnail'])
                print 'Description: {0}'.format(item['description'])
                detail = self.getDetailNews(link)
                # NOTE(review): getDetailNews always returns a 3-key
                # dict, so the len(detail) == 0 branch is dead; only the
                # empty-content check can trigger.
                if ((len(detail['content']) == 0) or (len(detail) == 0)): continue
                collection.save({'hashUrl': item['hashUrl'],
                    'title': title,
                    'thumbnail': item['thumbnail'],
                    'description': item['description'],
                    'content': detail['content'],
                    'newsLink': link,
                    'update': detail['postTime'],
                    'audio': detail['audio'],
                    'source': 'voanews.com',
                    'catId': cat,
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    })
        except:
            logger.error(traceback.format_exc())

def quitIfTimeout():
    """Watchdog loop: every 10 seconds, compare the module-global
    `lastaction` timestamp against the clock and hard-kill (`kill -9`)
    this process once it has been running for more than 2400 seconds."""
    print('call quitIfTimeout')
    while True:
        own_pid = os.getpid()
        idle = time.time() - lastaction
        try:
            if idle > 2400:
                print('process timeout {0}'.format(idle))
                print('kill process {0}'.format(own_pid))
                os.system("kill -9 {0}".format(own_pid))
        except:
            print('ERROR: could not kill python process with pid={0}'.format(own_pid))
        time.sleep(10)

if __name__ == '__main__':
    import threading
    import argparse
    # Watchdog baseline: quitIfTimeout() kills the process once
    # time.time() - lastaction exceeds 2400s. NOTE(review): nothing ever
    # refreshes lastaction, so the whole run is hard-limited to ~40 min.
    lastaction = time.time()
    # -- Command-line argument handling
    parser = argparse.ArgumentParser(description="Crawler VOA_SE")
    # (help text, in Vietnamese: "sftp: run locally and upload files to
    # the server over sftp")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Initialise the SSH connection to the mana.vn server; this is
    # the module-global `ssh` the crawler's upload helpers rely on.
    run_mode = args.run_mode
    ssh = None
    
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            # Message: "Could not connect to the server, exiting crawler"
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    
    # Non-daemon watchdog thread; the forceQuit() at the end is what
    # actually terminates the process despite it still running.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler VOA_SE')
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Two worker threads crawl the category list in parallel.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.getListNews, crawler.categories)
        pool.shutdown()
        pool.wait()
    except:
        logger.error(traceback.format_exc())
    if ssh is not None: ssh.close()
    logger.info('finished crawler VOA_SE')
    forceQuit()
