# -*- coding: utf-8 -*-

import traceback, os, re
import commonlib, datetime, time
import workerpool
import html2dict
import mechanize
import logging
from MongoModel import MongoModel
from lxml import etree
from termcolor import cprint

# Crawl limits — NOTE(review): MAX_PROCESSED / MAX_DUPLICATED are not read
# anywhere in this file; presumably shared thresholds with sibling crawlers,
# confirm before removing.
MAX_PROCESSED = 15
MAX_DUPLICATED = 10
# MongoDB database holding the crawled program data.
DBNAME = 'VOA_News'
# Local directory where downloaded media is stored before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/VOA/Programs/'
#DOWNLOAD_PATH = '/home/tuandv/VOA/Programs/'
MONGO_SERVER = 'beta.mana.vn'    # 64-bit Mana server
#MONGO_SERVER = 'localhost'    # Localhost
MONGO_PORT = 27017
# URL prefix under which uploaded media is served.
PREFIX = '/uploads/VOA/Programs/'
logging.basicConfig()
logger = logging.getLogger('voa_news')
logger.setLevel(logging.DEBUG)

def forceQuit():
    """Terminate the current process immediately with exit status 1.

    Uses ``os._exit`` so no cleanup handlers / atexit hooks run.

    NOTE(review): the original body also called ``os.kill(pid, 9)``
    ("kill -9" on Linux) *after* ``os._exit(1)``, but ``os._exit`` never
    returns, so that line was unreachable dead code and has been removed.
    """
    os._exit(1)
    
class NewsCrawler(MongoModel):
    
    rootUrl = 'http://www.voanews.com/learningenglish/programs/{0}/'
    categories = [{'id':'radio', 'name': 'Radio', 'tag': ['radio']}]

    def __init__(self, host='localhost', port=27017):
        MongoModel.__init__(self, host, port)
        
    def standardizeTimeStr(self, timeStr):
        try:
            timeStr = re.sub(r' \| ', ' ', timeStr+' 0:0:0')
            return datetime.datetime.strptime(timeStr, "%d %B %Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except:
            cprint('ERROR: an error occured while standardizeTimeStr', 'red')
            return datetime.datetime.utcnow()
    
    def getCatId(self, catName):
        db = self.connection[DBNAME]
        collection = db['program_radio_cat']
        q = collection.find_one({'name': catName}, {'_id':1})
        if q:
            result = q['_id']
        else:
            collection.save({'name':catName})
            q = collection.find_one({'name': catName}, {'_id':1})
            result = q['_id']
        return result
    
    def parent(self, node, tag, maxLevel=3):
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                captionText = ''
                try:
                    cnode = img.getnext().getnext()
                    captionText = commonlib.getElementText(cnode, descendant=1)
                except: pass
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 40 else '' 
        return data
    
    def identifyId(self, url):
        '''http://www.voanews.com/learningenglish/home/american-life/New-York-Exhibit-Shows-Great-Artists-as-Jewelers-131290624.html'''
        try:
            return "voa_se_{0}".format(commonlib.extractWithRegEx(r'-(\d+)\.html', url, 1))
        except:
            return ''
    
    def getContent(self, output, contentNode, url):
        def loaibo(matchStr, blackListWord=['']):
            for blw in blackListWord:
                if matchStr.startswith(blw): return True
                if re.search(blw, matchStr): return True
            return False
        tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
        imageTable = self.getImageAndCaption(contentNode, url)
        print imageTable
        stepOver = False
        for i in range(len(tmpData)):
            if (stepOver): stepOver = False; continue
            item = tmpData[i]
            if item['type'] == 'image':
                hashItem = item['hash']
                try:
                    if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                        if tmpData[i+1]['data'] == imageTable[hashItem]:
                            stepOver = True
                            item['caption'] = imageTable[hashItem]
                        elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                            tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip()
                            item['caption'] = imageTable[hashItem]
                    del item['hash']
                except: pass
                src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                if src != '': item['data'] = src
                else: continue
            else:
                if loaibo(item['data'], []): continue
            output.append(item)
    
    def getUrlResponse(self, url):
        req = mechanize.Request(url)
        res = mechanize.urlopen(req)
        return res.geturl()
    
    def getDetailNews(self, url):
        #url = 'http://www.voanews.com/learningenglish/home/american-life/New-York-Exhibit-Shows-Great-Artists-as-Jewelers-131290624.html'
        logger.debug(unicode('start getNewsDetail(url={0})'.format(url), 'utf8'))
        data = {'postTime': '', 'content': [], 'audio':''}
        try:
            tree = commonlib.getXMLTree(url, outputHTML=False)
            if tree == '' or tree == None: return
            contentNode = tree.xpath("//div[@id='mainContent']")
            articleNode = tree.xpath("//div[@class='articleBody']")
            if len(contentNode) == 0: return
            postTime = commonlib.getElementText(tree.xpath("//div[@class='dateStamp']"), descendant=1)
            postTime = self.standardizeTimeStr(postTime)
            data['postTime'] = postTime
            content = []
            imageNode = tree.xpath("//div[@class='photo480px']/img")
            if len(imageNode) > 0:
                img = commonlib.getAttribText(imageNode[0], 'src')
                img = commonlib.urlJoin(url, img) if img != '' else ''
                if img != '':
                    img = commonlib.downloadNUpload(ssh, img, DOWNLOAD_PATH, PREFIX)
                    if img != '':
                        captionText = commonlib.getElementText(tree.xpath("//div[@id='photo480px']//div[@class='caption']"))
                        content.append({'type': 'image', 'data': img, 'caption': captionText})
            self.getContent(content, articleNode[0], url)
            data['content'] = content
            audioNode = tree.xpath("//div[@id='sideContent']//li[@class='audio']/a")
            audio = commonlib.getAttribText(audioNode[0], 'onclick')
            audio = 'http://{0}.Mp3'.format(commonlib.extractWithRegEx(r'http:\/\/(.+)\.Mp3', audio, 1))
            urlAudio = commonlib.downloadMp3NConvert(audio, DOWNLOAD_PATH, PREFIX)
            data['audio']=urlAudio
            cprint('Audio: {0}'.format(data['audio']), 'green')
            cprint('PostTime: {0}'.format(data['postTime']))
            print '--------------------------------------------------'
            # -------------------------------------------------------------------
            for item in content:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
        except:
            logger.error(traceback.format_exc())
        finally:
            return data
    
    def getListRadio(self, category):
        catId = category['id']
        logger.debug(unicode('start getListNews(catId={0})'.format(catId), 'utf8'))
        url = self.rootUrl.format(catId)
        try:
            db = self.connection[DBNAME]
            collection = db['program_radio']
            ra = collection.find_one({'hashDate': commonlib.getMD5Hash(datetime.datetime.now().strftime("%d-%m-%Y"))})
            if ra:
                return None
            itemQueue = []
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            listXpath = [{
                'bound': "//div[@id='newsTabContent2']//table[@id='programTableRadio']//tbody/tr", 
                'name': ".//td[@class='programName']/a", 
                'description': ".//td[2]",
                'schedule':".//td/ul/li[contains(., 'Schedule')]",
                'duration':".//td/ul/li[contains(., 'Duration')]",
                'audio':".//td/ul//li/a"
            }]
            for xp in listXpath:
                for item in tree.xpath(xp['bound']):
                    name = commonlib.getElementText(item.xpath(xp['name']), descendant=1)
                    description = commonlib.getElementText(item.xpath(xp['description']))
                    schedule = commonlib.getElementText(item.xpath(xp['schedule']), descendant=1)
                    schedule = (schedule.split(':')[1]).strip()
                    duration = commonlib.getElementText(item.xpath(xp['duration']), descendant=1)
                    duration = (duration.split(':')[1]).strip()
                    audio = commonlib.getAttribText(item.xpath(xp['audio']), 'href')
                    audio = 'http://{0}.mp3'.format(commonlib.extractWithRegEx(r'http:\/\/(.+)\.mp3', audio, 1))
                    audio = commonlib.urlJoin(url, audio) if audio != '' else ''
                    urlAudio = commonlib.downloadMp3NConvert(audio, DOWNLOAD_PATH, PREFIX)
                    itemQueue.append({'name': name, 'description': description, 'schedule':schedule, 'duration':duration, 'audio':urlAudio})
            
            collection.save = {
                'hashDate': commonlib.getMD5Hash(datetime.datetime.now().strftime("%d-%m-%Y")),
                'is_active': True,
                'timestamp': time.time(),
                'date': datetime.datetime.utcnow(),
                'data': itemQueue
            }
        except:
            logger.error(traceback.format_exc())

def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 2400:
                print 'process timeout {0}'.format(delta)
                print 'kill process {0}'.format(pid)
                os.system("kill -9 {0}".format(pid))
        except:
            print 'ERROR: could not kill python process with pid={0}'.format(pid)
        time.sleep(10)

if __name__ == '__main__':
    import threading
    import argparse
    # Watchdog timestamp read by quitIfTimeout() (thread start is commented
    # out below, so this is currently unused).
    lastaction = time.time()
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler VOA_RADIO")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Open the SSH connection to the mana.vn server (sftp upload mode).
    run_mode = args.run_mode
    # NOTE: ``ssh`` is a module-level global consumed by NewsCrawler's
    # download/upload calls; None means local-only operation.
    ssh = None
    
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            # Message: "Could not connect to the server, exiting crawler".
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    
    #threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler VOA_RADIO')
    crawler = NewsCrawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Single worker: categories are crawled sequentially.
        pool = workerpool.WorkerPool(size=1)
        pool.map(crawler.getListRadio, crawler.categories)
        pool.shutdown()
        pool.wait()
    except:
        logger.error(traceback.format_exc())
    if ssh is not None: ssh.close()
    logger.info('finished crawler VOA_RADIO')
    forceQuit()
