# -*- coding: utf-8 -*-
import sys
sys.path.append('/home/crawler/src')
import readability
import commonlib
import traceback
import datetime
import time
import os
import html2dict as html2text
import pprint
import re
from lxml import etree
from MongoModel import MongoModel
import workerpool

# NOTE(review): logger was registered as 'tuoitrenews' — a copy-paste leftover
# from the tuoitrenews crawler; renamed to match this crawler's actual source
# (vietnewsonline.vn, cf. the "finished crawler vietnews" log line in __main__).
logger = commonlib.getLogger('vietnews')
MONGO_SERVER = 'beta.mana.vn'   # MongoDB host used by Crawler/MongoModel
MONGO_PORT = 27017              # MongoDB port
# Local directory where article images are downloaded before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
# Remote path prefix the uploaded images are served from.
PREFIX = '/uploads/news/'
MAX_PROCESSED = 15      # appears unused in this file — TODO confirm before removing
MAX_DUPLICATED = 10     # appears unused in this file — TODO confirm before removing
DBNAME = 'englishnews'  # target MongoDB database name



class Crawler(MongoModel):
    """Crawler for the English-language RSS feeds of vietnewsonline.vn.

    For each category feed it parses the RSS items, skips articles already
    stored (keyed by an MD5 hash of the article id), fetches the article
    body, mirrors embedded images to the upload server via the module-level
    ``ssh`` connection, and saves everything into the ``article`` collection
    of the ``englishnews`` database.
    """

    # [display name, RSS feed URL] pairs, one per site category.
    # BUG FIX: the Sports URL originally ended in 'ChannelID=11]' — the stray
    # ']' inside the string made every Sports feed request fail.
    category = [['Politics', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=7'],
                ['Society', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=8'],
                ['Features', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=9'],
                ['Lifestyle', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=10'],
                ['Sports', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=11'],
                ['International', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=12'],
                ['Business', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=4'],
                ['Opinion', 'http://www.vietnewsonline.vn/RssView.aspx?ChannelID=68']]

    def __init__(self, host, port):
        """Open the MongoDB connection via the MongoModel base class."""
        MongoModel.__init__(self, host, port)

    def standardizeTimeStr(self, timeStr):
        """Parse an RSS pubDate such as 'Fri, 14 Oct 2011 12:30:55 +0700'.

        Returns the parsed datetime shifted by ``time.timezone`` seconds
        (converting the crawler host's local reading toward UTC — the
        feed's own '+0700' offset is ignored; presumably the feed and the
        host share a timezone — TODO confirm).  On any parse failure a
        fallback of "now + 1 hour" with the same shift is returned, so
        callers always receive a datetime.
        """
        try:
            timeStr = commonlib.extractWithRegEx(r',\s+(\d+.+\d{4} \d+:\d+:\d+)', timeStr, 1)
            return datetime.datetime.strptime(timeStr, "%d %b %Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except Exception:
            traceback.print_exc()
            logger.error('standard time error with timeStr="{0}"'.format(timeStr))
            return datetime.datetime.now() + datetime.timedelta(hours=1) + datetime.timedelta(seconds=time.timezone)

    def detectCategory(self, catName):
        """Return the Mongo ``_id`` of category *catName*.

        Creates the category document on first sight.  Returns None for an
        empty name or on any database error (callers then skip the item).
        """
        if catName == '':
            return None
        try:
            collection = self.connection[DBNAME]['category']
            row = collection.find_one({'name': catName})
            if row:
                return row['_id']
            # Legacy pymongo save(): inserts the document, then re-read to
            # obtain the generated _id.
            collection.save({'name': catName})
            row = collection.find_one({'name': catName})
            return row['_id'] if row else None
        except Exception:
            return None

    def getDetail(self, url):
        """Fetch the article page at *url*.

        Returns {'content': [segment, ...], 'thumbnail': path-or-''}.
        Image segments are downloaded and re-uploaded through the
        module-level ``ssh`` connection set up in __main__; the first
        successfully mirrored image becomes the thumbnail.  Because of the
        ``return`` in ``finally`` this method never raises and always
        returns the (possibly empty) dict.
        """
        logger.debug('call getDetail url={0}'.format(url))
        data = {'content': [], 'thumbnail': ''}
        try:
            tree = commonlib.getXMLTree(url)
            if tree is None:
                return data  # the finally clause returned it anyway; be explicit
            contentNode = commonlib.getArticleNode(etree.tounicode(tree), clean=[])
            t_data = html2text.html2text(etree.tounicode(contentNode), baseurl=url)
            firstImage = ''
            for item in t_data:
                if item['type'] == 'image':
                    img = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                    if img != '':
                        data['content'].append({'type': 'image', 'data': img})
                        if firstImage == '':
                            firstImage = img
                else:
                    data['content'].append(item)
            data['thumbnail'] = firstImage
            pprint.pprint(data)
        except Exception:
            logger.error(traceback.format_exc())
        finally:
            # Deliberate best-effort: swallow any error and hand back
            # whatever content was collected so far.
            return data

    def working(self, item):
        """Worker-pool job: fetch one RSS item's detail and store it.

        *item* is the dict built by parseRss (title, link, description,
        pubDate, guid, catId, hashUrl).  Re-checks for duplicates because
        jobs run concurrently.  Never raises; errors are logged.
        """
        logger.debug('call working item={0}'.format(item))
        try:
            collection = self.connection[DBNAME]['article']
            if collection.find_one({'hashUrl': item['hashUrl']}):
                print('Already existed in database')
                return
            link = item['link']
            print(item['title'])
            print(item['link'])
            print(item['description'])
            print(item['pubDate'])
            print(item['guid'])
            print('________________________________________')
            detail = self.getDetail(link)
            print('________________________________________')
            if len(detail['content']) == 0:
                logger.warning("Can't get detail of link {0}".format(link))
                return
            collection.save({'hashUrl': item['hashUrl'],
                             'title': item['title'],
                             'newsLink': item['link'],
                             'description': item['description'],
                             'thumbnail': detail['thumbnail'],
                             'category': item['catId'],
                             'is_active': True,
                             'content': detail['content'],
                             'update': item['pubDate'],
                             'source': 'vietnewsonline.vn',
                             'timestamp': time.time(),
                             'lastupdate': datetime.datetime.utcnow()})
        except Exception:
            logger.error(traceback.format_exc())

    def parseRss(self, link, catName):
        """Parse the RSS feed at *link* for category *catName*.

        Builds a queue of not-yet-stored items and processes them with a
        small worker pool (2 workers).  Never raises; errors are logged.
        """
        logger.debug('start parseRss catName={0}, link={1}'.format(catName, link))
        try:
            tree = commonlib.getXMLTree(link, isXML=True)
            if tree is None:
                return
            collection = self.connection[DBNAME]['article']
            queue = []
            for item in tree.xpath("//item"):
                title = commonlib.getElementText(item.xpath(".//title"))
                # renamed from 'link' to avoid shadowing the feed-URL parameter
                itemLink = commonlib.getElementText(item.xpath(".//link"))
                if itemLink == '':
                    continue
                catId = self.detectCategory(catName)
                if catId is None:
                    continue
                # Strip HTML tags from the RSS description.
                description = re.sub(r'<.+?>', '', commonlib.getElementText(item.xpath(".//description")))
                pubDate = self.standardizeTimeStr(commonlib.getElementText(item.xpath(".//pubDate")))
                # Article id is the numeric path segment after the category name.
                guid = commonlib.extractWithRegEx(r'{0}/(\d+)/'.format(catName), itemLink, 1)
                if guid == '':
                    logger.warning('detect id failed with url={0}'.format(itemLink))
                    continue
                hashUrl = commonlib.getMD5Hash('vietnews_{0}'.format(guid))
                if collection.find_one({'hashUrl': hashUrl}):
                    print('Already existed in database')
                    continue
                queue.append({'title': title, 'link': itemLink, 'description': description, 'pubDate': pubDate, 'guid': guid, 'catId': catId, 'hashUrl': hashUrl})
            pool = workerpool.WorkerPool(size=2)
            pool.map(self.working, queue)
            pool.shutdown()
            pool.wait()
        except Exception:
            logger.error(traceback.format_exc())

    def process(self):
        """Crawl every configured category feed in order."""
        for catName, feedUrl in self.category:
            self.parseRss(feedUrl, catName)
            
if __name__ == '__main__':
    import argparse
    # -- Command-line arguments --
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Set up the SSH connection to the mana.vn server (sftp mode only) --
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh is None:
            # "Could not connect to the server, exiting crawler"
            print('Không kết nối được tới server, thoát crawler')
            os._exit(1)
    # BUG FIX: the start message said 'tuoitrenews' (copy-paste leftover)
    # while the finish message says 'vietnews'.
    logger.info('start crawler vietnews at {0}'.format(datetime.datetime.now()))
    crawler = Crawler(MONGO_SERVER, MONGO_PORT)
    crawler.process()
    if ssh is not None:
        ssh.close()
    logger.info('finished crawler vietnews at {0}'.format(datetime.datetime.now()))
    # os._exit (not sys.exit) kills any lingering worker-pool threads.
    # BUG FIX: exit with 0 on success — the original exited with 1, which
    # reads as failure to cron/shell supervisors.
    os._exit(0)