# -*- coding: utf-8 -*-
import sys
sys.path.append('/home/crawler/src')
import readability
import commonlib
import traceback
import datetime
import time
import os
import html2dict as html2text
import pprint
from lxml import etree
from MongoModel import MongoModel
import workerpool

# MongoDB server that holds both the article database and the crawler log.
MONGO_SERVER = 'beta.mana.vn'
MONGO_PORT = 27017
# Local directory where downloaded article images are written before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
# URL path prefix under which uploaded images are served on the target host.
PREFIX = '/uploads/news/'
# NOTE(review): MAX_PROCESSED / MAX_DUPLICATED are not referenced anywhere in
# this file — presumably leftovers from a sibling crawler; confirm before use.
MAX_PROCESSED = 15
MAX_DUPLICATED = 10
# Name of the Mongo database the crawler reads/writes.
DBNAME = 'englishnews'
# Mongo-backed logger tagged with this crawler's name ('tuoitrenews').
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'tuoitrenews')


class Crawler(MongoModel):
    
    rssLink = 'http://tuoitrenews.vn/cmlink/1.205?localLinksEnabled=false'
    def __init__(self, host, port):
        MongoModel.__init__(self, host, port)
    
    def standardizeTimeStr(self, timeStr):
        '''Fri, 14 Oct 2011 12:30:55 +0700'''
        try:
            timeStr = commonlib.extractWithRegEx(r',\s+(\d+.+\d{4} \d+:\d+:\d+)', timeStr, 1)
            return datetime.datetime.strptime(timeStr, "%d %b %Y %H:%M:%S") + datetime.timedelta(seconds=time.timezone)
        except:
            traceback.print_exc()
            logger.exception('standard time error with timeStr="{0}"'.format(timeStr))
            return datetime.datetime.now() + datetime.timedelta(hours=1) + datetime.timedelta(seconds=time.timezone)
    
    def detectCategory(self, url):
        try:
            catName = commonlib.extractWithRegEx(r'tuoitrenews/(.+?)/', url, 1).capitalize()
            if catName == '': return None
            db = self.connection['englishnews']
            collection = db['category']
            row = collection.find_one({'name': catName})
            if row: return row['_id']
            collection.save({'name': catName})
            row = collection.find_one({'name': catName})
            if row: return row['_id']
            return None
        except:
            logger.exception('detect category error with url={0}'.format(url))
            return None
        
    def getDetail(self, url):
        logger.debug('call getDetail url={0}'.format(url))
        data = {'content': [], 'thumbnail': ''}
        try:
            tree = commonlib.getXMLTree(url)
            if tree == None: return
            contentNode = commonlib.getArticleNode(etree.tounicode(tree), clean=[])
            t_data = html2text.html2text(etree.tounicode(contentNode), baseurl=url)
            firstImage = ''
            for item in t_data:
                if item['type'] == 'image':
                    img = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                    if img != '': 
                        data['content'].append({'type': 'image', 'data': img})
                        if firstImage == '': firstImage = img
                else:
                    data['content'].append(item)
            data['thumbnail'] = firstImage
            pprint.pprint(data)
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            return data
    
    def working(self, item):
        try:
            global totalNewsCrawlered
            db = self.connection['englishnews']
            collection = db['article']
            if collection.find_one({'hashUrl': item['hashUrl']}):
                print 'Already existed in database'
                return
            link = item['link']
            guid = item['guid']
            catId = item['catId']
            hashUrl = item['hashUrl']
            print item['title']
            print item['link']
            print item['description']
            print item['pubDate']
            print item['guid']
            print '________________________________________'
            detail = self.getDetail(item['link'])
            print '________________________________________'
            if len(detail['content']) == 0:
                logger.warning("Can't get detail of link {0}".format(link))
                return
            collection.save({'hashUrl': item['hashUrl'],
                             'title': item['title'],
                             'newsLink': item['link'],
                             'description': item['description'],
                             'thumbnail': detail['thumbnail'],
                             'category': catId,
                             'is_active': True,
                             'content': detail['content'],
                             'update': item['pubDate'],
                             'source': 'tuoitrenews',
                             'timestamp': time.time(),
                             'lastupdate': datetime.datetime.utcnow()})
            totalNewsCrawlered += 1
        except:
            logger.exception(str(sys.exc_info()[1]))
    
    def parseRss(self, link):
        try:
            global totalNewsDuplicated
            tree = commonlib.getXMLTree(link, isXML=True)
            if tree == None: return
            db = self.connection['englishnews']
            collection = db['article']
            queue = []
            for item in tree.xpath("//item"):
                title = commonlib.getElementText(item.xpath(".//title"))
                link = commonlib.getElementText(item.xpath(".//link"))
                if link == '': continue
                catId = self.detectCategory(link)
                if catId == None: continue
                description = commonlib.getElementText(item.xpath(".//description"))
                pubDate = self.standardizeTimeStr(commonlib.getElementText(item.xpath(".//pubDate")))
                guid = commonlib.getElementText(item.xpath('.//guid'))
                hashUrl = commonlib.getMD5Hash('tuoitrenews_{0}'.format(guid))
                if collection.find_one({'hashUrl': hashUrl}):
                    totalNewsDuplicated += 1
                    print 'Already existed in database'
                    continue
                queue.append({'title': title, 'link': link, 'description': description, 'pubDate': pubDate, 'guid': guid, 'catId': catId, 'hashUrl': hashUrl})
            
            pool = workerpool.WorkerPool(size=3)
            pool.map(self.working, queue)
            pool.shutdown()
            pool.wait()
        except:
            logger.exception(str(sys.exc_info()[1]))
            
if __name__ == '__main__':
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    lastaction = time.time()
    import argparse
    # -- Phần argument command line
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Phần khởi tạo kết nối ssh tới server mana.vn
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            os._exit(1)
    logger.info('start crawler tuoitrenews at {0}'.format(datetime.datetime.now()))
    crawler = Crawler(MONGO_SERVER, MONGO_PORT)
#    crawler.getDetail('http://tuoitrenews.vn/cmlink/tuoitrenews/business/overseas-vietnamese-invest-over-1-bln-1.47946?localLinksEnabled=false')
    crawler.parseRss(crawler.rssLink)
    if ssh is not None: ssh.close()
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức afamily.vn không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler tuoitrenews at {0}'.format(datetime.datetime.now()))
    os._exit(1)