# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import  traceback, re, workerpool, datetime, time, os, urllib
import  pycommonlib as pyclib
import  html2textlib
import HTMLParser
import cStringIO as StringIO

from    pymongo     import  Connection
from    termcolor   import  cprint
from    urlparse    import  urlparse
from    lxml        import  etree
from    readability.readability     import Document

# MongoDB endpoint that holds the harvested RSS data.
MONGO_SERVER            =   'beta.mana.vn'
MONGO_PORT              =   27017
DATABASE                =   'rss'

# Collection names inside the database above: discovered feed-item links
# and extracted article documents.
LINK_COLLECTION         =   'link'
ARTICLE_COLLECTION      =   'article'

# Local directory where downloaded article assets are written, and the
# URL prefix they are served under (passed to html2textlib.getDictionary).
LOCAL_PATH              = '/home/hoangnamhai/HarvestedData/tintuc/news'
PREFIX                  = '/uploads/news'

class RSSGetter():

    webLink = { 
                'dantri.com.vn'         : {'url': 'http://dantri.com.vn',        'rss': 'http://dantri.com.vn/rss'},
                'vnexpress.net'         : {'url': 'http://vnexpress.net',        'rss': 'http://vnexpress.net/gl/rss'},
                '24h.com.vn'            : {'url': 'http://www3.24h.com.vn',      'rss': 'http://www3.24h.com.vn/guest/RSS'},
                'vietnamnet.vn'         : {'url': 'http://vietnamnet.vn',        'rss': 'http://vietnamnet.vn/vn/rss/index.html'},
    }

    def  __init__(self, server, port, database):
        CONNECT             = Connection(server, port)
        self.DB             = CONNECT[database]
   
    def __del__(self):
        pass

    def processWebsite(self, website):
        try:
            urlRss       =  self.webLink[website]['rss']
            cprint('Process : ' + urlRss, 'yellow')
            tree         =  pyclib.getXMLTree(urlRss, outputHTML=False) 
            listNode     =  tree.xpath('.//a')
            data         =  []
            for node in listNode:
                lurl     =  node.get('href')
                if lurl==None or len(lurl)<1: continue
                tag      =  pyclib.getStringWithNode(node) 
                if lurl.endswith('.rss'): 
                    if not lurl.startswith('http'):
                        if lurl[0]!='/': lurl = '{0}/{1}'.format(self.webLink[website]['url'], lurl)
                        else: lurl = '{0}{1}'.format(self.webLink[website]['url'], lurl)
                    preg    = re.compile(r'http')
                    count   = preg.findall(lurl)
                    if len(count)>=2: continue
                    flag = True
                    for item in data: 
                        if item['url']==lurl: flag = False; break
                    if flag and len(tag)>0: data.append({'url': lurl, 'tag': tag})
            for item in data: self.processURL(item['url'], item['tag'], website)
        except:
            traceback.print_exc()
    
    def getTagWithURL(self, url):
        try:
            if url==None or url=='': return
            _arr        =   url.split('/')
            if len(_arr) < 1: return
            text        =   _arr[-1]
            if len(text) > 4 and text.endswith('.rss'): 
                text    =   text[:-4]
                m       =   pyclib.regexString('home|trangchu|trang-chu', text)
                if not m:  return text
        except:
            traceback.print_exc()

    def checkArticleDuplicate(self, link, website):
        try:
            if link==None or link=='': return
            collection      =   self.DB[ARTICLE_COLLECTION]
            hashUrl         =   pyclib.getMd5(link)
            result          =   collection.find_one({'hashUrl': hashUrl, 'website': website}, {})
            if result==None: return True
            else: cprint('Tin tức đã tồn tại trong cơ sỡ dữ liệu.', 'red')
        except:
            traceback.print_exc()

    def processArticle(self, url, tag, website):
        try:
            if url==None or url=='': return
            checkExists         =   self.checkArticleDuplicate(url, website)
            if checkExists!=None and not checkExists: return
            print '###############################################################################' 
            cprint('Process Article: ' + url, 'yellow')
            html                =   urllib.urlopen(url).read()
            readable_article    =   Document(html).summary()
            print readable_article
            readable_title      =   Document(html).short_title()
            print 'Title: ', readable_title
            listKeys = []
            '''
            parser              =   etree.HTMLParser(encoding='utf-8')
            tree                =   etree.parse(StringIO.StringIO(readable_article), parser)
            result              =   etree.tostring(tree.getroot(), pretty_print = True, method='html')
            print(result)
            '''
            #listNode            =   tree.xpath('//*')
#            listNode            =   tree.xpath('//div[@class="MsoNormal"] or //p[@class="MsoNormal"]')
            #for node in listNode:
             #   print pyclib.getStringWithNode(node)
            data, imgs          =   html2textlib.getDictionary(readable_article, PREFIX, LOCAL_PATH, website, output=True, stdOut=True)
            '''
            for i in range(0, len(data)):
                if data[i]['type']:
                    cprint(data[i]['src'], 'yellow'); print '-----------------------------------'
                else: 
                    if len(data[i]['data']) > 0: 
                        cprint(data[i]['data'], 'green'); print '-----------------------------------'
                    else: listKeys.append(i)
            for i in range(len(data)-1, -1, -1): del data[i]
            '''
        except:
            traceback.print_exc()

    def processLink(self, link, tag, website):
        try:
            if link==None or link=='': return
            collection      =   self.DB[LINK_COLLECTION]
            hashUrl         =   pyclib.getMd5(link)
            result          =   collection.find_one({'hashUrl': hashUrl, 'website': website}, {})
            if result==None:
                doc = {'hashUrl': hashUrl, 'link': link, 'tag': tag, 'website': website}
                collection.save(doc)
                cprint('Url đã được lưu thành công.', 'green')
            else: 
                cprint('Url đã tồn tại trong cơ sỡ dữ liệu.', 'red')
            self.processArticle(link, tag, website)
        except:
            traceback.print_exc()

    def processURL(self, url, tag, website):
        try:
            if url==None or url=='': return
            cprint('Process URL: ' + url, 'yellow')
            tag             =   pyclib.toAscii(tag); tag = tag.lower()
            if tag.startswith('http'): tag = self.getTagWithURL(url) 
            cprint('Tag: ' + tag, 'yellow')
            m               =   pyclib.regexString('home|trang-chu|trang chu', tag)
            if m: return
            tree            =   pyclib.getXMLTree(url, isXML=True, outputHTML=False)
            listNode        =   tree.xpath('//item/link')
            for node in listNode:
               text     =   pyclib.getStringWithNode(node) 
               result   =   self.processLink(text, tag, website)
               #self.processArticle(text, tag, website)
        except:
            traceback.print_exc()

def forceQuit():
    try:
        print 'Finished.', datetime.datetime.now()
        pid = os.getpid(); os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
        
if __name__ == '__main__':
    try:
        # Connect to the harvester database and run a single-article
        # smoke test; the full multi-site crawl below stays disabled.
        getter = RSSGetter(MONGO_SERVER, MONGO_PORT, DATABASE)
        getter.processArticle('http://dantri.com.vn/c26/s26-512213/messicesc-toa-sang-barcelona-doat-sieu-cup-chau-au.htm', 'test', 'dantri.com.vn')
        # Disabled: per-site crawls.
        #getter.processWebsite('dantri.com.vn')
        #getter.processWebsite('vnexpress.net')
        #getter.processWebsite('24h.com.vn')
        #getter.processWebsite('vietnamnet.vn')
        # Disabled: parallel crawl of every configured site.
        #pool = workerpool.WorkerPool(size=1)
        #pool.map(getter.processWebsite, getter.webLink.keys())
        #pool.shutdown(); pool.wait()
        #forceQuit()
    except:
        traceback.print_exc()
