# -*- coding: utf-8 -*-

''' Crawl sports news from bongda.com.vn
    @author Giangnh
    @created date 16/6/2011
'''
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import commonlib
import  traceback
import time
import datetime
import urllib
import workerpool
import html2dict
from MongoModel import MongoModel
from termcolor import cprint
from lxml import etree

DBNAME = 'bongda'
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/tym/bongda/'
PREFIX = '/uploads/bongda/'
MONGO_SERVER = 'beta.mana.vn'    # Server Mana 64bit
MONGO_PORT = 27017
LOG = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'Tintuc_Bongda')

class BD(MongoModel):
    __maxPage = 5
    __url = 'http://www.bongda.com.vn/{_cat}/index.aspx'
    categories = {'vdqg-v-league': 'Việt Nam',
        'bong-da-anh': 'Anh',
        'bong-da-italia': 'Ý',
        'bong-da-taybannha': 'Tây Ban Nha'
        }
    
    def __init__(self, host, port=27017):
        MongoModel.__init__(self, host, port)
        
    def standardizeTime(self, timeStr):
        result = ''
        try:
            result = datetime.datetime.strptime(timeStr, "%H:%M %d/%m/%Y") + datetime.timedelta(seconds=time.timezone)
        except:
            datetime.datetime.utcnow()
        return result
    
    def itentifyId(self, url):
        pat = r'/(\d+)_.+'
        id = commonlib.extractWithRegEx(pat, url, 1)
        return id
    
    def parent(self, node, tag, maxLevel=3):
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        data = {}
        if contentNode == None: return data
        for item in contentNode.xpath(".//table//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(item, 'src'))
            if src != '':
                pNode = self.parent(item, 'tr')
                nextNode = pNode.xpath("./following-sibling::*[1]")
                if nextNode != None:
                    text = commonlib.getElementText(nextNode, descendant=1)
                    if text.strip() != '':
                        if commonlib.wordCount(text) < 45:
                            data[hash(src)] = text.strip()
        return data 
    
    def processDetail(self, url, desc=''):
        LOG.debug('start processDetail(url={0})'.format(url))
        url += '?t=' + str(time.time())
        content = []
        nguontin = ''
        try:
            tree = commonlib.getXMLTree(url)
            sourceNode = tree.xpath("//div[@id='ctl00_BD_art_pnlNewsInfo']//b[@class='author']")
            if len(sourceNode) > 0:
                nguontin = commonlib.replaceStr(r'Theo\s', '', commonlib.getElementText(sourceNode))
            contentNode = tree.xpath("//div[contains(@id, 'pnlNewsContainer')]")
            if len(contentNode) > 0:
                commonlib.cleanElementWithAttrib(contentNode[0], 'td', [('class', 'maNlStandingsDiff')])
                commonlib.cleanElementWithAttrib(contentNode[0], 'table', [('class', ' cke_show_border')])     # NOTE: remove cai table chua dong "Mời bạn đọc tiếp tục chia sẻ những bình luận..."
                commonlib.dropTagWithIndex(contentNode[0], 'strong', 1)
                for item in contentNode[0].xpath(".//strong[contains(., 'Tỷ lệ cược')]/following-sibling::*".decode('utf-8')):
                    try:
                        item.getparent().remove(item)
                    except:
                        print 'Không remove được table tỷ lệ cược'
                commonlib.cleanAllElementAfter(contentNode[0], ".//strong[contains(., 'Tỷ lệ cược')]".decode('utf-8'), ".//strong[contains(., 'Tỷ lệ cược')]/following-sibling::*".decode('utf-8'))
                tmpData = html2dict.html2text(etree.tounicode(contentNode[0]), url)
                imageTable = self.getImageAndCaption(contentNode[0], url)
                stepOver = False
                ic = 0
                for i in range(len(tmpData)):
                    if ic < 3:
                        if tmpData[i]['data'] == desc: continue
                    ic += 1
                    if (stepOver): stepOver = False; continue
                    item = tmpData[i]
                    if item['type'] == 'image':
                        hashItem = item['hash']
                        try:
                            if imageTable[hashItem] != '':
                                if tmpData[i+1]['data'] == imageTable[hashItem]:
                                    stepOver = True
                                    item['caption'] = imageTable[hashItem]
                                elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                                    tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip().lstrip('|').strip()
                                    item['caption'] = imageTable[hashItem]
                            del item['hash']
                        except: pass
                        src = commonlib.downloadNUpload(ssh, item['data'], DOWNLOAD_PATH, PREFIX)
                        if src != '': item['data'] = src
                        else: continue
                    content.append(item)
            for i in content:
                print i['data']
        except:
            LOG.error(traceback.format_exc())
        finally:
            return content, nguontin
        
    def processPage(self, url, giaidau, currentProcessed=0, currentDuplicated=0):
        LOG.debug('start processPage')
        try:
            db = self.connection[DBNAME]
            collection = db['tinthethao']
            tree = commonlib.buildTreeFromHTML(urllib.urlopen(url).read())
            for element in tree.xpath("//table[@class='article']/tr"):
                LOG.debug('currentProcessed={0}, currentDuplicated={1}'.format(currentProcessed, currentDuplicated))
                articleLinkNode = element.xpath(".//a[@class='read_more']")
                if len(articleLinkNode) == 0: continue
                articleLink = commonlib.urlJoin(url, commonlib.getAttribText(articleLinkNode, 'href', ''))
                articleTitle = commonlib.getElementText(articleLinkNode)
                if 'Bản tin tỷ lệ cược' in articleTitle: continue
                id = self.itentifyId(articleLink)
                if id == '': continue
                hashLink = commonlib.getMD5Hash(id)
                isExist = collection.find_one({'hashLink': hashLink})
                if isExist != None: 
                    cprint("Already exist in database", 'yellow')
                    currentDuplicated += 1
                    continue
                metaNode = element.xpath("./following-sibling::*[1]/td")[0]
                thumbNode = metaNode.xpath("./img/@src")
                thumbNail = commonlib.urlJoin(url, thumbNode[0]) if len(thumbNode)>0 else ''
                if thumbNail != '':
                    thumbNail = commonlib.downloadNUpload(ssh, thumbNail, DOWNLOAD_PATH, PREFIX) 
                postedDate = commonlib.getElementText(element.xpath(".//sup[@class='date']"))
                postedDate = self.standardizeTime(postedDate)
                description = commonlib.getElementText(metaNode.xpath("./img"))
                content, nguontin = self.processDetail(articleLink, description)
                if nguontin == '': nguontin = 'bongda.com.vn'
                print "#################################"
                print 'id: ', id
                print 'Tieu de: ', articleTitle
                print 'Nguon tin: ', nguontin
                print 'Link: ', articleLink
                print 'Mo ta: ', description
                print 'Dang ngay: ', postedDate
                print "#################################"
                print '-----------------------------------------------------------------------'
                print 'CONTENT'
                print '-----------------------------------------------------------------------'
                for line in content:
                    if line['type'] == 'image':
                        print line['data'], '(', line['caption'], ')'
                    else:
                        print line['type'], ':', line['data']
                # --> luu vao database
                if len(content) > 1 and isExist==None:
                    collection.save({'title': articleTitle,
                        'articleLink': articleLink,
                        'hashLink': hashLink,
                        'thumbnail': thumbNail,
                        'description': description,
                        'postdate': postedDate,
                        'timestamp': time.time(),
                        'lastupdate': datetime.datetime.utcnow(),
                        'content': content,
                        'giaidau': giaidau,
                        'nguontin': nguontin
                        })
                    currentProcessed += 1
        except:
            LOG.error(traceback.format_exc())
        LOG.info("current process: {0}, current dupplicate: {1}".format(currentProcessed, currentDuplicated))
        return currentProcessed, currentDuplicated
        
    def process(self, cat):
        LOG.debug('start process: %s' % self.categories[cat])
        maxProcess = 30
        maxDuplicate = 15
        url = self.__url.format(_cat=cat)
        currentProcessed, currentDuplicated = 0, 0
        for i in range(1,self.__maxPage + 1):
            if currentProcessed > maxProcess or currentDuplicated > maxDuplicate:
                LOG.info('chuyen muc {0} dung do qua gioi han bai viet hoac do trung lap qua nhieu'.format(cat))
                LOG.info("current process: {0}, current dupplicate: {1}".format(currentProcessed, currentDuplicated))
                break
            pUrl = url + '?page_id=' + str(i)
            currentProcessed, currentDuplicated = self.processPage(pUrl, self.categories[cat], currentProcessed, currentDuplicated)

def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                print 'process timeout {0}'.format(delta)
                print 'kill process {0}'.format(pid)
                os.system("kill -9 {0}".format(pid))
        except:
            print 'ERROR: could not kill python process with pid={0}'.format(pid)
        time.sleep(5)

def forceQuit():
    ''' Terminate the process immediately with exit status 1, bypassing
        atexit handlers and without waiting for the non-daemon watchdog
        thread.
    '''
    # BUGFIX: removed the unreachable os.kill(pid, 9) that followed
    # os._exit(1) -- _exit never returns.
    os._exit(1)
        
if __name__ == '__main__':
    # Timestamp consulted by the quitIfTimeout watchdog thread.
    # NOTE(review): never refreshed after start, so the crawler is
    # hard-killed ~10 minutes in regardless of progress -- confirm intended.
    lastaction = time.time()
    import threading
    import argparse
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Open the SSH connection to mana.vn used (as module global `ssh`)
    #    by the BD methods to SFTP-upload downloaded images.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            # Message: "Could not connect to the server, exiting crawler"
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # Start the watchdog that kills the process on timeout.
    threading.Thread(target=quitIfTimeout).start()
    LOG.info('start crawler bongda.com.vn')
    try:
        bd = BD(MONGO_SERVER, MONGO_PORT)
        # Crawl all categories with two concurrent workers.
        pool = workerpool.WorkerPool(size=2)
        pool.map(bd.process, bd.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        LOG.error(traceback.format_exc())
    if ssh is not None: ssh.close()
    LOG.info('finished crawler bongda.com.vn')
    forceQuit()
