# -*- coding: utf-8 -*-
import traceback, os, re
import commonlib, datetime, time
import workerpool
import html2dict
import sys
import logging
from MongoModel import MongoModel
from termcolor import cprint
from lxml import etree

# Per-category crawl limits: stop after this many new articles processed...
MAX_PROCESSED = 15
# ...or after this many already-seen (duplicate) articles in one run.
MAX_DUPLICATED = 10
# MongoDB database holding the 'article' and 'category' collections.
DBNAME = 'tintuc_v2'
# Local directory where article images are downloaded before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news/'
MONGO_SERVER = 'beta.mana.vn'    # 64-bit Mana server
MONGO_PORT = 27017
# Remote path prefix under which uploaded images are served.
PREFIX = '/uploads/news/'

# Logger backed by MongoDB, tagged with the source site name.
logger = commonlib.getMongoLog(MONGO_SERVER, MONGO_PORT, '24h.com.vn')

class Crawler24H(MongoModel):
    """Crawler for the mobile news site m.24h.com.vn (Python 2).

    Walks each configured category's listing pages, follows every new
    article link, extracts the article (title, posted date, description,
    thumbnail, body text and images with captions) and saves it into the
    MongoDB 'article' collection of DBNAME.

    Depends on module-level globals defined in the __main__ section of
    this script: `ssh` (sftp channel used by commonlib.downloadNUpload,
    may be None) and the run counters `totalNewsCrawlered` /
    `totalNewsDuplicated`.
    """
    
    # Site entry points; {catid} is replaced with a key of `categories`.
    rootUrl = 'http://m.24h.com.vn/'
    url = 'http://m.24h.com.vn/{catid}.html'
    # 24h category slug -> local category display name + search tags.
    categories = {'tin-tuc-trong-ngay-c46': {'name': 'Văn hóa - Xã hội', 'tag': ['van hoa', 'xa hoi']},
        'an-ninh-hinh-su-c51': {'name': 'Pháp luật', 'tag': ['phap luat']},
        'tai-chinh-bat-dong-san-c161': {'name': 'Kinh doanh', 'tag': ['kinh doanh', 'tai chinh', 'bat dong san']},
        'thoi-trang-c78': {'name': 'Làm đẹp - Thời trang', 'tag': ['thoi trang']},
        'thoi-trang-hi-tech-c407': {'name': 'Công nghệ số', 'tag': ['cong nghe so', 'thoi trang hitect']},
        'am-thuc-c460': {'name': 'Ẩm thực', 'tag': ['am thuc']},
        'lam-dep-c145': {'name': 'Làm đẹp - Thời trang', 'tag': ['lam dep', 'thoi trang']},
        'phim-c74': {'name': 'Giải trí', 'tag': ['giai tri', 'phim']},
        'giao-duc-du-hoc-c216': {'name': 'Giáo dục', 'tag': ['giao duc', 'du hoc']},
        'ban-tre-cuoc-song-c64': {'name': 'Tâm sự', 'tag': ['suc khoe', 'gioi tinh', 'tam su']},
        'the-thao-c101': {'name': 'Thể thao', 'tag': ['the thao']},
        'bong-da-c48': {'name': 'Thể thao', 'tag': ['the thao', 'bong da']}
        }
    
    def __init__(self, host='localhost', port=27017):
        """Connect to MongoDB through the MongoModel base class."""
        MongoModel.__init__(self, host, port)
        self.__alreadyDetected = False  # NOTE(review): assigned but never read in this file
        self.__step = 12                # NOTE(review): assigned but never read in this file
    
    def identifyId(self, url):
        """Extract the numeric article id from an article URL.

        e.g. '...-c216a394105.html' -> '394105'.  Returns '' when the
        URL does not match the expected pattern.
        """
        # -c216a394105 -> (-c216a) - 394105
        pattern = r'-[a-z][\d]{1,3}[a-z]{1}(\d+)\.html'
        id = commonlib.extractWithRegEx(pattern, url, 1, '')
        return id
    
    def getLeftRootOfCategory(self, catName):
        """Look up a category by display name in the 'category' collection.

        Returns {'category': <lft>, 'root': <root_id>} (nested-set left
        value and tree root id), or '' when the category is not found.
        """
        db = self.connection[DBNAME]
        collection = db['category']
        q = collection.find_one({'data': catName}, {'lft': 1, 'root_id': 1})
        result = {'category': q['lft'], 'root': q['root_id']} if q else ''
        return result
    
    def standardizeTimeString(self, timeStr):
        """Parse a 'dd/mm/yyyy, HH:MM' timestamp out of `timeStr` and
        shift it from local time to UTC by adding time.timezone.

        NOTE(review): time.timezone ignores DST; presumably acceptable
        for Vietnam's fixed UTC+7 offset -- confirm.
        """
        pattern = r'(\d+/\d+/\d+.+\d+:\d+)'
        timestr = commonlib.extractWithRegEx(pattern, timeStr, 1)
        postedTime = datetime.datetime.strptime(timestr, "%d/%m/%Y, %H:%M")
        return postedTime + datetime.timedelta(seconds=time.timezone)
    
    def parent(self, node, tag, maxLevel=3):
        """Walk upward from `node` (inclusive) looking for an element
        whose tag equals `tag`, climbing at most `maxLevel` + 1 steps.

        Returns the matching element, or the last ancestor examined when
        nothing matched -- callers must re-check the returned node's tag.
        """
        ilv = 0
        pnode = node
        while pnode.getparent() != None and ilv <= maxLevel:
            ilv += 1
            if pnode.tag == tag: break
            pnode = pnode.getparent()
        return pnode
    
    def getImageAndCaption(self, contentNode, url):
        """Build a map of hash(absolute img src) -> caption text for every
        <img> inside `contentNode`.

        On 24h's markup a caption is the first sibling element following
        the <center> that wraps the image (unless that sibling is a
        <div>).  Captions of 30+ words are assumed to be body text and
        recorded as ''.
        """
        data = {}
        if contentNode == None: return data
        for img in contentNode.xpath(".//img"):
            src = commonlib.urlJoin(url, commonlib.getAttribText(img, 'src'))
            if src != '':
                pnode = self.parent(img, 'center', 2)
                captionText = ''
                if pnode.tag == 'center':
                    captionNode = pnode.xpath("./following-sibling::*[1]")
                
                    if len(captionNode) > 0:
                        if captionNode[0].tag != 'div':
                            captionText = commonlib.getElementText(captionNode, descendant=1)
                data[hash(src)] = captionText if commonlib.wordCount(captionText) < 30 else '' 
        return data 
    
    def loaibo(self, matchStr, blackListWord=[]):
        """Return True when `matchStr` matches any regex pattern in
        `blackListWord` (used to drop boilerplate paragraphs).

        NOTE(review): mutable default argument -- harmless here because
        the list is never mutated, but worth cleaning up.
        """
        for blw in blackListWord:
            if re.search(blw, matchStr): return True
        return False
    
    def getDetailNews(self, url, catId):
        """Fetch one article (across all of its pages), extract its
        content and save it into the 'article' collection.

        url   -- absolute link to the article's first page
        catId -- key into `categories` identifying the article's category

        Skips silently when the category is unknown or an article with
        the same hashed id or title already exists.  Uses the module
        globals `ssh` (image upload) and `totalNewsCrawlered` (stats).
        Any exception is logged and swallowed so one bad article does
        not abort the category crawl.
        """
        print('getDetailNews url={0}'.format(url))
        firstPage = True
        newsLink = url  # remember the first-page URL; `url` is reused for pagination
        global totalNewsCrawlered
        try:
            # Dedup key: MD5 of the site name + numeric article id.
            id = self.identifyId(url)
            hashUrl = commonlib.getMD5Hash('24h.com.vn_{0}'.format(id))
            db = self.connection[DBNAME]
            collection = db['article']
            cat = self.getLeftRootOfCategory(self.categories[catId]['name'])
            if cat == '':
                logger.info(unicode('Chuyên mục {0} không tồn tại'.format(self.categories[catId]['name']), 'utf-8'))
                return
            if collection.find_one({'hashUrl': hashUrl}):
                logger.warning('Already existed in database !!!') 
                return
            data = []
            # Loop over the article's pages (long articles are paginated).
            while True:
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: return
                if firstPage:
                    # Title / thumbnail / date / description only exist on page 1.
                    title = commonlib.getElementText(tree.xpath("//div[@class='tin-anh']/h3[1]/a"), descendant=1)
                    title = title.replace('–', '-')  # normalize en-dash so title dedup matches
                    if collection.find_one({'title': title}): 
                        logger.warning('Already existed in database !!!')
                        return
                    thumbnail = commonlib.getAttribText(tree.xpath("//div[@class='tin-anh']/a/img[@class='img-thumb']"), 'src')
                    thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '' else ''
                    if thumbnail != '':
                        # Download locally and (in sftp mode) upload to the server.
                        thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    postedDate = commonlib.getElementText(tree.xpath("//div[@class='tin-anh']/div[@class='bv-date']"))
                    postedDate = self.standardizeTimeString(postedDate)
                    description = commonlib.getElementText(tree.xpath("//div[@class='tin-anh']/div[@class='bv-content']"), descendant=1)
                    firstPage = False
                    # --- debug dump of the extracted header fields ----------
                    print("# ---------------------------------------------------")
                    cprint("Chuyên mục {0}".format(self.categories[catId]['name']))
                    print("Title: {0}".format(title))
                    print("Date: {0}".format(postedDate))
                    print("Description: {0}".format(description))
                    print("Thumbnail: {0}".format(thumbnail))
                    print("# ---------------------------------------------------")
                    # -----------------------------------------------------------
                contentNode = tree.xpath("//div[@class='tin-anh']")
                if len(contentNode) < 1:
                    raise Exception('ERROR: Xai xpath chi tiết tin !!!'); break  # NOTE(review): `break` is unreachable after raise
                contentNode = contentNode[0]
                # Strip header pieces (title, description, date, thumbnail,
                # event box) so only the article body remains.
                commonlib.cleanElementWithAttrib(contentNode, 'span', [('class', 'bv-tieude')])
                commonlib.cleanElementWithAttrib(contentNode, 'div', [('class', 'bv-content')])
                commonlib.cleanElementWithAttrib(contentNode, 'div', [('class', 'bv-date')])
                commonlib.cleanElementWithAttrib(contentNode, 'img', [('class', 'img-thumb')])
                commonlib.cleanElementWithAttrib(contentNode, 'div', [('class', 'sukien2')])
                tmpData = html2dict.html2text(etree.tounicode(contentNode), url)
                imageTable = self.getImageAndCaption(contentNode, url)
                stepOver = False
                # Merge the captions from imageTable into the image items of
                # tmpData; when the text item after an image IS the caption,
                # `stepOver` skips it so it is not duplicated as body text.
                for i in range(len(tmpData)):
                    if (stepOver): stepOver = False; continue
                    item = tmpData[i]
                    if item['type'] == 'image':
                        hashItem = item['hash']
                        try:
                            if ((imageTable[hashItem] != '') and (tmpData[i+1]['type'] != 'textbold')):
                                if tmpData[i+1]['data'] == imageTable[hashItem]:
                                    stepOver = True
                                    item['caption'] = imageTable[hashItem]
                                elif (len(tmpData[i+1]['data']) > len(imageTable[hashItem]) and tmpData[i+1]['data'].startswith(imageTable[hashItem])):
                                    # Caption is a prefix of the next text item: trim it off.
                                    tmpData[i+1]['data'] = tmpData[i+1]['data'][len(imageTable[hashItem]):].strip().lstrip('|').strip()
                                    item['caption'] = imageTable[hashItem]
                            del item['hash']
                        except: pass  # NOTE(review): bare except also hides KeyError/IndexError from the lookups above
                        # Download the image and replace its src with the uploaded path.
                        src = commonlib.downloadNUpload(ssh, re.sub(r'(\.\./)+', '', item['data']), DOWNLOAD_PATH, PREFIX)
                        if src != '': 
                            item['data'] = src
                            # First successfully downloaded image doubles as thumbnail.
                            if thumbnail.strip() == '': thumbnail = src
                        else: continue
                    else:
                        # Drop boilerplate navigation/promo text.
                        if self.loaibo(item['data'], ['Bấm đây để xem lại video', 'Trang trước.+Trang sau', 'Bấm để xem kỳ', 
                            'Xem thêm các kỳ', 'Kỳ đầu Kỳ trước.+', 'Quý vị và các bạn hãy gửi bài viết']): continue
                    data.append(item)
                # Follow the article's own pager, if any.
                nextNode = tree.xpath("//div[@class='phan-trang']")
                if len(nextNode) == 0: break
                url = self.detectNextPage(nextNode[0], url)
                if url == '': break
            # --- debug dump of the assembled content items ---------------------
            for item in data:
                if item['type'] == 'image':
                    print item['type'], ': ', item['data'], '-> ', item['caption']
                else:
                    print item['type'], ': ', item['data']
            # -------------------------------------------------------------------
            if (len(data) > 0):
                collection.save({'hashUrl': hashUrl,
                    'title': title,
                    'thumbnail': thumbnail,
                    'description': description,
                    'content': data,
                    'newsLink': newsLink,
                    'update': postedDate,
                    'source': '24h.com.vn',
                    'author': '',
                    'category': cat['category'],
                    'root': cat['root'],
                    'is_active': True,
                    'lastupdate': datetime.datetime.utcnow(),
                    'timestamp': time.time(),
                    'date': datetime.datetime.utcnow(),
                    'tag': self.categories[catId]['tag']
                    })
                totalNewsCrawlered += 1
        except:
            logger.exception(str(sys.exc_info()[1]))
            print 'ERROR at url={0}'.format(url)

    def detectNextPage(self, phanTrangNode, baseUrl='http://m.24h.com.vn/'):
        """Return the absolute URL of the page after the current one, or
        '' when there is none.

        The pager ('phan-trang') shows the current page number as '[n]';
        the link whose text contains n+1 is the next page.
        """
        crPage = int(commonlib.extractWithRegEx(r'\[(\d+)\]', commonlib.getElementText(phanTrangNode, descendant=1), 1))
        nextPage = crPage + 1
        nextLink = commonlib.getAttribText(phanTrangNode.xpath(".//a[contains(., '{0}')]".format(nextPage)), 'href')
        if nextLink == '': return ''
        return commonlib.urlJoin(baseUrl, nextLink)

    def getListNews(self, catId):
        """Crawl the listing pages of one category.

        Walks the category's listing, skips video-only and already-stored
        articles, and calls getDetailNews for each new one.  Stops once
        MAX_PROCESSED new or MAX_DUPLICATED duplicate articles are seen,
        or when the pager runs out.  Adds this run's duplicates to the
        module-level `totalNewsDuplicated` counter.
        """
        print('getListNews(catId={0})'.format(catId))
        currentProcessed = 0
        currentDuplicated = 0
        try:
            db = self.connection[DBNAME]
            collection = db['article']
            url = self.url.format(catid=catId)
            # One iteration per listing page.
            while True:
                cprint('chuyen muc: {0}, currentProcessed={1}, currentDuplicated={2}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), currentProcessed, currentDuplicated), 'green')
                if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                    if currentProcessed > MAX_PROCESSED: logger.info(unicode('Chuyen muc {0} dung do qua gioi han {1} cho phep'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), MAX_PROCESSED), 'utf-8'))
                    if currentDuplicated > MAX_DUPLICATED: logger.info(unicode('Chuyen muc {0} dung do duplicate > {1}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), MAX_DUPLICATED), 'utf-8')) 
                    break
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree == None: break
                # Remove the "featured events" teaser block so its articles
                # are not crawled as regular listing entries.
                for item in tree.xpath("//h2[contains(., 'Sự kiện tiêu điểm')]/../following-sibling::*[@class='tin-anh']".decode('utf-8')):
                    if item.getparent() != None: item.getparent().remove(item)
                # Article teasers appear after the first pager block.
                for item in tree.xpath("//div[@class='phan-trang'][1]/following-sibling::*[@class='tin-anh']"):
                    cprint('chuyen muc: {0}, currentProcessed={1}, currentDuplicated={2}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), currentProcessed, currentDuplicated), 'red')
                    if currentProcessed > MAX_PROCESSED or currentDuplicated > MAX_DUPLICATED:
                        cprint('Stopped {0}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name'])), 'yellow') 
                        break
                    title = commonlib.getElementText(item.xpath("./h3/a"))
                    try:
                        # Skip video-only posts (but keep "bài bình" commentary).
                        if (('video' in title.lower()) and ('bài bình' not in title.lower())):
                            logger.info(unicode('Không lấy bài viết chỉ có video', 'utf-8')); continue 
                    except: pass
                    newsLink = commonlib.getAttribText(item.xpath("./h3/a"), 'href', '')
                    newsLink = commonlib.urlJoin(url, newsLink) if newsLink != '' else ''
                    if newsLink == '': continue
                    id = self.identifyId(newsLink)
                    if id == '': continue
                    # Same dedup key construction as getDetailNews.
                    hashUrl = commonlib.getMD5Hash('24h.com.vn_{0}'.format(id))
                    isExist = collection.find_one({'hashUrl': hashUrl})
                    if isExist:
                        currentDuplicated += 1
                        print('this news already exist in database hashUrl: {0}'.format(hashUrl))
                        continue
                    currentProcessed += 1
                    self.getDetailNews(newsLink, catId)
                    print('Chuyên mục {0} currentProcessed={1}, currentDuplicated={2}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), currentProcessed, currentDuplicated))
                nextNode = tree.xpath("//div[@class='phan-trang'][1]")
                if len(nextNode) == 0: break
                url = self.detectNextPage(nextNode[0])
                if url == '': break
                print '*****************************'
                cprint('Go to page: {0}'.format(url), 'green')
                print '*****************************'
            global totalNewsDuplicated
            totalNewsDuplicated += currentDuplicated
        except:
            logger.exception(str(sys.exc_info()[1]))
        finally:
            logger.info(unicode('Chuyên mục {0} currentProcessed={1}, currentDuplicated={2}'.format(commonlib.encodeUTF8Str(self.categories[catId]['name']), currentProcessed, currentDuplicated), 'utf8'))

def forceQuit():
    """Terminate the current process immediately, without cleanup.

    Sends SIGKILL to our own pid (kill -9 on Linux) and falls back to
    os._exit(1), which exits without running atexit handlers or waiting
    for the non-daemon watchdog thread started in __main__.

    Bug fixed: the original called os._exit(1) *before* os.kill, so the
    kill line was unreachable dead code.
    """
    pid = os.getpid()
    try:
        os.kill(pid, 9)  # kill -9 <pid> on Linux; may be unavailable elsewhere
    except (OSError, AttributeError):
        pass
    os._exit(1)  # fallback: hard exit with status 1

def quitIfTimeout():
    """Watchdog loop: every 10 seconds, check the elapsed time since
    `lastaction` and hard-kill the process once it exceeds 600 seconds.

    Reads the module-level globals `lastaction`, `totalNewsCrawlered`
    and `totalNewsDuplicated` set in __main__.  NOTE(review):
    `lastaction` is set once at startup and never refreshed, so `delta`
    is total runtime rather than idle time -- the process is always
    killed after ~10 minutes regardless of activity; confirm intended.
    """
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 600:
                # Log the run summary before dying so the log shows
                # whether the crawler did any work at all.
                if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                    logger.critical(unicode("crawler tin tức 24h.com.vn không hoạt động", 'utf8'))
                else:
                    logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
                logger.info('process timeout {0}'.format(delta))
                logger.info('kill process {0}'.format(pid))
                os.system("kill -9 {0}".format(pid))  # POSIX-only self-kill
        except:
            logger.error('ERROR: could not kill python process with pid={0}'.format(pid))
        time.sleep(10)

if __name__ == '__main__':
    
    # Run statistics shared with the worker threads and the watchdog.
    totalNewsCrawlered = 0
    totalNewsDuplicated = 0
    lastaction = time.time()  # NOTE(review): never refreshed afterwards; see quitIfTimeout
    import threading
    import argparse
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- Open the ssh connection to the mana.vn server ('--run sftp' mode only)
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    # Watchdog thread: kills the whole process after the time budget.
    threading.Thread(target=quitIfTimeout).start()
    logger.info('start crawler 24h.com.vn')
    crawler = Crawler24H(MONGO_SERVER, MONGO_PORT)
    try:
        # Crawl up to two categories concurrently.
        pool = workerpool.WorkerPool(size=2)
        pool.map(crawler.getListNews, crawler.categories.keys())
        pool.shutdown()
        pool.wait()
    except:
        logger.error(str(sys.exc_info()[1]))
    if ssh is not None: ssh.close()
    # Final run summary: critical when nothing at all was crawled.
    if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
        logger.critical(unicode("crawler tin tức 24h.com.vn không hoạt động", 'utf8'))
    else:
        logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
    logger.info('finished crawler 24h.com.vn')
    forceQuit()
