# -*- encoding: utf-8 -*-
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re
import threading
import mechanize
import cStringIO as StringIO

from pymongo    import Connection
from termcolor  import cprint
from lxml       import etree

# --- Crawl configuration ---------------------------------------------------
LOCAL_PATH = '/home/hoangnamhai/HarvestedData/tuvi'  # local directory for downloaded images
#MONGO_SERVER = '27.0.12.106'   
#MONGO_SERVER = 'beta.mana.vn'   
MONGO_SERVER = 'localhost'   
MONGO_PORT = 27017
DATABASE = 'tuvi'
PREFIX = '/uploads/tuvi'  # public URL prefix stored in saved documents
SITE_URL = 'http://www.zing.vn' 
LOGIN_URL = 'http://login.me.zing.vn'
BASE_URL = 'http://hrsc.apps.zing.vn/horoscope'
MAX_COUNT = 15
MAX_ARTICLE = 30
MAX_PAGE = 10
MAX_OFFSET = 10  # how many days ahead processAll() crawls
# NOTE(review): umask 0 makes every created file/dir world-writable —
# presumably intentional so the web server can manage uploads; confirm.
os.umask(0000)

# --- Module-level initialisation -------------------------------------------
# Mongo-backed logger plus crawl counters updated by the worker code.
logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'zing.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
# flgCopy != None selects "remote copy" mode: images are pushed to the
# mana.vn host over SFTP instead of being stored only locally.
flgCopy = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    ssh = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # Bug fix: the None-check used to run AFTER ssh.open_sftp(), so a failed
    # connection raised AttributeError before the guard could ever fire.
    # Check first, and only open the SFTP channel on success.
    if ssh==None:
        if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
            logger.critical(unicode("crawler tin tức zing.vn không hoạt động", 'utf8'))
            pyclib.forceQuit()
    else:
        sftp = ssh.open_sftp()
# Watchdog start timestamp; reset to time.time() in __main__.
start = 0
    
class Zing():
    """Crawler for the zing.vn horoscope service.

    Fetches daily and yearly horoscope articles per zodiac sign ('cung'),
    de-duplicates them by URL hash against MongoDB, and stores them in the
    'article' collection. Sign metadata (including a mirrored icon image)
    is kept in the 'cung' collection.
    """

    def __init__(self, server, port, database):
        # Connect to MongoDB and keep a handle on the target database.
        CONNECT = Connection(server, port)
        self.DB = CONNECT[database]

    # Zodiac-sign lookup table: 1-based index -> Vietnamese display name,
    # date range, and remote URL of the sign's icon image.
    CUNG = {
        1: {'name': unicode('Bảo Bình', 'utf-8'), 'date': '20/01-18/02', 'img': 'http://hrsc.apps.zing.vn/images/cung/baobinh.png'},
        2: {'name': unicode('Song Ngư', 'utf-8'), 'date': '19/02-20/03', 'img': 'http://hrsc.apps.zing.vn/images/cung/songngu.png'},
        3: {'name': unicode('Bạch Dương', 'utf-8'), 'date': '21/03-20/04', 'img': 'http://hrsc.apps.zing.vn/images/cung/bachduong.png'},
        4: {'name': unicode('Kim Ngưu', 'utf-8'), 'date': '21/04-20/05', 'img': 'http://hrsc.apps.zing.vn/images/cung/kimnguu.png'},
        5: {'name': unicode('Song Tử', 'utf-8'), 'date': '21/05-21/06', 'img': 'http://hrsc.apps.zing.vn/images/cung/songtu.png'},
        6: {'name': unicode('Cự Giải', 'utf-8'), 'date': '22/06-22/07', 'img': 'http://hrsc.apps.zing.vn/images/cung/cugiai.png'},
        7: {'name': unicode('Sư Tử', 'utf-8'), 'date': '23/07-23/08', 'img': 'http://hrsc.apps.zing.vn/images/cung/sutu.png'},
        8: {'name': unicode('Xử Nữ', 'utf-8'), 'date': '23/08-22/09', 'img': 'http://hrsc.apps.zing.vn/images/cung/xunu.png'},
        9: {'name': unicode('Thiên Bình', 'utf-8'), 'date': '23/09-22/10', 'img': 'http://hrsc.apps.zing.vn/images/cung/thienbinh.png'},
        10: {'name': unicode('Hổ Cáp', 'utf-8'), 'date': '23/10-21/11', 'img': 'http://hrsc.apps.zing.vn/images/cung/hocap.png'},
        11: {'name': unicode('Nhân Mã', 'utf-8'), 'date': '22/11-21/12', 'img': 'http://hrsc.apps.zing.vn/images/cung/nhanma.png'},
        12: {'name': unicode('Ma Kết', 'utf-8'), 'date': '22/12-19/01', 'img': 'http://hrsc.apps.zing.vn/images/cung/maket.png'},
    }    

    def login(self, user, passwd):
        """Log in to zing.vn by submitting the 'frmLogin' form via mechanize.

        Keeps the authenticated browser on self.br for later page fetches.
        Errors are printed and swallowed (best-effort).
        """
        try:
            self.br = mechanize.Browser()
            self.br.set_handle_redirect(True)
            self.br.open(LOGIN_URL)
            self.br.select_form('frmLogin')
            self.br.form['u'] = user
            self.br.form['p'] = passwd
            self.br.submit()
        except:
            traceback.print_exc()

    def getCung(self, index):
        """Return the MongoDB _id of the sign document for `index` (1..12).

        On first sight, downloads the sign's icon (locally, or copied to the
        remote host over SCP when flgCopy is set, with up to 3 retries) and
        inserts the document. Returns None implicitly when the image cannot
        be saved or on exception.
        """
        try:
            collection = self.DB['cung']
            obj = self.CUNG[index]
            name = obj['name']  
            # NOTE(review): this path returns a 2-tuple while every other
            # path returns a single id — looks copy/pasted from
            # checkDuplicate; confirm callers can never hit it.
            if name==None or name=='': return None, None 
            root = collection.find_one({'name': name}, {})
            if root==None:
                result = None; source = file_name = ''; size = 0
                linkImage = obj['img']
                if flgCopy!=None:
                    # Remote mode: copy the image to the mana.vn host via SCP.
                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    if result==None:
                        # Retry up to 3 more times on failure.
                        for i in range(0, 3):
                            result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                            if result!=None: break
                else:
                    # Local mode: save the image under LOCAL_PATH only.
                    result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                    if result==None:
                        for i in range(0, 3):
                            result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                            if result!=None: break
                if result!=None:
                    # Replace the remote URL with the saved/mirrored path.
                    obj['img'] = source
                    print '## obj: ', obj
                    obj_id = collection.save(obj)
                    return obj_id
            else:
                return root['_id']
        except:
            traceback.print_exc()

    def checkDuplicate(self, link, date, cung_id, gender, _type):
        ''' Check whether a daily article already exists in the DB.
            Returns (1, hashUrl) if a duplicate exists, (0, hashUrl)
            otherwise, and (None, None) on bad input or error.
        ''' 
        try:
            collection = self.DB['article']
            if link==None or link=='': return None, None
            hashUrl = pyclib.getMd5(link)   
            result = collection.find_one({'cung_id': cung_id, 'date': date, 'sex': gender, 'type': _type, 'hashUrl' : hashUrl, 'source': 'zing.vn'})
            if result!=None:  cprint('Document is exists in database !!!', 'red'); return 1, hashUrl
            return 0, hashUrl 
        except:
            traceback.print_exc()
            return None, None
    
    def checkYearDuplicate(self, link, cung_id, _type):
        ''' Check whether a yearly article already exists in the DB
            (no date/gender in the query). Returns (1, hashUrl) if a
            duplicate exists, (0, hashUrl) otherwise, (None, None) on error.
        ''' 
        try:
            collection = self.DB['article']
            if link==None or link=='': return None, None
            hashUrl = pyclib.getMd5(link)   
            result = collection.find_one({'cung_id': cung_id, 'type': _type, 'hashUrl' : hashUrl, 'source': 'zing.vn'})
            if result!=None:  cprint('Document is exists in database !!!', 'red'); return 1, hashUrl
            return 0, hashUrl 
        except:
            traceback.print_exc()
            return None, None

    def getDateWithOffset(self, day):
        """Return (YYYY-MM-DD, DD-MM-YYYY, midnight datetime) for today + `day` days."""
        try:
            today = datetime.date.today()
            offset = datetime.timedelta(days=day)
            tomorrow = today + offset
            text1 = tomorrow.strftime('%Y-%m-%d')
            text2 = tomorrow.strftime('%d-%m-%Y')
            date_time = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day, 0, 0, 0)
            return text1, text2, date_time
        except:
            traceback.print_exc()

    def getDateNow(self):
        """Return the current date as (YYYY-MM-DD, DD-MM-YYYY) strings."""
        try:
           now = datetime.datetime.now()
           text1 = now.strftime('%Y-%m-%d')
           text2 = now.strftime('%d-%m-%Y')
           return text1, text2
        except:
            traceback.print_exc()
    
    def getXMLTree(self, url, output=False):
        """Fetch `url` with the logged-in browser and parse it into an lxml tree.

        When `output` is True the raw HTML is also printed (debug aid).
        Returns None implicitly on error.
        """
        try:
            self.br.open(url)
            html = self.br.response().read()
            parse = etree.HTMLParser(encoding='utf-8')
            tree = etree.parse(StringIO.StringIO(html), parse)
            if output:
                print html
            return tree
        except:
            traceback.print_exc()

    def toPrint(self, text):
        """Best-effort ASCII conversion for console printing; falls back to
        returning the input unchanged on error."""
        try:
            text = pyclib.toAscii(text)
            return text
        except:
            return text

    def getInfoYear(self):
        """Crawl the yearly horoscope page for each of the 12 signs and save
        one 'year'-type article per sign (skipping already-stored URLs).
        """
        try:
            collection = self.DB['article']
            for cung_id in range(1, 13):
                obj_id = self.getCung(cung_id)
                lurl = 'http://apps.m.zing.vn/hrsc/year/detail?id={0}'.format(cung_id)
                check_exists, hashUrl = self.checkYearDuplicate(lurl, obj_id, 'year')
                if check_exists==1: continue
                cprint('Process page : ' + lurl, 'yellow')
                tree = self.getXMLTree(lurl)
                listNode = tree.xpath('//div[@class="contain"]//div[@class="conthrsc"]/*')
                # flag/i track the paging <div>: the first one holds the
                # description text; stop once the second one is reached.
                flag = False; description = ''; data = []; i = 0
                print '------------------------------------'
                for node in listNode:
                    if flag and i>1: break
                    if node.tag=='div' and node.get('class')=='paging':
                        flag=True; i += 1
                        if i>1: break
                        # Strip <strong> children before extracting the text.
                        strongTag = node.xpath('.//strong')
                        for tag in strongTag: tag.getparent().remove(tag)
                        description = pyclib.getStringWithNode(node) 
                        cprint(self.toPrint(description), 'yellow')
                        continue
                    # Drop links, then classify the node as bold or plain text.
                    aTag = node.xpath('.//a')
                    if len(aTag) > 0:
                        for taga in aTag: taga.getparent().remove(taga)
                    strongTag = node.xpath('.//strong')
                    if len(strongTag) > 0:
                        text = pyclib.getStringWithNode(strongTag[0])
                        if len(text) > 0:
                            data.append({'text': text, 'type': 'textbold'})
                            cprint(self.toPrint(text), 'yellow')
                    else:
                        text = pyclib.getStringWithNode(node)
                        if len(text) > 0:
                            data.append({'text': text, 'type': 'text'})
                            print self.toPrint(text)
                if len(data) > 0:
                    doc = {'content': data, 'description': description, 'link': lurl, 'hashUrl': hashUrl, 'cung_id': obj_id, 'type': 'year', 'source': 'zing.vn'}
                    collection.save(doc)
        except:
            traceback.print_exc()

    def processAll(self):
        """Crawl daily horoscopes for the next MAX_OFFSET days.

        For each day, each of the 12 signs, and each gender (0/1), fetches
        the detail page and stores a 'date'-type article. Days that already
        have all 24 (12 signs x 2 genders) documents are skipped.
        """
        try:
            ''' Ex: http://hrsc.apps.zing.vn/horoscope/index/index/gender/1/zidac_id/1/date/2012-02-09
            '''
            collection = self.DB['article']
            for offset in range(0, MAX_OFFSET):
                date, date2, date_time = self.getDateWithOffset(offset)
                print '--------------------------------------'
                print 'Process date: ', date2
                rows = collection.find({'date': date2}).count()
                if rows >= 24:
                    print 'Data was exists in database !!'
                    continue 
                for cung_id in range(1, 13):
                    for gender in range(0, 2):
                        lurl = '{0}/index/index/gender/{1}/zidac_id/{2}/date/{3}'.format(BASE_URL, gender, cung_id, date)
                        obj_id = self.getCung(cung_id)
                        check_exists, hashUrl = self.checkDuplicate(lurl, date2, obj_id, gender, 'date')
                        if check_exists==1: continue
                        cprint('Process page : ' + lurl, 'yellow')
                        tree = self.getXMLTree(lurl, output=False)
                        listNode = tree.xpath('//div[@class="bg_contentcen"]//div[contains(@class, "detail_content")]//h2')
                        doc = {}; data = []
                        # Each <h2> is a section title; the text following it
                        # (node.tail) is that section's body.
                        for node in listNode:  
                            title = pyclib.getStringWithNode(node)
                            cprint(self.toPrint(title), 'yellow')
                            data.append({'data': title, 'type': 'textbold'})
                            content = pyclib.convertToText(node.tail)
                            print self.toPrint(content)
                            data.append({'data': content, 'type': 'text'})
                        if len(data) > 0:
                            doc = {'content': data, 'link': lurl, 'hashUrl': hashUrl, 'cung_id': obj_id, 'date': date2, 'datetime': date_time, 'sex': gender, 'type': 'date', 'source': 'zing.vn'}
                            collection.save(doc)
        except:
            traceback.print_exc()

def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức zing.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        print 'Start crawler zing.vn: ', datetime.datetime.now()
        totalNewsCrawlered = 0; totalNewsDuplicated = 0
        start = time.time() 
        timeout = threading.Thread(target=timeOut).start()
        crawler = Zing(MONGO_SERVER, MONGO_PORT, DATABASE)
        crawler.login('mana.vn', '~@mana.vn')
        crawler.getInfoYear()
        crawler.processAll()
        pyclib.forceQuit()
    except:
        traceback.print_exc()
