import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re
import html2textlib
import threading

from pymongo    import Connection
from termcolor  import cprint
from lxml       import etree


# --- Crawl configuration ----------------------------------------------------
LOCAL_PATH = '/home/hoangnamhai/HarvestedData/tintuc/news'
#MONGO_SERVER = '27.0.12.106'
#MONGO_SERVER = 'beta.mana.vn'
MONGO_SERVER = 'localhost'
MONGO_PORT = 27017
DATABASE = 'horoscope'
PREFIX = '/uploads/horoscope'          # web path prefix for saved images
SITE_URL = 'http://www.muctim.com.vn'
BASE_URL = 'http://www.muctim.com.vn/Vietnam/The-gioi-tuoi-moi-lon/Thu-gian/'
MAX_COUNT = 15      # stop a category once this many duplicates were seen
MAX_ARTICLE = 30    # stop a category once this many articles were processed
MAX_PAGE = 20       # maximum listing pages scanned per category
os.umask(0000)      # created files/dirs stay world-writable for the web server

logger = pyclib.getMongoLog(MONGO_SERVER, MONGO_PORT, 'muctim.com.vn')
totalNewsCrawlered = 0; totalNewsDuplicated = 0
flgCopy = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # Remote-copy mode: images are pushed to the server over SFTP instead of
    # only being written to LOCAL_PATH.
    # NOTE(review): password is hard-coded empty — presumably key-based auth
    # is in place; confirm before deploying.
    ssh = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    # BUGFIX: check for a failed connection BEFORE using the client.  The old
    # code called ssh.open_sftp() first, so a None client crashed with
    # AttributeError and the critical log / forceQuit never ran.  (The old
    # inner counter guard was trivially true — both counters are 0 here.)
    if ssh==None:
        logger.critical(unicode("crawler tin tức muctim.com.vn không hoạt động", 'utf8'))
        pyclib.forceQuit()
    sftp = ssh.open_sftp()
start = 0

class Muctim():
    def __init__(self, server, port, database):
        CONNECT = Connection(server, port)
        self.DB = CONNECT[database]
    
    def getRootLftCategory(self, name):
        try:
            collection = self.DB['category']
            if name==None or name=='': return None, None 
            root = collection.find_one({'data': 'Tử vi'}, {'root_id': 1})
            if root==None: 
                result = collection.find_one({'data': name}, {'root_id': 1, 'lft': 1})
            else:  
                result = collection.find_one({'data': name, 'root_id': root['root_id']}, {'root_id': 1, 'lft': 1})
            if result==None: cprint('Category chưa tồn tại !', 'red'); return None, None
            else: return result['root_id'], result['lft']
        except:
            traceback.print_exc()
    
    def getDatetime(self, text):
       try:
           result = datetime.datetime.utcnow()
           if text==None or text=='': return result
           m = pyclib.regexString('(\d+):(\d+):(\d+) (\d+)/(\d+)/(\d+)', text)
           if m:
               year = int(float(m.group(6))); month = int(float(m.group(5))); day = int(float(m.group(4)))
               hour = int(float(m.group(1))); minute = int(float(m.group(2))); second = int(float(m.group(3))) 
               return datetime.datetime(year, month, day, hour, minute, second) + datetime.timedelta(seconds=time.timezone)
           return result
       except:
           traceback.print_exc()
   
    def checkLinkImage(self, link):
        try:
            if link==None or link=='': return False
            m = pyclib.regexString('/EmoticonOng/', link)
            if m: return True
            return False
        except:
            traceback.print_exc()

    def checkArticleDuplicate(self, link):
        ''' Kiểm tra trùng tin tức trong DB, trả vê 1 nếu trùng, 0 trong trường hợp ngược lại
        ''' 
        try:
            collection = self.DB['article']
            if link==None or link=='': return None, None
            m = pyclib.regexString('/([a-z0-9]{1,3})/(\d+)/', link)
            if m: aId = '{0}-{1}'.format(m.group(1), m.group(2)) 
            else: aId = pyclib.getMd5(link)   
            result = collection.find_one({'hashUrl' : aId, 'source': 'muctim.com.vn'})
            if result!=None:  cprint('Tin tức đã tồn tại trong cơ sở dữ liệu', 'red'); return 1, aId
            return 0, aId 
        except:
            traceback.print_exc()
            return None, None
    
    def processArticle(self, link, cat):
        ''' Hàm xử lý chi tiết một tin tức, nếu tin đã tồn tại trong DB thì trả về: 1, ngược lại: 0
        '''
        try:
            global totalNewsCrawlered, totalNewsDuplicated
            if link==None or link=='': return
            root_id, lft = self.getRootLftCategory(self.CATEGORIES[cat]['category'])
            if root_id==None: return
            tags = self.CATEGORIES[cat]['tags']
            check_exists, aId = self.checkArticleDuplicate(link)
            if check_exists==1: totalNewsDuplicated += 1; return 1
            print ('########################################################################################')
            print pyclib.toAscii('Process article: ' + link)

            title = thumbnail = description = ''; data = []; flgImg = False; imageOfArticle = {}; flgStop = False
            postedDate = datetime.datetime.utcnow(); cCaption = ''; listKeys = []; caption=''
            tree = pyclib.getXMLTree(link)
            contentNode = tree.xpath('//div[@class="wrapper"]//div[@class="postpadding"]') 
            if len(contentNode) <= 0: cprint('Tin có cấu trúc khác, không thể lấy được nội dung', 'red'); return
            primaryNode = contentNode[0]
            titleNode = primaryNode.xpath('.//h1')
            if len(titleNode) > 0:  title = pyclib.getStringWithNode(titleNode[0]); titleNode[0].getparent().remove(titleNode[0])
            dateNode = primaryNode.xpath('.//div[@class="meta"]/span[@class="date"]')
            if len(dateNode) > 0:   
                dateArticle = pyclib.getStringWithNode(dateNode[0]); dateNode[0].getparent().remove(dateNode[0])
            timeNode = primaryNode.xpath('.//div[@class="meta"]/span[@class="time"]')
            if len(timeNode) > 0:   
                timeArticle = pyclib.getStringWithNode(timeNode[0]); timeNode[0].getparent().remove(timeNode[0])
            
            postedDate = self.getDatetime('{0} {1}'.format(timeArticle, dateArticle))
            introNode = primaryNode.xpath('.//p[@class="sapo"]')
            if len(introNode) > 0:  description = pyclib.getStringWithNode(introNode[0]); introNode[0].getparent().remove(introNode[0])
            # remove all tag style and script
            listStyle = primaryNode.xpath('.//style')
            if len(listStyle) > 0:
                for style in listStyle: style.getparent().remove(style)
            listScript = primaryNode.xpath('.//script')
            if len(listScript) > 0:
                for script in listScript: script.getparent().remove(script)
            contentNode = primaryNode.xpath('.//div[@class="content"]')
            if len(contentNode) < 1: return
            chtml = etree.tounicode(contentNode[0], method='html')    
            data, imgs = html2textlib.getContent(chtml, SITE_URL, output=False, stdOut=False)

            # Lấy ảnh trước và remove node không cần thiết
            listNode = primaryNode.xpath('.//div[@class="content"]/*')
            if len(listNode)==1: 
                if listNode[0].tag == 'table':
                    listNode = primaryNode.xpath('.//div[@class="content"]/table//tr')
                else:
                    listNode = primaryNode.xpath('.//div[@class="content"]/div/*')
                    if len(listNode)<1:
                        listNode = primaryNode.xpath('.//div[@class="content"]/span/*')
            for node in listNode:
                if node.tag == "img":
                    linkImage = node.get('src')
                    if self.checkLinkImage(linkImage): continue
                    if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                    if linkImage[:4]!='http':
                        if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                        else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                    linkSourceImage = re.sub(r'\s', '', linkImage)
                    linkSourceImage = re.sub(r'%20', '', linkSourceImage)
                    linkImage = linkImage.replace('../', '')
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                    else:
                        result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                    if result!=None:
                        if flgImg==False: flgImg = True; thumbnail = source
                        imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': ''}
                    continue
                imgNode = node.xpath('.//img')
                if len(imgNode) > 0:
                    caption = pyclib.getStringWithNode(node)
                    if caption==None or len(caption)<2: captien = ''
                    childNode = node.xpath('./*')
                    flgCImg = False
                    for child in childNode:
                        if child.tag == "img": flgCImg = True; break
                        strNode = pyclib.getStringWithNode(child)  
                        if len(strNode)>1: break;       

                    if flgCImg==False: caption = ''
                    if len(imgNode)>=2: caption = ''
                    for img in imgNode:
                        linkImage = img.get('src')
                        cNode = node.xpath('.//span')   
                        if len(cNode) < 1: caption = ''
                        if self.checkLinkImage(linkImage): continue
                        if pyclib.getDatatypeName(linkImage)=='unicode': linkImage = linkImage.encode('utf-8')
                        if linkImage[:4]!='http':
                            if linkImage[:1]!='/': linkImage = '{0}/{1}'.format(SITE_URL, linkImage)
                            else: linkImage = '{0}{1}'.format(SITE_URL, linkImage)
                        linkSourceImage = re.sub(r'\s', '', linkImage)
                        linkSourceImage = re.sub(r'%20', '', linkSourceImage)
                        linkImage = linkImage.replace('../', '')
                        result = None; source = file_name = ''; size = 0
                        if flgCopy!=None:
                            result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                        else:
                            result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                        if result!=None:
                            if flgImg==False: flgImg = True; thumbnail = source
                            imageOfArticle[linkSourceImage] = {'data': source, 'type': 'image', 'caption': caption}

            flgLength = 0
            for i in range(0, len(data)):
                if data[i].has_key('src'): 
                    src_img = data[i]['src'];
                    if imageOfArticle.has_key(src_img):
                        data[i] = imageOfArticle[src_img]
                        cCaption = data[i]['caption'];  print pyclib.toAscii(data[i]['data'])
                        print 'Caption: ', pyclib.toAscii(cCaption)
                        if pyclib.getDatatypeName(cCaption) == 'unicode': cCaption = cCaption.encode('utf-8')
                        flgLength = len(cCaption)
                    else: listKeys.append(i)
                else:
                    if len(cCaption)>1: 
                        cData = data[i]['data']; 
                        if pyclib.getDatatypeName(cData) == 'unicode': cData = cData.encode('utf-8')
                        if len(cCaption) <= len(cData): cData = cData.replace(cCaption, '')
                        else: flgLength -= len(cData); listKeys.append(i); continue
                        if len(cData)>1: data[i]['data'] = cData;  print pyclib.toAscii(data[i]['data'])
                        else: listKeys.append(i)
                        cCaption = ''
                    else:
                        if flgStop: listKeys.append(i); continue 
                        if flgLength > 0: 
                            if len(data[i]['data'])<=(flgLength+2): listKeys.append(i); flgLength = 0; continue
                            flgLength = 0
                        if len(data[i]['data'])<2: listKeys.append(i); continue
                        if data[i]['type']=='textbold': print pyclib.toAscii(data[i]['data'])
                        else: print pyclib.toAscii(data[i]['data'])
                print '-----------------'
            for key in range(len(listKeys)-1, -1, -1): del data[listKeys[key]]
            #In ra màn hình để kiểm tra ảnh
            #print imgs
            #for item in imageOfArticle: print item 
            title = pyclib.toUnicodeDungSan(title)
            doc = ({ 'hashUrl'       :   aId,
                    'title'         :   title,
                    'thumbnail'     :   thumbnail,
                    'description'   :   description,
                    'content'       :   data,
                    'newsLink'      :   link,
                    'update'        :   postedDate,
                    'source'        :   'muctim.com.vn',
                    'category'      :   lft,
                    'root'          :   root_id,
                    'is_active'     :   True,
                    'lastupdate'    :   datetime.datetime.utcnow(),
                    'timestamp'     :   time.time(),
                    'date'          :   datetime.datetime.utcnow(),
                    'tags'          :   tags, })
            collection = self.DB['article']
            if len(data) > 0: totalNewsCrawlered += 1; collection.save(doc)
            else: cprint('XPath không đúng, không thể lấy được nội dung của tin.', 'red')
            print postedDate
            print aId
            if pyclib.getDatatypeName(title)=='unicode': title = title.encode('utf-8')
            print pyclib.toAscii('Title: ' + title)
            cprint('Thumbnail: ' + thumbnail, 'green')
            print pyclib.toAscii('Intro: ' + description)
            return 0
        except:
            traceback.print_exc()        
            
    def processPage(self, page, cat):
        try:
            gbcount = loop = 0 
            lurl = '{0}/{1}/trang-{2}.chn'.format(SITE_URL, self.CATEGORIES[cat]['link'], page)
            print pyclib.toAscii('Process page : ' + lurl)
            tree = pyclib.getXMLTree(lurl)
            listNode = tree.xpath('//div[@class="wrapper"]//div[@class="listnews"]//div[@class="title"]/a')
            if len(listNode) < 1: return
            for node in listNode:  
                count = self.processArticle('{0}{1}'.format(SITE_URL, node.get('href')), cat)
                if count!=None: gbcount += count; loop += 1
            return gbcount, loop
        except:
            traceback.print_exc(); return None, None
            
    def processCategory(self, cat):
        try:
            gbcount = 0; loop = 0
            cprint('Process category : ' + self.CATEGORIES[cat]['link'], 'yellow')
            lurl = '{0}/{1}.chn'.format(SITE_URL, self.CATEGORIES[cat]['link'])
            for page in range(1, MAX_PAGE):
                print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
                if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                    cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                    return 
                c, l = self.processPage(page, cat)
                if c!=None: gbcount += c; loop += l
        except:
            traceback.print_exc()

    def process(self, url):
        try:
           pass 
        except:
            traceback.print_exc()

    def processArticle(self, url):
        try:
           pass 
        except:
            traceback.print_exc()

    def processAll(self):
        try:
            lurl = BASE_URL
            tree = pyclib.getXMLTree(lurl)
            
        except:
            traceback.print_exc()

def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            if totalNewsCrawlered == 0 and totalNewsDuplicated == 0:
                logger.critical(unicode("crawler tin tức muctim.com.vn không hoạt động", 'utf8'))
            else:
                logger.info(unicode("Tổng số bài viết crawled: {0}, tổng số bài trùng lặp {1}".format(totalNewsCrawlered, totalNewsDuplicated), 'utf8'))
            logger.info('process timeout {0}'.format(delta))
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        print 'Start crawler: ', datetime.datetime.now()
    except:
        traceback.print_exc()
