# -*- coding: utf-8 -*-
import lxml.html
import urllib
import re, sys, os
import datetime, time
import traceback
import hashlib
import workerpool
import cStringIO as StringIO
import threading
import pycommonlib as pyclib
from pymongo import Connection
from datetime import datetime
from lxml import etree
from urlparse import urljoin
from mechanize._opener import urlretrieve
from termcolor import cprint
from MongoDbLog import MongoDbLog

# --- Module-level configuration (all of this runs at import time) ---
# MongoDB connection; images and album metadata live in separate collections.
CONNECT = Connection('beta.mana.vn', 27017)
DB   = CONNECT['my_database']
IMAGE_COLLECTION = DB['xkcn']
ALBUM_COLLECTION = DB['album'] 
SITE_URL = 'http://xkcn.info'
PREFIX = '/uploads/xkcn'
LOCAL_PATH = '/home/hoangnamhai/HarvestedData/xkcn'
# Crawl aborts once this many already-harvested (duplicate) images are seen.
MAX_COUNT = 30
stringify = etree.XPath("string()")
# Global duplicate counter, incremented by processJob().
totalCount  = 0
LOG = MongoDbLog('xkcn', 'debug')
os.umask(0000)
# When a command-line flag is present, images are pushed to the remote host
# over SFTP instead of only being stored locally.
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # SECURITY NOTE(review): hard-coded SSH credentials — should be moved to
    # a config file or environment variables.
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '1p%^IRg')
    sftp    = ssh.open_sftp()
    # NOTE(review): this None-check runs AFTER open_sftp(); if createSSHClient
    # returned None the previous line would already have raised, so this
    # guard is effectively dead.
    if ssh==None: pyclib.forceQuit()
start = 0

def getXMLTree(url, isXML=False, userAgent=False, outputHTML=False):
    ''' Fetch *url* and parse the response body into an lxml element tree.

        Parameters:
        - url        : str/unicode URL (unicode is UTF-8 encoded first)
        - isXML      : False -> etree.HTMLParser, True -> etree.XMLParser
        - userAgent  : when True, fetch through a mechanize opener that sends
                       a desktop Firefox User-Agent (some sites block the
                       default Python agent)
        - outputHTML : when True, echo the raw response body to stdout

        Returns the parsed tree, or None when fetching/parsing fails
        (the traceback is printed to stderr).
    '''
    try:
        if type(url).__name__ == 'unicode': url = url.encode('utf-8')
        if userAgent:
            # BUGFIX: 'mechanize' itself was never imported at module level
            # (only mechanize._opener.urlretrieve is), so this branch used
            # to raise NameError; import it locally here.
            import mechanize
            user_agent  = 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
            opener      = mechanize.build_opener(mechanize.HTTPRefererProcessor)
            opener.addheaders = [("User-agent", user_agent)]
            response    = opener.open(url)
        else:
            response    = urllib.urlopen(url)
        html    = response.read()
        if outputHTML: print(html)
        # Force utf-8 decoding regardless of what the server declares.
        if isXML==False: parser  = etree.HTMLParser(encoding='utf-8')
        else: parser = etree.XMLParser(encoding='utf-8')
        tree    = etree.parse(StringIO.StringIO(html), parser)
        return tree
    except:
        traceback.print_exc()

def getPathWithNameImage(src):
    ''' Derive an image name and a nested storage path from *src*.

        The name is the hex MD5 digest of the URL (e.g. 'abcd...') and the
        path is its first four hex characters joined as directory levels
        (e.g. 'a/b/c/d').  Returns (None, None) when hashing fails.
    '''
    try:
        if type(src).__name__ == 'unicode':
            src = src.encode('utf-8')
        digest = hashlib.md5(src).hexdigest()
        subdirs = '/'.join(digest[:4])
        return digest, subdirs
    except:
        traceback.print_exc()
        return None, None
         
def getMaxPage(url):
    ''' Return the number of listing pages advertised by the paginator at
        *url* (the trailing integer inside <span id="pages">).

        Falls back to 1000 when the URL is empty/None, the element is
        missing, or any error occurs (the error is logged).

        BUGFIX: the original had ``return maxPage`` inside ``finally``, which
        unconditionally overrides any ``return`` executed in the ``try`` body
        — the function therefore ALWAYS returned 1000.  The parsed value is
        now assigned to ``maxPage`` and returned once, after the handler.
    '''
    maxPage = 1000
    try:
        if url is not None and url.strip() != '':
            tree     = getXMLTree(url)
            pageNode = tree.xpath('//span[@id="pages"]')
            if len(pageNode) > 0:
                text = stringify(pageNode[0]).strip()
                # The page count is the trailing run of digits.
                m = re.search(r'(\d+)$', text)
                if m:
                    maxPage = int(m.group(1))
    except:
        LOG.error(traceback.format_exc())
    return maxPage

def getAlbum():
    ''' Return the MongoDB _id of the album document for SITE_URL,
        creating the document on first use.  Returns None on error.
    '''
    try:
        key = hashlib.md5(SITE_URL).hexdigest()
        existing = ALBUM_COLLECTION.find_one({'hashUrl': key})
        if existing is not None:
            return existing['_id']
        # First run: register the album for this site.
        doc = {
            'name': 'Xinh không chịu nổi',
            'url': SITE_URL,
            'hashUrl': key,
            'date': datetime.now(),
            'source': SITE_URL,
            'website': SITE_URL,
            'is_active': True,
        }
        return ALBUM_COLLECTION.save(doc)
    except:
        traceback.print_exc()
        
def processJob(job):
    LOG.debug('start processJob({0})'.format(job))
    try:
        global totalCount
        # Nếu trùng lặp lớn hơn MAX_COUNT thì kết thúc không chạy nữa
        if totalCount >= MAX_COUNT: 
            print 'Dừng chạy chương trình vì quá số lượng trùng lặp cho phép'
            print 'Finished .', datetime.now() 
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        if i == 0: lurl = SITE_URL
        else: lurl = urljoin(SITE_URL, '/page/' + str(job))
        cprint('URL ' + lurl, 'yellow')
        tree = getXMLTree(lurl)
        listNode = tree.xpath('//div[@class="post photo"]')
        if len(listNode) <= 0: return
        for node in listNode:
          # Nếu trùng lặp lớn hơn MAX_COUNT thì kết thúc không chạy nữa 
          if totalCount >= MAX_COUNT: 
              print 'Dừng chạy chương trình vì quá số lượng trùng lặp cho phép' 
              print 'Finished .', datetime.now()
              pid = os.getpid(); os._exit(1); os.kill(pid, 9)
          node_date = node.xpath('.//a[@class="timestamp"]')
          date = node_date[0].get('href').strip()
          arr = date.split('/'); now = datetime.now()
          if len(arr) < 3: return 
          date = datetime(int(arr[2]), int(arr[3]), int(arr[4]), now.hour, now.minute, now.second, 999999, tzinfo=now.tzinfo)
          node_img = node.xpath('./a/img')
          src =  node_img[0].get('src').strip()
          node_body = node.xpath('./div[@class="body"]/p')
          caption=''
          if node_body:
              caption=node_body[0].text
          hashUrl = hashlib.md5(src).hexdigest()
          result = IMAGE_COLLECTION.find_one({'hashUrl': hashUrl}, {}) 
          if result!=None:
                LOG.info('Already exist in database') 
                totalCount += 1; continue
          else:
                linkImage = src
                result = None; source = file_name = ''; size = 0
                if flgCopy!=None:
                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                else:
                    result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
               # saveImage(src, date, caption)
                if size > 0:
                    doc = {'name' : file_name, 'caption': caption, 'website': SITE_URL, 'album_id': getAlbum(), 'is_active': True,
                           'source' : source, 'date': date, 'size': size, 'link': src, 'hashUrl': hashlib.md5(src).hexdigest()}
                    IMAGE_COLLECTION.save(doc)
                    print doc                
    except:
        LOG.error(traceback.format_exc())
    finally:
        LOG.debug('finished processJob({0})'.format(job))
        
if __name__ == '__main__':
    LOG.info('start crawler xkcn')
    try:
        # MAX_COUNT is the duplicate-image budget (enforced in processJob).
        maxPage = getMaxPage(SITE_URL); jobs = []
        for i in range(0, maxPage): jobs.append(i)
        # Single worker: pages are processed sequentially, newest first.
        pool = workerpool.WorkerPool(size=1)
        pool.map(processJob, jobs)
        # NOTE(review): the SSH connection is closed before shutdown()/wait();
        # this is only safe if workerpool's map() blocks until all jobs are
        # done — confirm, otherwise move the close after wait().
        if flgCopy!=None: ssh.close()
        pool.shutdown(); pool.wait()
    except:
        traceback.print_exc()
    LOG.info('finished crawler xkcn')
    # NOTE(review): exits with status 1 even on success, and the os.kill is
    # unreachable because os._exit never returns.
    pid = os.getpid(); os._exit(1); os.kill(pid, 9)
