# -*- coding: utf-8 -*-
import lxml
import urllib 
import re, sys, os
import mechanize
import datetime, time
import traceback
import workerpool
import hashlib
import urlparse
import binascii
import cStringIO as StringIO
from lxml       import etree
from pymongo    import Connection
from urlparse   import urljoin
from termcolor  import cprint 

# Filesystem root where downloaded images are stored.
LOCAL_PATH = '/home/hoangnamhai/HarvestedData/xemanhdep'
# MongoDB endpoint holding the harvested metadata.
MONGO_SERVER    = 'beta.mana.vn'   
MONGO_PORT      = 27017
XEMANH_DB       = 'my_database'
# URL prefix recorded in the DB 'source' field for each saved image.
PREFIX      = '/uploads/xemanhdep' 
SITE_URL    = 'http://xemanhdep.com'
# The three sub-sites crawled; each gets its own duplicate counter below.
LIST_URL    = ['http://xemanhdep.com', 'http://girlxinh.xemanhdep.com', 'http://gallery.xemanhdep.com']
# Once a sub-site hits this many already-seen albums, stop crawling it.
MAX_COUNT   = 8
CONNECT     = Connection(MONGO_SERVER, MONGO_PORT)
DB          = CONNECT[XEMANH_DB]
IMAGE_COLLECTION    = DB['images']
ALBUM_COLLECTION    = DB['album']
# XPath helper: full text content of a node.
stringify   = etree.XPath("string()")
# Per-sub-site duplicate counters and the "stop this sub-site" flags,
# shared by worker threads via `global` in process().
gbhome = 0; gbgirlxinh = 0; gbgallery = 0;  flagHome = flagGirl = flagGallery  = False 
# Clear the umask so directories created below get the full 0777 mode bits.
os.umask(0000)

def getXMLTree(url, isXML=False, userAgent=False, outputHTML=False):
    ''' Fetch `url` and parse the response into an lxml tree.
        - url        : str or unicode (unicode is utf-8 encoded first)
        - isXML      : False -> HTMLParser, True -> XMLParser
        - userAgent  : if True, fetch via mechanize with a browser UA
                       (some pages reject the default urllib agent)
        - outputHTML : if True, dump the raw response body to stdout
        Returns an lxml ElementTree, or None on any error
        (traceback is printed).
        NOTE: the docstring used to sit inside the try block, so the
        function had no real docstring; it is now in the proper place.
    '''
    try:
        if type(url).__name__ == 'unicode': url = url.encode('utf-8')
        if userAgent:
            user_agent  = 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
            opener      = mechanize.build_opener(mechanize.HTTPRefererProcessor)
            opener.addheaders = [("User-agent", user_agent)]
            response    = opener.open(url)
        else:
            response    = urllib.urlopen(url)
        # BUG FIX: the response object was never closed (socket leak under
        # the worker pool); close it as soon as the body is read.
        try:
            html = response.read()
        finally:
            response.close()
        if outputHTML: print(html)
        if isXML: parser = etree.XMLParser(encoding='utf-8')
        else:     parser = etree.HTMLParser(encoding='utf-8')
        return etree.parse(StringIO.StringIO(html), parser)
    except:
        traceback.print_exc()
        return None  # explicit, instead of falling off the end
            
def getYearMonthFromUrl(url):
    ''' Extract the (year, month) pair embedded in a permalink URL.
        Ex : http://girlxinh.xemanhdep.com/2011/05/%E1%BA%A3nh-.../
        ->  ('2011', '05')
        Returns (None, None) for empty input, no match, or error.
    '''
    try:
        if url is None or url == '':
            return None, None
        match = re.search(r'\/(\d+)\/(\d+)\/', url)
        if match is not None:
            return match.group(1), match.group(2)
        return None, None
    except:
        traceback.print_exc()
        return None, None
    
def getPathWithNameImage(src):
    ''' Derive a storage file name and a nested directory path from an
        image URL: the md5 hex digest of `src` is the name, and its first
        four hex characters become the path, e.g. 'a/b/c/d'.
        Returns (None, None) on error.
    '''
    try:
        if type(src).__name__ == 'unicode':
            src = src.encode('utf-8')
        digest = hashlib.md5(src).hexdigest()
        subdirs = '/'.join(digest[:4])
        return digest, subdirs
    except:
        traceback.print_exc()
        return None, None
 
def saveImage(src, prefix, localPath):
    ''' Hàm lưu ảnh (nếu trong quá trình lưu ảnh bị lỗi thử lại 3 lần) 
        Giá trị trả về : 
            - 0: Không bị trùng lặp
            - 1: Đã có trong database
    '''
    try:
        if type(src).__name__ == 'unicode': src = src.encode('utf-8')
        img_name, path = getPathWithNameImage(src)
        if img_name==None: return None, None
        check_file   = '{0}/{1}/{2}.jpg'.format(localPath, path, img_name)
        path_dir     = '{0}/{1}'.format(localPath, path)
        source       = '{0}/{1}/{2}.jpg'.format(prefix, path, img_name)
        file_name    = '{0}.jpg'.format(img_name)
        flag = False; retry = 0; maxRetry = 3; result = 1; size = 0
        while not flag and retry < maxRetry:
            try:
                if not os.path.isfile(check_file):
                    if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
                    location, response = urllib.urlretrieve(src, check_file)
                    if not re.search('image', response['Content-Type']): 
                        os.unlink(location)
                        raise Exception('%s: file is not image' % check_file)
                    if long(response['Content-Length'])!=os.path.getsize(location):
                        os.unlink(location)
                        raise Exception('%s: size of file not equal file response' % check_file)
                    size = os.path.getsize(location)
                    if size>0: 
                        cprint('Lưu ảnh thành công!', 'green'); break
                else:
                    cprint('Ảnh đã tồn tại !', 'red'); result = 1; break
            except:
                traceback.print_exc()
                retry += 1
        return result, source, file_name, size
    except:
        traceback.print_exc()
        return None, None, None, None

def checkAlbumExists(title, day, month, year, url):
    try:
        if url==None or url=='': return None
        print 'Album : ', title
        hashUrl     = hashlib.md5(url.strip()).hexdigest()
        result      = ALBUM_COLLECTION.find_one({'hashUrl': hashUrl}, {})
        date = datetime.datetime(int(float(year)), int(float(month)), int(float(day))) + datetime.timedelta(seconds=time.timezone)
        if result==None:
             _id = ALBUM_COLLECTION.save({'name': title, 'url': url, 'hashUrl': hashUrl, 'date': date, 
                                   'type': getPrefixUrl(url), 'source': SITE_URL, 'website': 'xemanhdep.com', 'is_active': True})
             return _id
        else: cprint('Đã tồn tại album !', 'red'); return 0
    except:
        traceback.print_exc()
        return 0
        
def processAlbum(url):
    try:
        cprint(url, 'yellow')
        if url==None or url=='': return None
        if type(url).__name__ == 'unicode': url = url.encode('utf-8')
        year, month = getYearMonthFromUrl(url) 
        tree    = getXMLTree(url)
        dayNode = tree.xpath('//div[@class="date"]/div')
        if len(dayNode)>0: day = stringify(dayNode[0]).strip() 
        else: return None        
        titleNode = tree.xpath('//h1[@class="posttitle"]/a')
        if len(titleNode)>0: 
            title = stringify(titleNode[0]).strip()
            if year==None:
                url =  titleNode[0].get('href').strip()
                year, month = getYearMonthFromUrl(url)
        else: return None
        _albumId    = checkAlbumExists(title, day, month, year, url)
        print datetime.datetime(int(float(year)), int(float(month)), int(float(day)))
        if _albumId==0: return 1
        listNode    = tree.xpath('//div[@class="storycontent"]/p/img')
        if len(listNode)<=0: return 0
        for node in listNode:
            src = node.get('src').strip()
            if src!=None and src!='': 
                # Kiểm tra xem  ảnh có tồn tại hay chưa rôi mới insert vào database
                hashUrl = hashlib.md5(src).hexdigest()
                result = IMAGE_COLLECTION.find_one({'hashUrl': hashUrl}, {})
                if result==None:
                    count, source, name, size = saveImage(src, PREFIX, LOCAL_PATH);     
                    if count!=None:           
                        doc = {'name': name, 'caption': '', 'link': src, 'size': size, 'date': datetime.datetime.utcnow(), 'source': source, 
                               'website': 'xemanhdep.com', 'hashUrl': hashUrl, 'album_id': _albumId, 'is_active': True}
                        cprint(source, 'green')
                        IMAGE_COLLECTION.save(doc)
        return 0
    except:
        traceback.print_exc()
        return None
    
def getPrefixUrl(url):
    ''' Return the first host-name label after '//' in `url`:
        'xemanhdep' for http://xemanhdep.com, 'girlxinh' for
        http://girlxinh.xemanhdep.com, 'gallery' for
        http://gallery.xemanhdep.com.
        Returns None when the pattern is absent or on error.
    '''
    try:
        match = re.search(r'//(\w+)', url)
        if match is not None:
            return match.group(1)
    except:
        traceback.print_exc()
         
def process(url):
    ''' Lấy tất cả các Album
    '''
    try:
        global gbhome, gbgirlxinh, gbgallery, flagHome, flagGallery, flagGirl
        prefixUrl = getPrefixUrl(url) 
        if prefixUrl=='girlxinh':
            if flagGirl: return
        else: 
            if flagGallery: return
        if gbgirlxinh>MAX_COUNT:
            cprint('Dừng xử lý vì trùng quá giới hạn', 'red'); 
            if getPrefixUrl(url)=='girlxinh': flagGirl = True;
        if gbgallery>MAX_COUNT:
            cprint('Dừng xử lý vì trùng quá giới hạn', 'red'); 
            if getPrefixUrl(url)=='gallery': flagGallery = True; 
        if prefixUrl=='xemanhdep':
            if flagHome: return
            cprint('Process : ' + url, 'yellow')
            tree = getXMLTree(url)
            listNode = tree.xpath('//div[@class="item"]')
            if len(listNode)>0:   
                for node in listNode:
                    if flagHome: return
                    if gbhome>MAX_COUNT:
                        cprint('Dừng xử lý vì trùng quá giới hạn', 'red'); flagHome = True
                    aNode = node.xpath('.//a')
                    if len(aNode) > 0: 
                        link = aNode[0].get('href').strip();
                        print 'Gbhome :' + str(gbhome) 
                        count = processAlbum(link); 
                        if count!=None: gbhome += count 
        elif prefixUrl=='girlxinh':
            cprint('Process : ' + url, 'yellow')
            print 'Gbgirlxinh :' + str(gbgirlxinh)  
            count = processAlbum(url); 
            if count!=None: gbgirlxinh += count 
            if flagHome: flagHome = False
            if flagGallery: flagGallery = False
        else:
            cprint('Process : ' + url, 'yellow')
            print 'Gbgallery :' + str(gbgallery)  
            count = processAlbum(url); 
            if count!=None: gbgallery += count; 
            if flagHome: flagHome = False
            if flagGirl: flagGirl = False
    except:
        traceback.print_exc()

def getMaxPage(url):
    ''' Read the pagination widget of a listing page and return the
        exclusive upper page bound (last page number + 1).
        Falls back to 1000 when no pagination markup is found,
        and returns None for empty input or on error.
    '''
    try:
        # BUG FIX: the None/empty check used to run *after* url.strip(),
        # so a None url raised AttributeError instead of returning None.
        if url==None: return None
        lurl = url.strip()
        if lurl=='': return None
        maxPage = 1000
        tree = getXMLTree(lurl)
        if lurl==SITE_URL:
            # The home page uses different pagination markup than the blogs.
            pageNode = tree.xpath('//div[@class="Nav"]//span')
        else:
            pageNode = tree.xpath('//span[@class="pages"]')
        if len(pageNode) > 0:
            text = stringify(pageNode[0]).strip()
            m = re.search(r'(\d+)$', text)
            if m: return (int(float(m.group(1))) + 1)
        return maxPage
    except:
        traceback.print_exc()
        return None
    
if __name__ == '__main__':
    try:
        cprint('start crawler xemanhdep.com', 'yellow')
        listPage = []
        for url in LIST_URL:
            for page in range(1, getMaxPage(url)):
                listPage.append('{0}/page/{1}'.format(url, page))
        pool = workerpool.WorkerPool(size=8)
        pool.map(process, listPage)
        pool.shutdown(); pool.wait()
        print 'Finished.', datetime.datetime.now();
        pid = os.getpid();  os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()

