# -*- coding: utf-8 -*-
import pycommonlib as pyclib
import workerpool
import traceback
import datetime, time
import re, os
import hashlib, urllib, urlparse
import threading

from pymongo import Connection
from termcolor  import cprint

# --- Crawler configuration -------------------------------------------------
LOCAL_PATH = '/home/data1/zedge'      # local directory images are saved under
MONGO_SERVER = 'localhost'
MONGO_PORT = 27017
DATABASE = 'zedge'
PREFIX = '/uploads/zedge'             # public URL prefix stored with each image
BASE_SITE = 'http://www.zedge.net'    # base for resolving relative item links
MAX_COUNT = 1000000                   # duplicate-hit cap per category before stopping
# NOTE: connects to MongoDB at import time (module-level side effect).
CONNECT = Connection(MONGO_SERVER, MONGO_PORT)
DB = CONNECT[DATABASE]
globalSiteUrl = ''                    # mutable global: current site URL being crawled
IMAGE_COLLECTION = DB['wallpapers']
start = 0                             # wall-clock start time; set in __main__, read by timeOut()

# Site listing pages to crawl. Each entry's 'url' becomes globalSiteUrl.
URLS = {
        #'iphone4s': {'url': 'http://www.zedge.net/wallpapers/5234/iphone-iphone-4s-wallpapers'},
        'galaxySII': {'url': 'http://www.zedge.net/wallpapers/5305/samsung-galaxy-s-ii-lte-wallpapers/'}
       }
# Category display name -> zedge.net URL path suffix appended to the site URL.
CATEGORIES = {
                 'Abstract' : '13-6',
                 'Anime' : '17-6',
                 'Auto & Vehicles' : '9-6',
                 'Babes' : '14-6',
                 'Bollywood' : '16-6',
                 'Comedy' : '1-6',
                 'Designs' : '21-6',
                 'Drawn - Cartoons' : '6-6',
                 'Entertainment' : '3-6',
                 'Games' : '18-6',
                 'Holiday - Occasions' : '19-6',
                 'Hunks' : '15-6',
                 'Logos' : '8-6',
                 'Love' : '22-6',
                 'Music' : '4-6',
                 'Nature - Landscape' : '5-6',
                 'News & Politics' : '23-6',
                 'Other' : '10-6',
                 'People' : '24-6',
                 'Pets & Animals' : '11-6',
                 'Signs & Sayings' : '25-6',
                 'Spiritual' : '26-6',
                 'Sport' : '7-6',
                 'Technology' : '2-6',
                 'Zedge Latino' : '20-6',
             }

def getRootLft(name):
    """Return (root_id, lft) for category *name* in the nested-set tree.

    Looks the category up in the 'category' collection; if it does not
    exist, a new node is appended 50 past the current maximum 'rgt' and
    saved. Always returns a (root_id, lft) tuple, falling back to
    (1, 0) on any error.
    """
    try:
        category = DB['category']
        root_id = 1
        lft = 0
        result = category.find_one({'data': name}, {'root_id': 1, 'lft': 1})
        if result is not None:
            root_id = result['root_id']
            lft = result['lft']
        else:
            # Find the largest existing 'rgt' and place the new node after it.
            record = category.find({}, {'root_id': 1, 'rgt': 1}).sort('rgt', -1).limit(1)
            for row in record:
                lft = row['rgt'] + 50
                break
            rgt = lft + 50          # rgt computed BEFORE the empty-tree fixup (preserves 1/50)
            if lft == 0:
                lft = 1             # empty collection: start the tree at lft=1
            doc = {'root_id': 1, 'lft': lft, 'rgt': rgt, 'data': name}
            category.save(doc)
        return root_id, lft
    except Exception:
        traceback.print_exc()
        # BUG FIX: the original returned None here, which made the
        # module-level unpack `root_id, lft = getRootLft(key)` raise
        # TypeError and kill the whole script. Return safe defaults.
        return 1, 0

# Map category name -> its 'lft' value in the nested-set category tree.
# NOTE: runs at import time and issues one MongoDB query per category;
# the module-level names root_id/lft are leftovers of the loop unpack.
ROOTLEFT = {}
for key, value in CATEGORIES.iteritems():
    root_id, lft = getRootLft(key)
    ROOTLEFT[key] = lft

def processItem(link, cat):
    try:
        if link==None or link=='': return
        cprint('Process: ' + link, 'green')
        hashLink = pyclib.getMd5(link)
        check_exists = IMAGE_COLLECTION.find_one({'hashLink': hashLink})
        if check_exists!=None:
            cprint('Document đã tồn tại trong database!', 'red'); return 1
        else:
            cprint('hashLink: ' + str(hashLink), 'yellow')
            if hashLink==None: return
            title = ''
            tree = pyclib.getXMLTree(link)
            if tree==None: cprint('Không build được tree.', 'red'); return
            contentNode	= tree.xpath('//div[@id="wrapper"]//div[@id="content"]//div[@class="box"]')
            if len(contentNode) > 0: primaryNode	= contentNode[0]
            else: cprint('Sai XPath => không thể lấy download được game.', 'red'); return
            titleNode   	= primaryNode.xpath('./div/h2')
            if len(titleNode)>0: title = pyclib.getStringWithNode(titleNode[0])

            listNode = primaryNode.xpath('./div[@class="body"]//div[@class="fullSize"]/a')
            if len(listNode)>0:
                src = listNode[0].get('href')
                if src==None and src=='': return
                ctree = pyclib.getXMLTree(src)
                imgNode = ctree.xpath('//div[@class="previewBody"]//img') 
                if len(imgNode) == 0: cprint('Không lấy được ảnh khi chọn nút fullSize', 'red'); return
                linkImage = imgNode[0].get('src') 

                result = None; source = file_name = ''; size = 0
                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                if result!=None: 
                    images = {'original_name': file_name, 'size': size, 'source': source}
                    dtnow = datetime.datetime.now()
                    root_id = 1
                    if ROOTLEFT.has_key(cat): lft = ROOTLEFT[cat]
                    else: cprint('không lấy được lft của category.', 'red'); return 
                    doc = { 'hashLink': hashLink,
                            'link': link,
                            'content': {'image': images},
                            'date': datetime.datetime.utcnow(), 
                            'title': title, 
                            'source': 'zedget.com', 
                            'is_active': True, 
                            'root': root_id,
                            'category': lft,
                            'type': 'wallpaper',
                            'update': dtnow,
                            'lastupdate': dtnow
                            }
                    IMAGE_COLLECTION.save(doc); 
                    print 'Title: ', pyclib.toAscii(title)
                    print 'Source image: ', images['source']
                    print 'Root, lft: ', root_id, lft
                    return 0
    except:
        traceback.print_exc()

def processCategory(cat):
    """Walk the paginated listing of one category and process every item.

    Stops after 3+ pages yield no items, or after MAX_COUNT processed
    links. Worker-pool entry point: called with a category name.
    """
    try:
        global globalSiteUrl
        globalCount = 0
        count = 0           # consecutive-ish counter of pages with no data
        url = globalSiteUrl
        # Build the category listing URL, e.g. <site>/<cat-suffix>.
        if not url.endswith('/'):
            turl = '{0}/{1}'.format(url, CATEGORIES[cat])
        else:
            turl = '{0}{1}'.format(url, CATEGORIES[cat])
        for page in range(1, 1000):
            if count > 3:
                cprint('Ba trang không lấy được dữ liệu => dừng', 'red')
                break
            if globalCount > MAX_COUNT:
                cprint('Số lượng trùng lặp trong category(' + cat + ' vượt qua giới hạn cho phép.')
                break
            lurl = '{0}-{1}'.format(turl, page)
            cprint('Process: ' + lurl, 'yellow')
            tree = pyclib.getXMLTree(lurl)
            # BUG FIX: getXMLTree returns None on a failed fetch; the
            # original called tree.xpath() unconditionally, so one bad
            # page aborted the whole category via the broad except.
            # Count it as an empty page and keep going instead.
            if tree is None:
                count += 1
                continue
            listNode = tree.xpath('//div[@id="wrapper"]//div[@class="content"]//div[@id="browsediv"]//div[contains(@class, "itemcontainer")]')
            if len(listNode) == 0:
                count += 1
            for node in listNode:
                itemNode = node.xpath('./div[@class="itembody"]/div/a')
                if len(itemNode) > 0:
                    link = urlparse.urljoin(BASE_SITE, itemNode[0].get('href'))
                    result = processItem(link, cat)
                    if result is not None:
                        globalCount += 1
    except Exception:
        traceback.print_exc()

def processAll():
    """Crawl every configured site URL, fanning categories out to a pool.

    NOTE(review): globalSiteUrl is a mutable global read by the workers;
    with more than one entry in URLS the pools could race on it — today
    only one URL is active, so each pool sees a stable value.
    """
    global globalSiteUrl
    try:
        for k, v in URLS.iteritems():
            globalSiteUrl = v['url']
            pool = workerpool.WorkerPool(size=4)
            pool.map(processCategory, CATEGORIES.keys())
            pool.shutdown()
            pool.wait()
    # BUG FIX: narrowed the bare `except:` so SystemExit and
    # KeyboardInterrupt are no longer silently swallowed.
    except Exception:
        traceback.print_exc()

def timeOut():        
    while True:
        delta = time.time() - start
        if delta > 1800:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        cprint('start crawler zedge.net', 'yellow')
        start = time.time() 
        timeout = threading.Thread(target=timeOut).start()
        processAll()
        print 'Finished.', datetime.datetime.now();
        pid = os.getpid();  os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
