# -*- coding: utf-8 -*-
import lxml
import urllib, urllib2 
import re, sys, os
import mechanize
import datetime, time
import traceback
import workerpool
import hashlib
import urlparse
import binascii
import cStringIO as StringIO
import pycommonlib as pyclib
from lxml       import etree
from pymongo    import Connection
from urlparse   import urljoin
from termcolor  import cprint 

# ---------------------------------------------------------------------------
# Module-level configuration.  Everything below runs at import time: it opens
# two Mongo connections and, when a command-line flag is present, an SSH/SFTP
# session used to mirror downloaded files onto a remote web server.
# ---------------------------------------------------------------------------
LOCAL_PATH      = '/home/hoangnamhai/HarvestedData/mobile9'
#MONGO_SERVER    = '27.0.12.106'   
#MONGO_SERVER    = 'beta.mana.vn'   
MONGO_SERVER    = 'localhost'   
MONGO_PORT      = 27017
#DATABASE        = 'my_database'
DATABASE        = 'mobile9'
# Public URL prefix under which harvested files will be served.
PREFIX          = '/uploads/mobile9' 
SITE_URL        = 'http://gallery.mobile9.com/c/240-320-mobile-java-games'
GAME_URL        = 'http://gallery.mobile9.com/c/240-320-mobile-java-games/1'
BASE_SITE       = 'mobile9.com'
# Stop crawling a category once this many duplicate hits have accumulated.
MAX_COUNT       = 100000
CONNECT         = Connection(MONGO_SERVER, MONGO_PORT)
DB              = CONNECT[DATABASE]
# Second connection: remote database that holds the shared category tree.
CONNECT2         = Connection('27.0.12.106', 27017)
DB2              = CONNECT2['my_database']
ARTICLE_COLLECTION      = DB['game_article']
CATEGORY_COLLECTION     = DB2['category']
USER_COLLECTION         = DB['backend_user']
# Reusable XPath that flattens an element subtree to its text content.
stringify               = etree.XPath("string()") 
os.umask(0000)
# flgCopy != None means "also copy harvested files to the remote host".
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # NOTE(review): hard-coded SSH credentials in source — move to a config
    # file or environment variable.
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '1p%^IRg')
    sftp    = ssh.open_sftp()

# Supported handset screen resolutions (mobile9 URL slugs, '<w>-<h>').
SCREEN  = ['100-100', '128-128', '128-160', '176-208', '176-220', '240-320']

# Maps a mobile9 category name to its numeric listing filter id ('link') and
# the Vietnamese display category used to look up the local category tree.
# Several source categories intentionally collapse into 'Game khác' ("other").
CATEGORIES = {   
                 'Vietnam':        {'link' : 'VN', 'category': unicode('Game khác', 'utf-8')},
                 'Education':      {'link' : '41', 'category': unicode('Giáo dục', 'utf-8')},
                 'Entertainment':  {'link' : '42', 'category': unicode('Giải trí', 'utf-8')},
                 'Games':          {'link' : '43', 'category': unicode('Game khác', 'utf-8')},
                 'Medical':        {'link' : '44', 'category': unicode('Y học', 'utf-8')},
                 'Multimedia':     {'link' : '45', 'category': unicode('Đa phương tiện', 'utf-8')},
                 'Productivity':   {'link' : '46', 'category': unicode('Năng suất', 'utf-8')},
                 'Professional':   {'link' : '47', 'category': unicode('Chuyên nghiệp', 'utf-8')},
                 'Reference':      {'link' : '48', 'category': unicode('Game khác', 'utf-8')},
                 'Travel':         {'link' : '49', 'category': unicode('Du lịch', 'utf-8')},
                 'Utilities':      {'link' : '50', 'category': unicode('Tiện ích', 'utf-8')},
                 'User Interface': {'link' : '51', 'category': unicode('Giao diện người dùng', 'utf-8')},
                 'Security':       {'link' : '52', 'category': unicode('Bảo mật', 'utf-8')},
                 'Internet':       {'link' : '53', 'category': unicode('Internet', 'utf-8')},
                 'Miscellaneous':  {'link' : '54', 'category': unicode('Game khác', 'utf-8')},
                 'Emulator':       {'link' : '43', 'category': unicode('Đối kháng', 'utf-8')},
                 'Sexy':           {'link' : '103', 'category': unicode('Người đẹp', 'utf-8')},
             }

# Cache: subcategory name -> category-tree 'lft' value (filled at import time).
LEFT = {} 
def getRootLft(name='Game khác'):
    ''' Look up the position of a category in the shared category tree.

        Queries the remote ``category`` collection for the node whose root is
        129 (the games tree) and whose ``data`` field equals *name*.

        Returns a ``(root_id, lft)`` tuple; ``(0, 0)`` when the category is
        unknown or the lookup fails, so callers can always unpack the result.
    '''
    root_id = 0; lft = 0
    try:
        result = CATEGORY_COLLECTION.find_one({'root_id': 129, 'data': name}, {'root_id': 1, 'lft': 1})
        if result is not None:
            root_id = result['root_id']; lft = result['lft']
    except:
        # BUG FIX: the original returned None on error, which crashed the
        # module-level ``root_id, lft = getRootLft(...)`` tuple unpacking.
        traceback.print_exc()
    return root_id, lft

# Pre-resolve the tree position ('lft') of every crawler category at import
# time so processGameWithLink() can do a cheap dict lookup per game.
# NOTE(review): if getRootLft() hits an exception it falls through and returns
# None (not a tuple), which would make this unpacking raise TypeError — worth
# guarding or fixing in getRootLft itself.
for key, value in CATEGORIES.iteritems():
    root_id, lft = getRootLft(value['category'])
    if root_id!=None:
        LEFT[key] = lft
print LEFT       

def getXMLTree(url, isXML=False, userAgent=False, outputHTML=False, returnHTML=False):
    ''' Fetch *url* and parse the response into an lxml tree.

        url        -- page to fetch (unicode is encoded to utf-8 first)
        isXML      -- False: use etree.HTMLParser, True: use etree.XMLParser
        userAgent  -- True: fetch through mechanize with a Firefox User-Agent
                      (some pages block the default urllib agent)
        outputHTML -- True: echo the raw HTML to stdout (debug aid)
        returnHTML -- True: return (tree, html) instead of just the tree

        Returns the parsed tree (or (tree, html)); on any failure returns
        None (or (None, None)) after printing the traceback.
    '''
    response = None
    try:
        if isinstance(url, unicode): url = url.encode('utf-8')
        if userAgent:
            user_agent  = 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
            opener      = mechanize.build_opener(mechanize.HTTPRefererProcessor)
            opener.addheaders = [("User-agent", user_agent)]
            response    = opener.open(url)
        else:
            response    = urllib.urlopen(url)
        html    = response.read()
        if outputHTML: print(html)
        if isXML: parser = etree.XMLParser(encoding='utf-8')
        else: parser = etree.HTMLParser(encoding='utf-8')
        tree    = etree.parse(StringIO.StringIO(html), parser)
        if returnHTML:
            return tree, html
        return tree
    except:
        traceback.print_exc()
        # BUG FIX: keep the arity stable so callers that unpack
        # ``tree, html = getXMLTree(..., returnHTML=True)`` do not raise
        # TypeError when the fetch fails.
        if returnHTML:
            return None, None
        return None
    finally:
        # BUG FIX: the original never closed the HTTP response.
        if response is not None:
            try: response.close()
            except: pass

def getAuthor(name='crawler'):
    ''' Return the Mongo ``_id`` of the backend user *name*.

        Falls back to the 'Hindua88' account when *name* does not exist, and
        to None when neither user is found or the query fails.
    '''
    try:
        result = USER_COLLECTION.find_one({'username': name}, {})
        if result is None:
            result = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        if result is not None:
            return result['_id']
    except:
        traceback.print_exc()

def getPathWithName(src):
    ''' Derive a stable file name and fan-out directory path from *src*.

        The MD5 hex digest of *src* becomes the file name; its first four hex
        characters form a four-level directory path (e.g. ``9/0/0/1``) so the
        download tree stays balanced instead of piling up in one directory.

        Returns ``(md5_hex, path)``, or ``(None, None)`` when hashing fails.
    '''
    try:
        # md5 needs bytes; encode any text input (unicode on py2, str on py3).
        if not isinstance(src, bytes):
            src = src.encode('utf-8')
        md5str = hashlib.md5(src).hexdigest()
        path = '{0}/{1}/{2}/{3}'.format(md5str[0], md5str[1], md5str[2], md5str[3])
        return md5str, path
    except:
        traceback.print_exc()
        return None, None
 
def downloadFileJar(url, link, ssh=None, sftp=None):
    try:
        u = urllib2.urlopen(url)
        meta = u.info()
        hUrl, path = getPathWithName(BASE_SITE + getGameId(link))
        hashUrl = hashlib.md5(BASE_SITE + getGameId(link)).hexdigest()
        disposition = meta.getheaders("Content-Disposition")[0]
        preg        = re.compile(r'filename=(.+)$')
        m           = preg.search(disposition)
        if m: file_name = m.group(1)
        else: file_name = '{0}.{1}'.format(hashUrl, 'jar')
        file_type = meta.getheaders("Content-Type")[0]
        preg        = re.compile(r'java')
        m           = preg.search(file_type)
        if m==None: print disposition; cprint('Không phải file jar !', 'red'); return None, None, None, None
        file_size = int(meta.getheaders("Content-Length")[0])
        path_file = '{0}/{1}/{2}.{3}'.format(LOCAL_PATH, path, hashUrl, 'jar')
        print path_file
        path_dir  = '{0}/{1}'.format(LOCAL_PATH, path)
        source    = '{0}/{1}/{2}.{3}'.format(PREFIX, path, hashUrl, 'jar')
        result    = 0
        if not os.path.isfile(path_file):
            if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
            print "Downloading: %s Bytes: %s" % (path_file, file_size)
            f = open(path_file, 'wb')
            while True:
                buffer = u.read(8192)
                if not buffer: break
                f.write(buffer)
            f.close()
            u.close()
        else: cprint('File đã tồn tai !', 'red'); result = 1
        if ssh!=None and sftp!=None:
            #copy to 27.0.12.106
            stdin, stdout, stderr   = ssh.exec_command("mkdir -p -m 0777 {0}".format(path_dir))
            remote_file = path_file
            info = pyclib.rexists(sftp, remote_file)
            if not info:
                flgRun = False
                for i in range(0, 3):
                    try:
                        sftp.put(path_file, remote_file)
                        print 'Copy file: ', path_file, ' => ', remote_file
                        flgRun = True; break
                    except:
                       continue 
                if flgRun:
                    cprint('Lưu file thành công!', 'green');
                else: 
                    cprint('Không upload được file lên', 'red'); result = None
            else: cprint('File đã tồn tại trên server!', 'red')
            '''
            ssh_script  = 'ssh {0} "mkdir -p {1}; chmod -R 777 {2}"'.format(remoteHost, path_dir, '{0}/{1}'.format(LOCAL_PATH, path[:1]))
            os.system(ssh_script); print ssh_script
            scp_script  = 'scp "{0}" "{1}:{2}"'.format(path_file, remoteHost, path_dir)
            os.system(scp_script); print scp_script
            '''
        return result, source, file_name, file_size
    except:
        traceback.print_exc()
        return None, None, None, None

def getGameId(link):
    ''' Extract the numeric game id from a mobile9 URL.

        The id is the first ``/digits/`` path segment, e.g.
        ``.../games/123456/foo`` -> ``'123456'``.

        Returns None for empty/None links or when no id is present.
    '''
    try:
        if not link: return None
        # Raw string so ``\d`` is not interpreted as a string escape.
        m = re.search(r'/(\d+)/', link)
        if m: return m.group(1)
        return None
    except:
        traceback.print_exc()

def processGameWithLink(link, cat, screen):
    ''' Scrape one game detail page and store it as an article document.

        link   -- detail page URL (with '?op=dpc' appended so the download
                  redirect URL is embedded in the page's javascript)
        cat    -- key into CATEGORIES
        screen -- screen resolution label, e.g. '240x320'

        Returns 1 when the game already exists in the database (so the caller
        can count duplicates), 0 on normal completion or error, and None when
        the page layout could not be parsed or the category is unknown.
    '''
    try:
        intro = ''; download = {}; images = {}; title = ''; tags = []; flgTag = False
        cprint('Process: ' + link, 'yellow')
        #gameId = getGameId(link)
        #if gameId==None: return
        # Deduplicate on the canonical link (without the ?op=dpc suffix).
        ilink       = link.replace('?op=dpc', '')
        hashLink = pyclib.getMd5(ilink)
        check_exists = ARTICLE_COLLECTION.find_one({'hashLink': hashLink}, {})
        if check_exists!=None: cprint('Game đã tồn tại trong cơ sỡ dữ liệu', 'red'); return 1        
        itree       = getXMLTree(ilink)
        contentNode = itree.xpath('//div[@id="content"]//div[@class="floated_left w660"]')
        if len(contentNode) > 0: primaryNode = contentNode[0]
        else: cprint('Sai XPath => không thể lấy download được game.', 'red'); return
        titleNode   = primaryNode.xpath('./h1')
        if len(titleNode)>0: title = stringify(titleNode[0]).strip()
        # Intro text lives in the first tab; collapse all whitespace runs.
        descNode    = primaryNode.xpath('./div[@class="floated_left w420"]/div[@id="tabs"]/div[@id="tab-1"]')
        if len(descNode)>0: 
            intro = stringify(descNode[0]).strip(); intro = pyclib.toAscii(intro); intro = ' '.join(intro.split())    
        listTagNode     = primaryNode.xpath('.//div[@id="tabs"]/div[@id="tab-2"]/dl/*')

        # The tag list is a <dl>: scan until the 'Tags' label, then collect
        # the anchors of the element that follows it.
        for tagNode in listTagNode:
            tagContent  = stringify(tagNode).strip()
            if tagContent==None or len(tagContent)<2: continue
            if flgTag: 
                flgTrag = False  # NOTE(review): typo for flgTag — harmless, the break below ends the loop anyway.
                listTag = tagNode.xpath('.//a')
                for t in listTag: tags.append(stringify(t).strip());
                break
            if tagContent=='Tags': flgTag = True
            
        # First gallery image: download locally, optionally SCP to the server.
        imgNode     = primaryNode.xpath('./div[@class="floated_left w420"]//div[@class="gallery"]//a')
        if len(imgNode)>0:
            linkImage = imgNode[0].get('href')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
            else:
                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
            images = {'original_name': file_name, 'size': size, 'source': source}
                    
        # The ?op=dpc page embeds the real download URL in `var red = "...";`.
        tree, html = getXMLTree(link, returnHTML=True)
        preg    = re.compile(r'var red = "(.+)";')
        m       = preg.search(html)
        if m: 
            cprint(m.group(1), 'green')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = downloadFileJar(m.group(1), link, ssh, sftp)
            else:
                result, source, file_name, size = downloadFileJar(m.group(1), link)
            if result!=None: 
                download = {'original_name': file_name, 'size': size, 'source': source}; cprint(source, 'yellow')
            '''
            root_id, lft = getRootLft(CATEGORIES[cat]['category'])
            if root_id==0: root_id, lft = getRootLft()
            '''
            # root 129 is the games tree; lft comes from the import-time cache.
            root_id = 129
            lft     = 1 
            if LEFT.has_key(cat): lft = LEFT[cat]
            else: cprint('Chưa tồn tại category.', 'red'); return
            print root_id, lft
            author      =  getAuthor() 
            doc = { 'hashLink'        : hashLink,  
                    'root'          : root_id,  
                    'category'      : lft,
                    'content'       : {'intro': intro, 'download': download, 'image': images},
                    'created_at'    : datetime.datetime.utcnow(),
                    'date'          : datetime.datetime.utcnow(),
                    'is_active'     : True,
                    'price'         : False,
                    'source'        : 'mobile9.com',
                    'title'         : title,
                    'link'          : ilink,
                    'type'          : 'jar',
                    'screen'        : screen,
                    'updated_at'    : datetime.datetime.utcnow(),
                    'tags'          : tags,
                    'author_id'     : author,
                    'subcategory'   : cat
                 }
            ARTICLE_COLLECTION.save(doc)
            print 'Information : '
            print pyclib.toAscii('Title: '+ title)
            print 'hashLink: ', hashLink
            print pyclib.toAscii('Intro: '+ intro)
            print 'Download: ', download
            print 'Images: ', images
            cprint(tags, 'green')
        return 0
    except:
        traceback.print_exc()
        return 0

def processPage(lurl, cat, screen):
    ''' Crawl one listing page and process every game linked from it.

        lurl   -- listing page URL
        cat    -- key into CATEGORIES
        screen -- screen resolution label, e.g. '240x320'

        Returns the number of games on the page that were already in the
        database (sum of the per-game duplicate counters), or None when the
        page itself could not be processed.
    '''
    try:
        gbcount = 0
        cprint('Process page : ' + lurl, 'yellow')
        tree       = getXMLTree(lurl)
        listGame   = tree.xpath('//div[@id="content"]//div[@class="gallery"]/a')
        for game in listGame:
            # ?op=dpc makes the detail page expose the download redirect.
            link    = '{0}{1}'.format(game.get('href'), '?op=dpc')
            count   = processGameWithLink(link, cat, screen)
            if count is not None:
                gbcount += count
        return gbcount
    except:
        traceback.print_exc()
            
def getMaxPage(url):
    ''' Return the exclusive upper bound for the page loop of *url*.

        Reads the trailing number of the pager's "Page X of N" marker and
        returns N + 1 so callers can iterate ``range(1, getMaxPage(url))``.

        Returns 1 (i.e. an empty page range) when the marker is missing or
        the request fails.
    '''
    maxPage = 1
    try:
        tree = getXMLTree(url)
        pageNode = tree.xpath('//div[@id="content"]//div[@class="page"]//strong')
        if len(pageNode) > 0:
            text = stringify(pageNode[0]).strip()
            m = re.search(r'(\d+)$', text)
            if m: maxPage = int(float(m.group(1))) + 1
    except:
        # BUG FIX: the original returned None on error, which made the
        # caller's ``range(1, getMaxPage(...))`` raise TypeError.
        traceback.print_exc()
    return maxPage

def processCategory(cat):
    try:
        '''
        root_id, lft = getRootLft(CATEGORIES[cat]['category'])
        print cat, root_id, lft
        return
        '''
        for item in SCREEN:
            site_url = 'http://gallery.mobile9.com/c/{0}-mobile-java-games'.format(item)
            screen = item.replace('-', 'x')
            #for i in ["1", "3", "2"]:
            for i in ["3"]:
                gbcount = 0
                idCat   = CATEGORIES[cat]['link']
                lurl = '{0}{1}'.format(site_url, '/1/?st=1&fi={0}&ff=on'.format(idCat))
                cprint('Get max page : ' + lurl, 'yellow')
                for page in range(1, getMaxPage(lurl)): 
                    print 'GLOBAL COUNT: ', gbcount
                    if gbcount > MAX_COUNT: cprint('Dừng xử lý vì vượt quá số lượng cho phép.', 'red');     return
                    count = processPage('{0}/{1}/?st={2}&fi={3}&ff=on'.format(site_url, page, i, idCat), cat, screen); 
                    if count!=None: gbcount += count
    except:
        traceback.print_exc()

if __name__ == '__main__':
    try:
        cprint('start crawler mobile9.com', 'yellow')
        #processCategory('Games')
        # Crawl all categories concurrently on 8 worker threads.
        pool = workerpool.WorkerPool(size=8)
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        if flgCopy!=None: ssh.close()
        # NOTE(review): os._exit(1) terminates the process immediately, so the
        # os.kill() after it is unreachable — and exit status 1 conventionally
        # signals failure, not success.
        pid = os.getpid();  os._exit(1); os.kill(pid, 9)
    except:
        traceback.print_exc()
        
