# -*- coding: utf-8 -*-
import lxml
import urllib, urllib2 
import re, sys, os
import mechanize
import datetime, time
import traceback
import workerpool
import hashlib
import urlparse
import binascii
import cStringIO as StringIO
import pycommonlib as pyclib
from lxml       import etree
from pymongo    import Connection
from urlparse   import urljoin
from termcolor  import cprint 

# --- Crawler configuration ---------------------------------------------------
# Local directory where downloaded ringtone files are stored.
LOCAL_PATH      = '/home/hoangnamhai/HarvestedData/mobile9'
#MONGO_SERVER    = '27.0.12.106'
# Target MongoDB instance that receives the harvested articles.
MONGO_SERVER    = 'beta.mana.vn'
MONGO_PORT      = 27017
#DATABASE        = 'my_database'
DATABASE        = 'mobile9'
# Public URL prefix recorded in documents for a downloaded file.
PREFIX          = '/uploads/mobile9' 
BASE_SITE       = 'mobile9.com'
# Cap on items ingested per (type, sort-order) pass of a category crawl.
MAX_COUNT       = 1000
CONNECT         = Connection(MONGO_SERVER, MONGO_PORT)
DB              = CONNECT[DATABASE]
RINGTONES_COLLECTION      = DB['ringtones_article']
#RINGTONES_COLLECTION      = DB['ringtones']
CATEGORY_COLLECTION     = DB['category']
USER_COLLECTION         = DB['backend_user']
# XPath helper: concatenated text content of a node and all descendants.
stringify               = etree.XPath("string()") 
# Make every file/directory this process creates world-writable.
os.umask(0000)
# Any CLI argument enables mirroring downloads to the remote host over SFTP.
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    # SECURITY: hard-coded SSH credentials -- should come from config/env.
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '1p%^IRg')
    sftp    = ssh.open_sftp()

# Maps ringtone type key -> mobile9 gallery URL slug.
TYPES  = {
    'midi'      : 'midi-ringtones',
    'mp3'       : 'mp3-ringtones',
    'real'      : 'real-ringtones',
    'wav'       : 'wav-ringtones',
    'iphone'    : 'apple-iphone-ringtones',
    'cingular'  : 'cingular-ringtones',
} 

# Maps a category display name -> its mobile9 filter id ('link').
# NOTE(review): the 'category' field is always empty and appears unused here
# (a commented-out getRootLft call in processCategory once read it) -- confirm.
CATEGORIES = {   
                 'Vietnam'          : {'link' : 'VN', 'category': ''},
                 'Alert'            : {'link' : '25', 'category': ''},
                 'Bollywood'        : {'link' : '93', 'category': ''},
                 'Classical'        : {'link' : '32', 'category': ''},
                 'Electronic'       : {'link' : '34', 'category': ''},
                 'Festive'          : {'link' : '94', 'category': ''},
                 'Funny'            : {'link' : '68', 'category': ''},
                 'Instrumental'     : {'link' : '97', 'category': ''},
                 'International'    : {'link' : '70', 'category': ''},
                 'Jazz'             : {'link' : '35', 'category': ''},
                 'Miscellaneous'    : {'link' : '27', 'category': ''},
                 'Original'         : {'link' : '26', 'category': ''},
                 'Rap & Hip Hop'    : {'link' : '37', 'category': ''},
                 'Remix'            : {'link' : '95', 'category': ''},
                 'Rock & Pop'       : {'link' : '33', 'category': ''},
                 'SMS'              : {'link' : '69', 'category': ''},
                 'Sayings'          : {'link' : '98', 'category': ''},
                 'Sound Effects'    : {'link' : '96', 'category': ''},
                 'Theme Music'      : {'link' : '36', 'category': ''},
             }

# Maps ringtone type key -> the category display name that getCategory()
# looks up in the legacy database.
NAME_CATEGORIES  = {
    'midi'      : 'MIDI Ringtones',
    'mp3'       : 'MP3 Ringtones',
    'real'      : 'Real Ringtones',
    'wav'       : 'WAV Ringtones',
    'iphone'    : 'Apple iPhone Ringtones',
    'cingular'  : 'Cingular Ringtones',
} 
# Secondary connection to the legacy database holding the category tree.
conn    = Connection('27.0.12.106', 27017)
db2     = conn['my_database']
def getCategory(name, root_id=142):
    '''Look up a category by display name in the legacy 'my_database'.

    Returns the matching document's (root_id, lft) pair, or (None, None)
    when no document matches or the query raises.
    '''
    try:
        doc = db2['category'].find_one(
            {'root_id': root_id, 'data': name},
            {'root_id': 1, 'lft': 1})
        if doc is None:
            return None, None
        return doc['root_id'], doc['lft']
    except:
        traceback.print_exc()
        return None, None

# Resolve each ringtone type to its (root_id, lft) category position once at
# import time, so article documents can reference the category tree.
# BUG FIX: the original stored only 'lft', but the sole consumer unpacks two
# values ('root_id, lft = LEFT[cat]'), which would raise on a successful
# lookup; store the full (root_id, lft) tuple instead.
LEFT = {}
for type_key, category_name in NAME_CATEGORIES.iteritems():
    root_id, lft = getCategory(category_name)
    if root_id == None:
        cprint('Chưa tồn tại category', 'red')
    else:
        LEFT[type_key] = (root_id, lft)

def getXMLTree(url, isXML=False, userAgent=False, outputHTML=False, returnHTML=False):
    '''Fetch *url* and parse the response into an lxml tree.

    - isXML      : False -> HTMLParser, True -> XMLParser (both utf-8).
    - userAgent  : when True, fetch through mechanize with a spoofed
                   Firefox User-Agent and a Referer processor.
    - outputHTML : dump the raw response body to stdout.
    - returnHTML : return (tree, html) instead of just the tree.

    Returns None on any error (the traceback is printed).
    '''
    try:
        if type(url).__name__ == 'unicode':
            url = url.encode('utf-8')
        if userAgent:
            agent = 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
            opener = mechanize.build_opener(mechanize.HTTPRefererProcessor)
            opener.addheaders = [("User-agent", agent)]
            response = opener.open(url)
        else:
            response = urllib.urlopen(url)
        html = response.read()
        if outputHTML:
            print(html)
        if isXML == False:
            parser = etree.HTMLParser(encoding='utf-8')
        else:
            parser = etree.XMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
        return (tree, html) if returnHTML else tree
    except:
        traceback.print_exc()

def getAuthor(name='crawler'):
    '''Return the _id of the backend user *name*, falling back to the
    'Hindua88' account; None when neither exists or the query fails.
    '''
    try:
        user = USER_COLLECTION.find_one({'username': name}, {})
        if user is None:
            user = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        return user['_id'] if user is not None else None
    except:
        traceback.print_exc()

def getRootLft(name='Ringtones'):
    '''Return (root_id, lft) of the named category in the main database.

    Falls back to (0, 0) when the category is missing, and (None, None)
    when the query itself fails.
    '''
    try:
        doc = CATEGORY_COLLECTION.find_one({'data': name}, {'root_id': 1, 'lft': 1})
        if doc is None:
            return 0, 0
        return doc['root_id'], doc['lft']
    except:
        traceback.print_exc()
        return None, None
        
def getPathWithName(src):
    '''Derive a hashed file name and its shard directory from *src*.

    Returns (md5_hexdigest, path) where path is the first four hex digits
    joined by '/' (e.g. 'a/b/c/d'); (None, None) on error.
    '''
    try:
        if type(src).__name__ == 'unicode':
            src = src.encode('utf-8')
        digest = hashlib.md5(src).hexdigest()
        shard = '/'.join(digest[:4])
        return digest, shard
    except:
        traceback.print_exc()
        return None, None
 
def downloadFile(url, link, ssh=None, sftp=None):
    try:
        if not url.startswith('http'): 
            if url.startswith('/'): url = 'http://angelnaina.mobile9.com/download{0}'.format(url)
            else: url = 'http://angelnaina.mobile9.com/download/{0}'.format(url)
        #cprint('Download from: {0}'.format(url), 'yellow')
        u = urllib2.urlopen(url)
        meta = u.info()
        hUrl, path = getPathWithName(BASE_SITE + getId(link))
        hashUrl = hashlib.md5(BASE_SITE + getId(link)).hexdigest()
        disposition = meta.getheaders("Content-Disposition")[0]
        preg        = re.compile(r'filename=(.+)$')
        m           = preg.search(disposition)
        if m: file_name = m.group(1)
        else: file_name = '{0}.{1}'.format(hashUrl, 'mp3')
        file_type = meta.getheaders("Content-Type")[0]
        preg        = re.compile(r'audio')
        m           = preg.search(file_type)
        if m==None: print disposition; cprint('Không phải file audio !', 'red'); return None, None, None, None
        file_size = int(meta.getheaders("Content-Length")[0])
        basename, file_ext  = os.path.splitext(file_name)
        if len(file_ext)<1: file_ext = '.mp3'
        path_file = '{0}/{1}/{2}{3}'.format(LOCAL_PATH, path, hashUrl, file_ext)
        print path_file
        path_dir  = '{0}/{1}'.format(LOCAL_PATH, path)
        source    = '{0}/{1}/{2}{3}'.format(PREFIX, path, hashUrl, file_ext)
        result    = 0
        if not os.path.isfile(path_file):
            if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
            print "Downloading: %s Bytes: %s" % (path_file, file_size)
            f = open(path_file, 'wb')
            while True:
                buffer = u.read(8192)
                if not buffer: break
                f.write(buffer)
            f.close()
            u.close()
        else: cprint('File đã tồn tai !', 'red'); result = 1
        if ssh!=None and sftp!=None:
            #copy to 27.0.12.106
            stdin, stdout, stderr   = ssh.exec_command("mkdir -p -m 0777 {0}".format(path_dir))
            remote_file = path_file
            info = pyclib.rexists(sftp, remote_file)
            if not info:
                flgRun = False
                for i in range(0, 3):
                    try:
                        sftp.put(path_file, remote_file)
                        print 'Copy file: ', path_file, ' => ', remote_file
                        flgRun = True; break
                    except:
                       continue 
                if flgRun:
                    cprint('Lưu file thành công!', 'green');
                else: 
                    cprint('Không upload được file lên', 'red'); result = None
            else: cprint('File đã tồn tại trên server!', 'red')
            '''
            ssh_script  = 'ssh {0} "mkdir -p {1}; chmod -R 777 {2}"'.format(remoteHost, path_dir, '{0}/{1}'.format(LOCAL_PATH, path[:1]))
            os.system(ssh_script); print ssh_script
            scp_script  = 'scp "{0}" "{1}:{2}"'.format(path_file, remoteHost, path_dir)
            os.system(scp_script); print scp_script
            '''
        return result, source, file_name, file_size
    except:
        traceback.print_exc()
        return None, None, None, None

def getId(link):
    '''Extract the numeric article id (the "/12345/" path segment) from
    *link*; returns None for empty input or when no id is present.
    '''
    try:
        if link == None or link == '':
            return
        match = re.search('/(\d+)/', link)
        if match:
            return match.group(1)
    except:
        traceback.print_exc()

def processGameWithLink(link, cat, rttype):
    try:
        intro = ''; download = {}; preview = {}; title = ''; tags = []; flgTag = False
        cprint('Process: ' + link, 'yellow')
        hashLink = pyclib.getMd5(link)
        check_exists = RINGTONES_COLLECTION.find_one({'hashLink': hashLink}, {})
        if check_exists!=None: cprint('Game đã tồn tại trong cơ sỡ dữ liệu', 'red'); return 1        
        ilink		= link.replace('?op=dpc', '')
        itree 		= getXMLTree(ilink)
        contentNode	= itree.xpath('//div[@id="content"]//div[@class="floated_left w660"]')
        if len(contentNode) > 0: primaryNode	= contentNode[0]
        else: cprint('Sai XPath => không thể lấy download được game.', 'red'); return
        titleNode   	= primaryNode.xpath('./h1')
        if len(titleNode)>0: title = stringify(titleNode[0]).strip()
        descNode 	= primaryNode.xpath('./div[@class="floated_left w420"]/div[@id="tabs"]/div[@id="tab-1"]')
        if len(descNode)>0: 
            intro = stringify(descNode[0]).strip(); intro = ' '.join(intro.split())    
        listTagNode     = primaryNode.xpath('.//div[@id="tabs"]/div[@id="tab-2"]/dl/*')

        for tagNode in listTagNode:
            tagContent  = stringify(tagNode).strip()			
            if tagContent==None or len(tagContent)<2: continue
            if flgTag: 
                flgTrag = False
                listTag = tagNode.xpath('.//a')
                for t in listTag: tags.append(stringify(t).strip());
                break	
            if tagContent=='Tags': flgTag = True
        '''    
        imgNode     = primaryNode.xpath('./div[@class="floated_left w420"]//div[@class="gallery"]//a')
        if len(imgNode)>0:
            linkImage = imgNode[0].get('href')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
            else:
                result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
            images = {'original_name': file_name, 'size': size, 'source': source}
        '''            
        tree, html = getXMLTree(link, returnHTML=True)
        preg    = re.compile(r'var red = "(.+)";')
        m       = preg.search(html)
        if m: 
            cprint(m.group(1), 'green')
            result = None; source = file_name = ''; size = 0
            if flgCopy!=None:
                result, source, file_name, size = downloadFile(m.group(1), link, ssh, sftp)
            else:
                result, source, file_name, size = downloadFile(m.group(1), link)
            if result!=None: 
            	download = {'original_name': file_name, 'size': size, 'source': source}; cprint(source, 'yellow')
           
            root_id = 142; lft = 1
            if LEFT.has_key(cat): root_id, lft = LEFT[cat]
            else: cprint('Không get được left', 'red')
            print root_id, lft
            author      =  getAuthor() 
            doc = { 'hashLink'        : hashLink,  
                    'root'          : root_id,  
                    'category'      : lft,
                    'content'       : {'intro': intro, 'download': download, 'preview': download},
                    'created_at'    : datetime.datetime.utcnow(),
                    'date'          : datetime.datetime.utcnow(),
                    'is_active'     : True,
                    'price'         : False,
                    'source'        : 'mobile9.com',
                    'title'         : title,
                    'link'          : ilink,
                    'ringtones_type'          : rttype,
                    'type'          : 'mp3',
                    'updated_at'    : datetime.datetime.utcnow(),
                    'tags'          : tags,
                    'author_id'     : author
                 }
            RINGTONES_COLLECTION.save(doc)
            print 'Information : '
            print pyclib.toAscii('Title: {0}'.format(title))
            print 'hashLink: ', hashLink
            print pyclib.toAscii('Intro: {0}'.format(intro))
            print 'Download: ', download
            #print 'Images: ', images
            cprint(tags, 'green')
		
        return 0
    except:
        traceback.print_exc()
        return 0

def processPage(lurl, cat, rttype):
    '''Crawl one gallery listing page and process every item link on it.

    Returns the number of already-existing items encountered (the sum of
    processGameWithLink results), or None when the page fetch fails.
    '''
    try:
        total = 0
        cprint('Process page : ' + lurl, 'yellow')
        page_tree = getXMLTree(lurl)
        for anchor in page_tree.xpath('//div[@id="content"]//div[@class="gallery"]/a'):
            detail_link = '{0}{1}'.format(anchor.get('href'), '?op=dpc')
            count = processGameWithLink(detail_link, cat, rttype)
            if count != None:
                total += count
        return total
    except:
        traceback.print_exc()
            
def getMaxPage(url):
    '''Return one past the last page number shown in the listing's pager.

    Falls back to 10 when the pager cannot be located or parsed; None when
    the page fetch itself raises.
    '''
    try:
        last_page = 10  # fallback when the pager is missing
        tree = getXMLTree(url)
        nodes = tree.xpath('//div[@id="content"]//div[@class="page"]//strong')
        if len(nodes) > 0:
            label = stringify(nodes[0]).strip()
            m = re.search(r'(\d+)$', label)
            if m:
                last_page = int(float(m.group(1))) + 1
        return last_page
    except:
        traceback.print_exc()

def processCategory(cat):
    try:
        '''
        root_id, lft = getRootLft(CATEGORIES[cat]['category'])
        print cat, root_id, lft
        return
        '''
        for key, value in TYPES.iteritems():
            site_url = 'http://gallery.mobile9.com/c/{0}'.format(value)
            rttype   = key
            for i in ["1", "3", "2"]:
                gbcount = 0
                idCat   = CATEGORIES[cat]['link']
                lurl = '{0}{1}'.format(site_url, '/1/?st=1&fi={0}&ff=on'.format(idCat))
                cprint('Get max page : ' + lurl, 'yellow')
                for page in range(1, getMaxPage(lurl)): 
                    print 'GLOBAL COUNT: ', gbcount
                    if gbcount > MAX_COUNT: cprint('Dừng xử lý vì vượt quá số lượng cho phép.', 'red');     return
                    count = processPage('{0}/{1}/?st={2}&fi={3}&ff=on'.format(site_url, page, i, idCat), cat, rttype); 
                    if count!=None: gbcount += count
    except:
        traceback.print_exc()

if __name__ == '__main__':
    try:
        cprint('start crawler mobile9.com', 'yellow')
        # size=1 keeps the crawl sequential; bump for parallel categories.
        pool = workerpool.WorkerPool(size=1)
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        if flgCopy != None:
            ssh.close()
        # os._exit() terminates the whole process immediately (workerpool
        # threads included) without running cleanup handlers.
        # NOTE(review): exit status 1 normally signals failure -- confirm
        # this is intentional.  BUG FIX: removed the os.kill() call that
        # followed os._exit() -- it was unreachable dead code.
        os._exit(1)
    except:
        traceback.print_exc()
        
