# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import  workerpool
import  traceback
import  datetime, time
import  re, os

import  pycommonlib as pyclib
import  html2textlib
import  HTMLParser
import  cStringIO as StringIO
import  urllib
import  threading
import  urllib2

from    pymongo     import  Connection
from    termcolor   import  cprint
from    urlparse    import  urlparse
from    urlparse    import  urljoin
from    lxml        import  etree

# Storage locations: where downloaded assets land on this machine and on
# the document-store host.
LOCAL_PATH          = '/home/data1/livewallpapers'
DOCUMENT_STORE_PATH = '/home/hoangnamhai/HarvestedData/livewallpapers'
#MONGO_SERVER        = 'beta.mana.vn'   
MONGO_SERVER        = 'localhost'   
MONGO_PORT          = 27017
DATABASE            = 'livewallpapers'
# URL prefix under which saved files are exposed by the frontend.
PREFIX              = '/uploads/livewallpapers' 
SITE_URL            = 'http://www.livewallpapers.org'
# Crawl limits: stop after MAX_COUNT duplicates or MAX_ARTICLE articles.
MAX_COUNT           = 50
MAX_ARTICLE         = 10000
# Primary article DB (local) and the category/user DB on 27.0.12.106.
# NOTE: these connections are opened at import time.
CONNECT             = Connection(MONGO_SERVER, MONGO_PORT)
DB                  = CONNECT[DATABASE]
CONNECT2            = Connection('27.0.12.106', MONGO_PORT)
DB2                 = CONNECT2['my_database']
MAX_PAGE            = 20
ARTICLE_COLLECTION  = DB['article']
CATEGORY_COLLECTION = DB2['category']
USER_COLLECTION     = DB2['backend_user']
# Android browser UA so the site serves its mobile pages.
USER_AGENT          = 'Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17'
os.umask(0000)  # world-writable files/dirs for the shared upload tree

# Optional remote mirroring: any command-line argument enables copying of
# downloaded files to mana.vn over SSH/SFTP.
flgCopy             = pyclib.getArgs()
ssh = None; sftp = None
if flgCopy!=None:
    ssh     = pyclib.createSSHClient('mana.vn', 22, 'daudm', '')
    sftp    = ssh.open_sftp()
    # NOTE(review): if createSSHClient raises instead of returning None,
    # this check is never reached — verify behaviour in pycommonlib.
    if ssh==None: 
        pyclib.forceQuit()
start = 0

# Listing feeds to crawl: key => query fragment appended to SITE_URL.
# The per-topic category feeds are currently disabled (commented out);
# only the two sort-based feeds are active.
CATEGORIES = { 
       'bestrated'  : {'link' : 'sort=best'}, 
       'popular'    : {'link' : 'sort=popular'},    
       #'sport'      : {'link' : 'category/sport'},       
       #'games'      : {'link' : 'category/games'},            
       #'space'      : {'link' : 'category/space'},
       #'animals'    : {'link' : 'category/animals'},  
       #'technology' : {'link' : 'category/technology'},
       #'fun'        : {'link' : 'category/fun'}, 
       #'nature'     : {'link' : 'category/nature'}, 
       #'abstract'   : {'link' : 'category/abstract'},
       #'sexy'       : {'link' : 'category/sexy'}, 
     }

# Filters tried for the sort-based feeds (see processCategory).
PARAMETERS = ['free', 'apk', 'video']

def getRootLftCategory(name=None):
    ''' Look up the root_id and lft fields of a category document.

    When no name is given the default 'Live Wallpapers' category is used.
    Returns (root_id, lft), or (None, None) when the category is missing
    or the lookup fails.
    '''
    try:
        query = {'data': 'Live Wallpapers'} if name is None else {'data': name}
        doc = CATEGORY_COLLECTION.find_one(query, {'root_id': 1, 'lft': 1})
        if doc is None:
            if name is not None:
                cprint('Category chưa tồn tại !', 'red')
            return None, None
        return doc['root_id'], doc['lft']
    except:
        traceback.print_exc()
        return None, None

# Resolve the default 'Live Wallpapers' category once at import time;
# both values may be None when the category collection is unreachable.
ROOT_ID, LFT = getRootLftCategory()

def checkArticleDuplicate(link):
    ''' Check whether an article link is already stored in the DB.

    Returns (1, hash) for a duplicate, (0, hash) for a new link, and
    (None, None) on empty input or lookup failure.
    '''
    try:
        if link is None or link == '':
            return None, None
        hashLink = pyclib.getMd5(link)
        if ARTICLE_COLLECTION.find_one({'hashLink' : hashLink}) is not None:
            cprint('Document đã tồn tại trong cơ sở dữ liệu', 'red')
            return 1, hashLink
        return 0, hashLink
    except:
        traceback.print_exc()
        return None, None

def getAuthor(name='crawler'):
    ''' Return the backend user _id for *name*, falling back to the
    'Hindua88' account; returns None when neither exists or on error.
    '''
    try:
        user = USER_COLLECTION.find_one({'username': name}, {})
        if user is None:
            user = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        if user is not None:
            return user['_id']
    except:
        traceback.print_exc()

def downloadFileAPKNotHash(url, ssh=None, sftp=None):
    try:
        ext = url[-3:] 
        ext = ext.lower()
        if ext!='apk': cprint('Không phải file .apk', 'red'); return
        u = urllib2.urlopen(url)
        meta = u.info()
        file_name = url.split('/')[-1]
        file_size = int(meta.getheaders("Content-Length")[0])
        hashUrl, path = pyclib.getPathWithName(url)
        path_file = '{0}/{1}/{2}.{3}'.format(LOCAL_PATH, 'fileapk', file_name)
        path_dir  = '{0}/{1}'.format(LOCAL_PATH, path)
        source    = '{0}/{1}/{2}.{3}'.format(PREFIX, path, hashUrl, 'apk')
        result    = 0
        if not os.path.isfile(path_file):
            if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
            print "Downloading: %s Bytes: %s" % (path_file, file_size)
            f = open(path_file, 'wb')
            while True:
                buffer = u.read(8192)
                if not buffer: break
                f.write(buffer)
            f.close()
            u.close()

            dsize = os.path.getsize(path_file)
            if dsize!=file_size:
                cprint('Không lấy được đủ dung lượng file', 'red'); result = None
        else: cprint('File đã tồn tai !', 'red'); result = 1
        if ssh!=None and sftp!=None:
            #copy to 27.0.12.106
            stdin, stdout, stderr   = ssh.exec_command("mkdir -p -m 0777 {0}".format(path_dir))
            remote_file = path_file
            info = pyclib.rexists(sftp, remote_file)
            if not info:
                flgRun = False
                for i in range(0, 3):
                    try:
                        sftp.put(path_file, remote_file)
                        print 'Copy file: ', path_file, ' => ', remote_file
                        flgRun = True; break
                    except:
                       continue 
                if flgRun:
                    cprint('Lưu file thành công!', 'green');
                else: 
                    cprint('Không upload được file lên', 'red'); result = None
            else: cprint('File đã tồn tại trên server!', 'red')
            '''
            ssh_script  = 'ssh {0} "mkdir -p {1}; chmod -R 777 {2}"'.format(remoteHost, path_dir, '{0}/{1}'.format(LOCAL_PATH, path[:1]))
            os.system(ssh_script); print ssh_script
            scp_script  = 'scp "{0}" "{1}:{2}"'.format(path_file, remoteHost, path_dir)
            os.system(scp_script); print scp_script
            '''
        return result, source, file_name, file_size
    except:
        traceback.print_exc()
        return None, None, None, None

def downloadFileAPK(url, ssh=None, sftp=None):
    try:
        ext = url[-3:] 
        ext = ext.lower()
        if ext!='apk': cprint('Không phải file .apk', 'red'); return
        u = urllib2.urlopen(url)
        meta = u.info()
        file_name = url.split('/')[-1]
        file_size = int(meta.getheaders("Content-Length")[0])
        hashUrl, path = pyclib.getPathWithName(url)
        path_file = '{0}/{1}/{2}.{3}'.format(LOCAL_PATH, path, hashUrl, 'apk')
        path_dir  = '{0}/{1}'.format(LOCAL_PATH, path)
        source    = '{0}/{1}/{2}.{3}'.format(PREFIX, path, hashUrl, 'apk')
        result    = 0
        if not os.path.isfile(path_file):
            if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
            print "Downloading: %s Bytes: %s" % (path_file, file_size)
            f = open(path_file, 'wb')
            while True:
                buffer = u.read(8192)
                if not buffer: break
                f.write(buffer)
            f.close()
            u.close()

            dsize = os.path.getsize(path_file)
            if dsize!=file_size:
                cprint('Không lấy được đủ dung lượng file', 'red'); result = None
        else: cprint('File đã tồn tai !', 'red'); result = 1
        if ssh!=None and sftp!=None:
            #copy to 27.0.12.106
            stdin, stdout, stderr   = ssh.exec_command("mkdir -p -m 0777 {0}".format(path_dir))
            remote_file = path_file
            info = pyclib.rexists(sftp, remote_file)
            if not info:
                flgRun = False
                for i in range(0, 3):
                    try:
                        sftp.put(path_file, remote_file)
                        print 'Copy file: ', path_file, ' => ', remote_file
                        flgRun = True; break
                    except:
                       continue 
                if flgRun:
                    cprint('Lưu file thành công!', 'green');
                else: 
                    cprint('Không upload được file lên', 'red'); result = None
            else: cprint('File đã tồn tại trên server!', 'red')
            '''
            ssh_script  = 'ssh {0} "mkdir -p {1}; chmod -R 777 {2}"'.format(remoteHost, path_dir, '{0}/{1}'.format(LOCAL_PATH, path[:1]))
            os.system(ssh_script); print ssh_script
            scp_script  = 'scp "{0}" "{1}:{2}"'.format(path_file, remoteHost, path_dir)
            os.system(scp_script); print scp_script
            '''
        return result, source, file_name, file_size
    except:
        traceback.print_exc()
        return None, None, None, None

def processArticle(link, cat):
    ''' Process one article page in detail and store it in the article
    collection.

    Returns 1 when the article already exists in the DB, 0 when it was
    processed, and None on bad input or failure.
    '''
    try:
        if link==None or link=='': return
        check_exists, hashLink = checkArticleDuplicate(link)
        if check_exists==1: return 1
        print('#####################################################################################')
        print(pyclib.toAscii('Process article: ' + link))
        root_id = ROOT_ID; lft = LFT;
        print('Root_id, lft:  %s %s' % (root_id, lft))
        if root_id==None: return
        title = ''; thumbnail = ''; description = ''; postedDate = datetime.datetime.now(); tags = []; images = {}; downloads = {}

        tree        = pyclib.getXMLTree(link, userAgent=USER_AGENT)
        contentNode = tree.xpath('//div[@id="wrapper"]//div[@id="main"]//div[@id="content"]')
        if len(contentNode)==0: cprint('Sai xpath => không lấy được nội dung của tin.', 'red'); return
        primaryNode = contentNode[0]

        titleNode = primaryNode.xpath('.//div[@class="posttop"]/h2')
        if len(titleNode) > 0:  title = pyclib.getStringWithNode(titleNode[0])
        else: return 

        tagsNode = primaryNode.xpath('.//div[@class="posttop"]/div/div[@class="categs"]/a[@rel="category tag"]')
        for tag in tagsNode:
            tags.append(pyclib.getStringWithNode(tag))

        # Join the description paragraphs into one newline-separated text.
        dtext = ''
        descNode = primaryNode.xpath('.//div[@class="postcontent"]/div[@class="body"]/div//div[@itemprop="description"]/p')
        for dNode in descNode:
            dtext += pyclib.getStringWithNode(dNode) + "\n"
        description = dtext

        # Fetch the preview image, retrying up to three times.
        imgNode = primaryNode.xpath('.//div[@class="postcontent"]//div[@class="preview"]//img')
        if len(imgNode)>0:
            src = imgNode[0].get('src')
            # BUGFIX: the original tested `src==None and src==''`, which is
            # never true, so missing/empty sources slipped through.
            if src==None or src=='': return
            linkImage = src
            for i in range(1, 4):
                result = None; source = file_name = ''; size = 0
                if flgCopy!=None:
                    result, source, file_name, size = pyclib.saveImageWithSCP(linkImage, PREFIX, LOCAL_PATH, ssh, sftp)
                else:
                    result, source, file_name, size = pyclib.saveImage(linkImage, PREFIX, LOCAL_PATH)
                if result!=None: 
                    images = {'original_name': file_name, 'size': size, 'source': source}
                    thumbnail = source
                    break

        # Download the file if it is an .apk; otherwise keep the market link.
        # Paid wallpapers have no download section and are not stored.
        downloadNode = primaryNode.xpath('.//div[@class="postcontent"]/div[@class="body"]//div[@id="download"]//p[@class="text-button blue"]/a')
        if len(downloadNode)>0:
            linkDownload = downloadNode[0].get('href')
            if linkDownload.startswith('market'):
                downloads = {'original_name': '', 'size': '', 'source': linkDownload}
            else:
                for j in range(1, 4):
                    result = None; source = file_name = ''; size = 0
                    if flgCopy!=None:
                        result, source, file_name, size = downloadFileAPK(linkDownload, ssh, sftp)
                    else:
                        result, source, file_name, size = downloadFileAPKNotHash(linkDownload)
                    if result!=None:
                        downloads = {'original_name': file_name, 'size': size, 'source': source}
                        break
        else: cprint('Không chứa link download', 'red'); return

        # Without a preview image the article is not stored.
        if len(images)==0:
            cprint("Không lấy được ảnh!", "red"); return

        title = pyclib.toUnicodeDungSan(title)
        doc = { 'hashLink'      :   hashLink,
                'title'         :   title,
                'thumbnail'     :   thumbnail,
                'description'   :   description,
                'content'       :   {'image': images, 'download': downloads},
                'newsLink'      :   link,
                'update'        :   postedDate,
                'source'        :   'livewallpapers.org',
                'category'      :   lft,
                'root'          :   root_id,
                'is_active'     :   True,
                'lastupdate'    :   datetime.datetime.utcnow(),
                'timestamp'     :   time.time(),
                'date'          :   datetime.datetime.utcnow(),
                'tags'          :   tags, }

        if len(title) > 0: ARTICLE_COLLECTION.save(doc)
        else: cprint('Không lấy được nội dung của tin.', 'red')

        print(postedDate)
        print(hashLink)
        print(pyclib.toAscii('Title: ' + title))
        cprint('Thumbnail: ' + thumbnail, 'green')
        print(pyclib.toAscii('Intro: ' + description))
        return 0
    except:
        traceback.print_exc()
        
def processPage(page, cat, parameter=None):
    ''' Crawl one listing page of a category.

    Returns (duplicates, processed) counts for the page, or (None, None)
    when the page contains no articles or an error occurs.
    '''
    try:
        gbcount = loop = 0
        if parameter==None:
            lurl = '{0}/{1}/page/{2}/'.format(SITE_URL, CATEGORIES[cat]['link'], page)
        else:
            lurl = '{0}/page/{1}/?filter={2}&{3}'.format(SITE_URL, page, parameter, CATEGORIES[cat]['link'])
        cprint('Process page : ' + lurl, 'yellow')
        tree     = pyclib.getXMLTree(lurl, userAgent=USER_AGENT)
        listNode = tree.xpath('//div[@id="wrapper"]//div[@id="main"]//div[@id="content"]/div/div[@class="posttop"]//h2/a')
        # BUGFIX: the original returned a bare None here, making the caller's
        # two-value unpack raise TypeError and abort the whole category.
        if len(listNode) < 1: return None, None
        for node in listNode:
            count = processArticle(urljoin(SITE_URL, node.get('href')), cat)
            if count!=None: gbcount += count; loop += 1
        return gbcount, loop
    except:
        traceback.print_exc(); return None, None
       
def getMaxPageWithURL(lurl):
    ''' Return the exclusive upper bound for page iteration of a listing URL.

    Reads the pagination widget and returns its trailing page count + 1, so
    ``range(1, maxPage)`` visits every page.  Returns 2 (just page 1) when
    no widget is found, and 1 (nothing to crawl) for an empty URL or on
    failure.
    '''
    try:
        if lurl==None or lurl=='': return 1
        tree     = pyclib.getXMLTree(lurl, userAgent=USER_AGENT)
        pageNode = tree.xpath('//div[@id="wrapper"]//div[@id="main"]//div[@id="content"]//div[@id="navigation"]//span[@class="pages"]')
        if len(pageNode) > 0:
            text = pyclib.getStringWithNode(pageNode[0])
            # The page count is the trailing number of the widget text.
            m = pyclib.regexString(r'(\d+)$', text)
            if m:
                return int(float(m.group(1))) + 1
        return 2
    except:
        traceback.print_exc()
        # BUGFIX: the original fell through and returned None here, which
        # crashed callers in range(1, None); treat failure as "no pages".
        return 1

def processCategory(cat):
    try:
        gbcount = 0; loop = 0
        cprint('Process category : ' + CATEGORIES[cat]['link'], 'yellow')
        if cat=='bestrated' or cat=='popular':
            for parameter in PARAMETERS:
                lurl = '{0}/?{1}&{2}'.format(SITE_URL, parameter, CATEGORIES[cat]['link'])
                maxPage = getMaxPageWithURL(lurl)
                for page in range(1, maxPage):
                    print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
                    if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                        cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                        return 
                    c, l = processPage(page, cat, parameter)
                    if c!=None: gbcount += c; loop += l
        lurl = '{0}/{1}'.format(SITE_URL, CATEGORIES[cat]['link'])
        maxPage = getMaxPageWithURL(lurl)
        print 'maxPage: ', maxPage
        for page in range(1, maxPage):
            print 'COUNT, LOOP : (', gbcount, ', ', loop, ')' 
            if gbcount>MAX_COUNT or loop>MAX_ARTICLE:
                cprint('Dừng xử lý do vượt quá số lượng trùng lặp cho phép hoặc vượt quá số lượng tin cần lấy.')
                return 
            c, l = processPage(page, cat)
            if c!=None: gbcount += c; loop += l
    except:
        traceback.print_exc()

def timeOut():        
    global totalNewsCrawlered, totalNewsDuplicated
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy.', datetime.datetime.now()
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

if __name__ == '__main__':
    try:
        cprint('start crawler livewallpapers.org', 'yellow')
        #start = time.time() 
        #timeout = threading.Thread(target=timeOut).start()
        # Crawl every configured category on a pool of 8 worker threads.
        pool = workerpool.WorkerPool(size=8)
        pool.map(processCategory, CATEGORIES.keys())
        pool.shutdown(); pool.wait()
        # Close the SSH mirror connection when remote copying was enabled.
        if flgCopy!=None: ssh.close()
        pyclib.forceQuit()
    except:
        traceback.print_exc()
