# -*- coding: utf-8 -*-
''' @author: Dinh Manh Dau <dinhmanhdau@gmail.com>
'''
import  workerpool
import  traceback
import  datetime, time
import  re, os
import  pycommonlib as pyclib
import  html2textlib
import  urllib
import  threading
import  urlparse

from    pymongo     import  Connection
from    termcolor   import  cprint

# --- Crawl/storage configuration -------------------------------------------
# Local filesystem root where downloaded chapter images are written.
LOCAL_PATH          = '/home/data1/truyentranhtuan'
# Path recorded in each stored document as 'foldername' (harvest archive).
FOLDER_PATH          = '/home/hoangnamhai/HarvestedData/truyentranhtuan'
#MONGO_SERVER        = 'beta.mana.vn'   
MONGO_SERVER        = 'localhost'   
MONGO_PORT          = 27017
DATABASE            = 'my_database'
# URL-visible prefix stored with saved images ('shortPath'/'source' fields).
PREFIX              = '/uploads/truyentranhtuan' 
SITE_URL            = 'http://truyentranhtuan.com/danh-sach-truyen'
SITE_BASE           = 'http://truyentranhtuan.com'
# Primary MongoDB connection: crawled articles are written here.
CONNECT             = Connection(MONGO_SERVER, MONGO_PORT)
DB                  = CONNECT[DATABASE]
ARTICLE_COLLECTION  = DB['truyentranh_article']
CATEGORY_COLLECTION = DB['category']
USER_COLLECTION     = DB['backend_user']
# Global veto flag: cleared when any page image fails to download/parse,
# which later prevents saving an incomplete story document (see processImage).
flagSave = True
os.umask(0000)  # files/dirs created by this process get maximal permissions
# Secondary MongoDB used for category and user lookups.
# NOTE(review): presumably the authoritative backend DB - confirm.
conn    = Connection('27.0.12.106', 27017)
db2     = conn['my_database']
def getRootLft(name=None):
    '''Look up a category node in the backend DB and return (root_id, lft).

    name -- category display name; defaults to the 'Truyện tranh' root node.
    Returns (None, None) when the category does not exist or the query fails.
    '''
    try:
        collection = db2['category']
        if name == None:
            result = collection.find_one({'data': 'Truyện tranh'}, {'root_id': 1, 'lft': 1})
        else:
            result = collection.find_one({'data': name}, {'root_id': 1, 'lft': 1})
        if result == None:
            cprint('Category chưa tồn tại !', 'red')
            return None, None
        return result['root_id'], result['lft']
    except:
        traceback.print_exc()
        # Bug fix: previously this path fell through and returned a bare None,
        # which crashed the module-level `root_id, lft = getRootLft()` unpack.
        return None, None
# Resolve the category-tree position for comics once at import time; abort
# the whole process if the category is missing from the DB.
root_id, lft = getRootLft()
if root_id==None: pyclib.forceQuit()

def checkDuplicate(link):
    ''' Check whether a story link already exists in the DB.

    Returns a 5-tuple (exists, hashLink, storyId, latestChapter, finished):
    exists is 1 when the story is already stored, 0 when it is new, and all
    five values are None when the link is empty or the lookup fails.
    '''
    try:
        if link == None or link == '':
            return None, None, None, None, None
        hashLink = pyclib.getMd5(link)
        existing = ARTICLE_COLLECTION.find_one({'hashLink' : hashLink})
        if existing == None:
            return 0, hashLink, None, 0, 0
        cprint('Story đã tồn tại trong cơ sở dữ liệu', 'red')
        return 1, hashLink, existing['_id'], existing['chuongmoinhat'], existing['finished']
    except:
        traceback.print_exc()
        return None, None, None, None, None

def getAuthor(name='crawler'):
    '''Return the _id of the backend user *name*.

    Falls back to user 'Hindua88' on the primary DB when *name* is not found
    on the secondary DB; returns None when neither exists or a query fails.
    '''
    try:
        users = db2['backend_user']
        doc = users.find_one({'username': name}, {})
        if doc == None:
            doc = USER_COLLECTION.find_one({'username': 'Hindua88'}, {})
        if doc == None:
            return None
        return doc['_id']
    except:
        traceback.print_exc()

def timeOut():        
    while True:
        delta = time.time() - start
        if delta > 900:
            print 'Dừng chương trình vì vượt quá thời gian chạy(time out).', datetime.datetime.now()
            pid = os.getpid(); os._exit(1); os.kill(pid, 9)
        time.sleep(30)

def processImage(data):
    '''Download a single comic-page image.

    data -- dict with 'page' (image URL, possibly relative to SITE_BASE)
            and 'folder' (destination directory).
    Returns {'page': <page number>, 'image': <saved-image info dict>} on
    success, None otherwise. On unrecoverable failures the module-global
    flagSave is cleared so the incomplete story is not saved later.
    '''
    global flagSave
    try:
        page    = data['page']
        folder  = data['folder'] 
        #print 'Download image from: ', page
        # Fallback chain of patterns to extract the page number from the URL.
        # NOTE(review): the '.' before JPG/jpg is unescaped, so it matches any
        # character; the looser patterns below catch those URLs anyway.
        m    = pyclib.regexString('-(\d+).JPG', page)
        if not m: m  = pyclib.regexString('-(\d+).jpg', page)
        if not m: m  = pyclib.regexString('_(\d+)', page)
        if not m: m  = pyclib.regexString('-(\d+).', page)
        if not m: m  = pyclib.regexString('-(\d+)', page)
        if m:
            image_name = m.group(1)
            sotrang = int(float(m.group(1)))  # numeric page number
            page = urlparse.urljoin(SITE_BASE, page)  # absolutize relative URLs
            linkImage = page
            result, source, file_name, size = pyclib.saveImageNotHash(linkImage, PREFIX, folder, image_name)
            if result!=None:
                images = {'original_name': file_name, 'size': size, 'source': source, 'link': linkImage}
                return {'page': sotrang, 'image': images}
            else:
                # Download/save failed: ignore junk entries (.doc/.db),
                # otherwise veto saving the whole story.
                if not file_name.endswith('.doc') and not file_name.endswith('.db'):
                    if flagSave: flagSave = False
        else:
            # Could not even parse a page number out of the URL.
            if flagSave: flagSave = False
    except:
        traceback.print_exc()
        flagSave = False

def processChapter(link, foldername):
    try:
        print 'PROCESS CHAPTER: ', link
        tree, html = pyclib.getXMLTree(link, returnHTML=True)
        listPages = []; key_chapter = ''
        preg    = re.compile(r'var slides2=(.+);')
        m       = preg.search(html)
        arr     = link.split('/')
        if len(arr)<2: cprint('Không phải là link story, check lại link: '+ link, 'red'); return
        if link.endswith('/'): key_chapter = arr[-2]
        else: key_chapter = arr[-1]
        if key_chapter=='': cprint('Không lấy được số chương.', 'red'); return
        #key_chapter = key_chapter.replace('.', '-')
        path_dir    = '{0}/{1}'.format(foldername, key_chapter)
        result      = makeDirectory(path_dir)
        if result==None: cprint('Không tạo được directory with path:' + path_dir, 'red'); return
        if m: 
            exec('data=' + m.group(1))
            listImages = []; tmp = {}; listPages = []
            pool        = workerpool.WorkerPool(size=8)
            data_image  = []
            for item in data: data_image.append({'folder': path_dir, 'page': item})
            listImages  = pool.map(processImage, data_image)
            pool.shutdown(); pool.wait()
            for item in listImages: 
                if item==None: continue
                tmp[item['page']] = item['image']
            for key, value in tmp.iteritems(): listPages.append(value)
            return {'chapter': str(key_chapter), 'pages': listPages}
        else: cprint('Không lấy được nội dung của truyện.', 'red')
    except:
        traceback.print_exc()

def getSoChuongMoiNhat(text):
    '''Extract the first integer found in *text* (the newest chapter
    number); returns 0 when no digits are present, None on error.'''
    try:
        match = pyclib.regexString('(\d+)', text)
        if not match:
            return 0
        return int(float(match.group(1)))
    except:
        traceback.print_exc()

def makeDirectory(path_dir):
    '''Create *path_dir* (including parents) if needed.

    Returns 1 when the directory was created, 0 when it already existed,
    and None when creation failed.
    '''
    try:
        if os.path.isdir(path_dir):
            return 0
        os.makedirs(path_dir)
        return 1
    except:
        traceback.print_exc() 

def getNameFolderWithUrl(link):
    '''Return the last path segment of *link* (the segment before a single
    trailing slash, if present); None when *link* cannot be split.'''
    try:
        segments = link.split('/')
        if link.endswith('/'):
            return segments[-2]
        return segments[-1]
    except:
        traceback.print_exc()

def processStory(link, title):
    '''Crawl one story page: scrape metadata and every chapter, then insert
    or update the story document in ARTICLE_COLLECTION.

    link  -- absolute URL of the story page.
    title -- title from the index page. NOTE(review): this parameter is
             immediately overwritten by the title scraped from the story
             page itself, so it is effectively unused - confirm intent.
    Returns the `exists` flag from checkDuplicate (0 new / 1 known), or
    None on early exit or error.
    '''
    try:
        global flagSave
        flagSave = True  # reset the per-story "all pages downloaded OK" veto
        print '###############################################################################'
        exists, hashLink, storyId, oldChapter, statusStory = checkDuplicate(link)
        if exists==None: return
        # Stories marked finished in the DB never change; skip them.
        if statusStory==1: cprint('Truyện đã hoàn thành.', 'green'); return
        print 'Process story with link: ', link
        # create folder
        sub_folder    = getNameFolderWithUrl(link)
        if sub_folder==None: cprint('không tạo được folder chứa truyện with: ' + link, 'red'); return
        foldername  = '{0}/{1}'.format(LOCAL_PATH, sub_folder)
        result      = makeDirectory(foldername)
        if result==None: cprint('Không make được thư mục with path: ' + foldername, 'red'); return
        shortPath = '{0}/{1}'.format(PREFIX, sub_folder)
        # Defaults for all scraped metadata fields.
        images = {}; title =''; othertitle = ''; theloai = [];  tacgia = []; chuongmoinhat = 0; status = ''
        description = ''; finished = 0; thumbnail = '';

        tree    = pyclib.getXMLTree(link)
        firstNode = tree.xpath('//div[@id="content-top"]')
        if len(firstNode)<1: cprint('Sai xpath không thể lấy được nội dung.', 'red'); pyclib.forceQuit()
        introNode = firstNode[0]
        # Cover/thumbnail image, saved as 'logo' in the story folder.
        imageNode = introNode.xpath('./div[@class="title-logo1"]/img')
        if len(imageNode)>0:
            linkImage = imageNode[0].get('src')
            result = None; source = file_name = ''; size = 0
            result, source, file_name, size = pyclib.saveImageNotHash(linkImage, PREFIX, foldername, 'logo')
            if result!=None: thumbnail = file_name
        # Title spans: the first is the title, spans containing <a> children
        # are genre lists, any other span is treated as an alternate title.
        # Each node is removed from the tree so the leftover text can be
        # scanned for the newest chapter number below.
        titleNode = introNode.xpath('.//div[@id="fontsize-chitiet"]/span[@class="series-info"]')
        count = len(titleNode)
        if count==0: cprint('Không lấy được tiêu đề.', 'red'); return
        for i in range(0, count):
            node = titleNode[i]
            text = pyclib.getStringWithNode(node)
            if i==0: title = text; node.getparent().remove(node); continue
            aNode = node.xpath('./a')
            if len(aNode)>0:
                for anode in aNode: theloai.append(pyclib.getStringWithNode(anode))
            else: othertitle = text
            node.getparent().remove(node) 
        # <a> tags with a title attribute are authors; the remaining one
        # carries the publication status ('hoàn thành' => finished).
        aNode = introNode.xpath('.//div[@id="fontsize-chitiet"]/a')
        for anode in aNode:
            text = pyclib.getStringWithNode(anode)
            if anode.get('title')!=None: tacgia.append(text)
            else: 
                status = text
                if pyclib.getDatatypeName(text)=='unicode': text = text.encode('utf-8')
                text   = text.lower()
                if text=='hoàn thành': finished = 1
            anode.getparent().remove(anode)
        # Description = space-joined text of the remaining <p> nodes.
        descNode = introNode.xpath('.//div[@id="fontsize-chitiet"]/p')
        for node in descNode:
            text = pyclib.getStringWithNode(node)
            if len(description)>0: description += ' '
            if text!=None and len(text)>0: description += text
            node.getparent().remove(node)
        # Whatever text is left in the intro node holds the newest chapter no.
        chuongmoinhat = getSoChuongMoiNhat(pyclib.getStringWithNode(introNode))
        # get chapter
        data = []; sochuong = 0

        print 'Chuong moi nhat', chuongmoinhat, oldChapter
        if exists==1:
            # Known story: re-crawl only when a newer chapter exists.
            # NOTE(review): this re-downloads ALL chapters, not just the new
            # ones - confirm that is intended.
            if chuongmoinhat>oldChapter:
                listNode = tree.xpath('//div[@id="content-main"]/table/tr/td/a')
                for i in range(len(listNode)-1, -1, -1):  # oldest chapter first
                    node = listNode[i]
                    href = urlparse.urljoin(SITE_BASE, node.get('href'))
                    tmp = processChapter(href, foldername)
                    data.append(tmp)
        else: 
            listNode = tree.xpath('//div[@id="content-main"]/table/tr/td/a')
            sochuong = len(listNode)
            for i in range(sochuong-1, -1, -1):  # oldest chapter first
                node = listNode[i]
                href = urlparse.urljoin(SITE_BASE, node.get('href'))
                tmp = processChapter(href, foldername)
                data.append(tmp)
        # Normalize title/othertitle/description to unicode objects (Py2).
        if pyclib.getDatatypeName(title)=='unicode': title = title.encode('utf-8')
        title = unicode(title, 'utf-8')
        if pyclib.getDatatypeName(othertitle)=='unicode': othertitle = othertitle.encode('utf-8')
        othertitle = unicode(othertitle, 'utf-8')
        if pyclib.getDatatypeName(description)=='unicode': description = description.encode('utf-8')
        description = unicode(description, 'utf-8')
        print 'Title:', pyclib.toAscii(title)
        print 'Other title: ', pyclib.toAscii(othertitle)
        print 'Tac gia:', tacgia
        print 'The loai:', theloai
        print 'Description: ', pyclib.toAscii(description)
        print 'Chuong moi nhat: ', chuongmoinhat
        print 'Finished: ', finished
        print 'Status: ', pyclib.toAscii(status)
        print 'shortPath: ', shortPath
        if sochuong==0: sochuong = chuongmoinhat
        author  = getAuthor()
        if author==None: cprint('Author chưa tồn tại trong cơ sở dữ liệu.', 'red'); pyclib.forceQuit()
        if exists==1:
           if chuongmoinhat>oldChapter:
               ARTICLE_COLLECTION.update({'_id': storyId}, {'$set': {'content': data, 'lastupdate': datetime.datetime.now(), 'chuongmoinhat': chuongmoinhat}}) 
               cprint('Cập nhật dữ liệu mới.', 'green')
           else: cprint('Không có dữ liệu mới.', 'red')
        else:
            doc = { 'hashLink'      : hashLink,  
                    'root'          : root_id,  
                    'category'      : lft,
                    'description'   : description,
                    'content'       : data,
                    'date'          : datetime.datetime.now(),
                    'lastupdate'    : datetime.datetime.now(),
                    'is_active'     : True,
                    'price'         : False,
                    'source'        : 'truyentranhtuan.com',
                    'title'         : title,
                    'othertitle'    : othertitle,
                    'shortPath'     : shortPath,
                    'tacgia'        : tacgia,
                    'theloai'       : theloai,
                    'foldername'    : FOLDER_PATH,
                    'sochuong'      : sochuong,
                    'link'          : link,
                    'type'          : 'truyentranh',
                    'update'        : datetime.datetime.utcnow(),
                    'tags'          : ['truyen tranh'],
                    'chuongmoinhat' : chuongmoinhat,
                    'thumbnail'     : thumbnail,
                    'author_id'     : author,
                    'status'        : status,
                    'finished'      : finished
             }
            # Only save when every page image downloaded successfully.
            if flagSave:
                ARTICLE_COLLECTION.save(doc)
                cprint('Lưu thành công story.', 'yellow');
        return exists
    except:
        traceback.print_exc()

def process():
    '''Walk the site's story index page and crawl every story listed on it.'''
    try:
        tree = pyclib.getXMLTree(SITE_URL)
        anchors = tree.xpath('//div[@id="content-main"]//table//tr/td[1]/a')
        for anchor in anchors:
            story_link = urlparse.urljoin(SITE_BASE, anchor.get('href'))
            story_title = pyclib.getStringWithNode(anchor)
            processStory(story_link, story_title)
    except:
        traceback.print_exc()
if __name__ == '__main__':
    try:
        cprint('start crawler truyentranhtuan.com', 'yellow')
        # Record crawl start time; read by the timeOut watchdog thread.
        start = time.time() 
        # Watchdog thread hard-kills the process after 900s (see timeOut).
        # NOTE(review): Thread.start() returns None, so `timeout` is always
        # None - the thread handle is effectively discarded.
        timeout = threading.Thread(target=timeOut).start()
        process()
#        processStory('http://truyentranhtuan.com/9-faces-of-love/', '')
#        processStory('http://truyentranhtuan.com/otoyomegatari/', '')
#        processStory('http://truyentranhtuan.com/dragon-quest-dai-no-daiboken/', '')
#        processStory('http://truyentranhtuan.com/great-teacher-onizuka/', '')
#        pool = workerpool.WorkerPool(size=1)
#        pool.map(processCategory, CATEGORIES.keys())
#        pool.shutdown(); pool.wait()
        pyclib.forceQuit()
    except:
        traceback.print_exc()

# Release the pymongo connection sockets back to their pools.
# NOTE(review): these also run when the module is merely imported; when run
# as a script, pyclib.forceQuit() above may terminate the process before
# this line is reached - confirm.
CONNECT.end_request()     
conn.end_request()
