# -*- coding: utf-8 -*-
'''
Created on Nov 18, 2011

@author: Forever Together
'''
import os
import datetime
import commonlib
import traceback
import re
import workerpool
import mechanize
import pickle
import html2dict as html2text
from lxml import etree
from MongoModel import MongoModel

DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/truyentranhtuan/'
MONGO_SERVER = 'localhost'
MONGO_PORT = 27017
DBNAME = 'truyentranh'


def sort_chapter_page(pages):
    """Sort key for a chapter's page records: order by source URL."""
    original_url = pages['originalUrl']
    return original_url

def sort_chapter(chapter):
    """Sort key for story chapters: order by numeric chapter number."""
    number = chapter['chapter']
    return number

class DownloadJob(workerpool.Job):
    """Worker-pool job that calls function(folderName, chapterNum, link)
    on a pool thread when scheduled."""

    def __init__(self, folderName, chapterNum, link, function):
        # Stash everything the callback needs until the pool runs us.
        (self.folderName,
         self.chapterNum,
         self.link,
         self.function) = folderName, chapterNum, link, function

    def run(self):
        # Invoked by the worker pool on one of its threads.
        callback = self.function
        callback(self.folderName, self.chapterNum, self.link)
        
class TruyenTranhTuan(MongoModel):
    """Crawler for truyentranhtuan.com.

    Scrapes the site's story list, per-story metadata and chapter images,
    saves image files under DOWNLOAD_PATH and one document per story in
    the 'truyentranhtuan' collection of the DBNAME Mongo database.
    """
    
    # Pickle file accumulating {'link', 'error'} records for stories that
    # failed to crawl (see getListStory).
    errorFile = "truyentranhtuan.err"
    
    def __init__(self, host, port):
        # MongoModel presumably opens self.connection used in
        # getListStory -- TODO confirm against MongoModel.
        MongoModel.__init__(self, host, port)
        
    def extractInfo(self, info):
        """Parse story-detail text lines into a metadata dict.

        info -- iterable of dicts with a 'data' key holding one text line
                (presumably html2text output; verify against caller).
        Returns {'author', 'title', 'lastChapter', 'description', 'type'};
        fields that are not found keep their empty defaults.
        """
        data = {'author': '', 'title': '', 'lastChapter': 0, 'description': '', 'type': ''}
        # f is set after the summary heading: the *next* line is the description.
        f = False
        for line in info:
            if f: data['description'] = line['data']; f = False; continue
            if re.search(r'Tên truyện', line['data']):  # "story title"
                data['title'] = commonlib.extractWithRegEx(r"Tên truyện:\s*(.+)", line['data'], 1)
            elif re.search(r'Tác Giả', line['data']):  # "author"
                data['author'] = commonlib.extractWithRegEx(r"Tác Giả:\s*(.+)", line['data'], 1)
            elif re.search(r'Thể loại', line['data']):  # "genre"
                data['type'] = commonlib.extractWithRegEx(r"Thể loại:\s*(.+)", line['data'], 1)
            elif re.search(r"Tóm tắt truyện", line['data']):  # "summary" heading
                f = True; continue
            elif re.search(r'Chương mới nhất', line['data']):  # "latest chapter"
                data['lastChapter'] = int(commonlib.extractWithRegEx(r'Chương mới nhất:\s*([0-9]+)', line['data'], 1))
        return data
    
    def isEmptyInfo(self, info):
        """Return True when title, description and author are all empty."""
        if (info['title'] == '') and (info['description'] == '') and (info['author'] == ''): return True
        return False
            
    def downloadImageTo(self, downloadPath, url):
        """Download one image from url to downloadPath.

        Makes up to 4 attempts; a response whose string form contains
        'image/' counts as success.  Returns 1 on success, 0 on failure,
        -1 when the file already exists on disk.
        """
        print 'call downloadImage to {0} from {1}'.format(downloadPath, url)
        downloadSucess = 0
        if not os.path.exists(downloadPath):
            # Create the destination directory on demand.
            if not os.path.isdir(os.path.dirname(downloadPath)): os.makedirs(os.path.dirname(downloadPath), 0777)
            for itime in range(1,5):
                try:
                    print 'download {0}#{1}'.format(url, itime)
                    location, response = mechanize.urlretrieve(url, downloadPath)
                    # Accept only responses that look like an image payload.
                    if 'image/' in str(response):
                        downloadSucess = 1
                        break
                except:    
                    # NOTE(review): bare except -- any error just triggers a retry.
                    continue
        else:
            downloadSucess = -1  # already on disk; callers treat this as success
        return downloadSucess    
        
    def downloadImage(self, folderName, chapterNum, link):
        """Download one chapter page image (worker-pool job body).

        Saves to DOWNLOAD_PATH/folderName/chapterNum/<name>, where <name>
        is the trailing '-<digits>.<ext>' part of link, lower-cased.
        Existing files are skipped.  Prints the traceback and re-raises
        on unexpected errors.
        """
        try:
            downloadFolder = DOWNLOAD_PATH + folderName + "/" + chapterNum
            fileName = commonlib.extractWithRegEx(r"-(\d+\.[a-zA-Z]+)", link, 1).lower()
            downloadPath = downloadFolder + "/" + fileName
            print 'call downloadImage to {0} from {1}'.format(downloadPath, link)
            if not os.path.exists(downloadPath):
                downloadSucess = False
                # Same retry loop as downloadImageTo: up to 4 attempts.
                for itime in range(1,5):
                    try:
                        print 'download {0}#{1}'.format(link, itime)
                        location, response = mechanize.urlretrieve(link, downloadPath)
                        if 'image/' in str(response):
                            downloadSucess = True
                            break
                    except:
                        # NOTE(review): bare except -- any error just triggers a retry.
                        continue
                if downloadSucess: print '------- successed {0}'.format(link)
        except:
            traceback.print_exc()
            raise
        
    def getFileSize(self, folderName, chapterNum, link):
        """Return the on-disk size in bytes of the image saved for link,
        or -1 when the file does not exist.  Mirrors downloadImage's
        path construction (lower-cased file name)."""
        downloadFolder = DOWNLOAD_PATH + folderName + "/" + chapterNum
        fileName = commonlib.extractWithRegEx(r"-(\d+\.[a-zA-Z]+)", link, 1).lower()
        downloadPath = downloadFolder + "/" + fileName
        if not os.path.isfile(downloadPath): return -1
        return os.path.getsize(downloadPath)
            
    def getChapter(self, folderName, chapterNum, url):
        """Download every page image of one chapter.

        Extracts the JavaScript 'slides2' array from the chapter page,
        downloads each image via a 3-worker pool, then returns the page
        records sorted by original URL:
        [{'originalUrl', 'size', 'original_name', 'shortPath'}, ...]
        Prints the traceback and re-raises on error.
        """
        print 'call getChapter: {0}, url={1}'.format(chapterNum, url)
        try:
            html = commonlib.getHTML(url)
            # SECURITY: eval() runs whatever the scraped page puts in
            # slides2 -- untrusted input; a JSON/ast.literal_eval parse
            # would be safer.
            linkArr = eval(commonlib.extractWithRegEx(r"var slides2=(.+?);", html, 1))
            pool = workerpool.WorkerPool(size=3)
            downloadFolder = DOWNLOAD_PATH + folderName + "/" + chapterNum
            if not os.path.isdir(downloadFolder): os.makedirs(downloadFolder, 0777)
            for link in linkArr:
                link = commonlib.urlJoin(url, link)
                pool.put(DownloadJob(folderName, chapterNum, link, self.downloadImage))
            # Wait for every download job to finish before sizing files.
            pool.shutdown()
            pool.wait()
            data = []
            for link in linkArr:
                link = commonlib.urlJoin(url, link)
                # NOTE(review): fileName is NOT lower-cased here, unlike in
                # downloadImage -- shortPath may not match the on-disk
                # (lower-cased) name when the link contains upper case.
                fileName = commonlib.extractWithRegEx(r"-(\d+\.[a-zA-Z]+)", link, 1)
                data.append({'originalUrl': link, 'size': self.getFileSize(folderName, chapterNum, link), 'original_name': fileName, 'shortPath': '/uploads/truyentranhtuan/{0}/{1}/{2}'.format(folderName, chapterNum, fileName)})
            data.sort(key=sort_chapter_page)
            return data
        except:
            traceback.print_exc()
            raise
            
    def getStoryInfo(self, url, folderName):
        """Crawl one story page: metadata, thumbnail and all chapters.

        Returns (chapters, thumbnail, info) with chapters sorted by
        chapter number, or None (the bare `return` paths) when the page
        yields no usable content -- callers unpacking the result will
        then raise TypeError, caught by getListStory's per-row handler.
        """
        print 'call getStoryInfo'
        data = []
        try:
            tree = commonlib.getXMLTree(url)
            if tree == None: return;
            # Detail block: div whose id ends with '-chitiet' ("-detail").
            contentNode = tree.xpath("//div[contains(@id, '-chitiet')]")
            if len(contentNode) > 0:
                info = self.extractInfo(html2text.html2text(etree.tounicode(contentNode[0])))
                if self.isEmptyInfo(info): return;
                thumbnail = commonlib.getAttribText(tree.xpath("//div[contains(@class, 'title-logo')]/img"), 'src')
                if thumbnail != '':
                    thumbnail = commonlib.urlJoin(url, thumbnail)
                    downloadPath = DOWNLOAD_PATH + folderName + "/logo.jpg"
                    ret = self.downloadImageTo(downloadPath, thumbnail)
                    # -1 (already on disk) and 1 (fresh download) both count.
                    thumbnail = '/uploads/truyentranhtuan/{0}/logo.jpg'.format(folderName) if ret in [-1, 1] else ''
                # Chapter rows: every table row after the header row.
                for ichapter in tree.xpath("//div[@id='content-main']/table//tr[position()>1]"):
                    link = commonlib.getAttribText(ichapter.xpath("./td[1]/a"), 'href')
                    if link == '': continue
                    link = commonlib.urlJoin(url, link)
                    # Chapter number = trailing digits/dots of the link text.
                    chapterNum = commonlib.extractWithRegEx(r"([0-9\.]+)$", commonlib.getElementText(ichapter.xpath("./td[1]/a")), 1)
                    pages = self.getChapter(folderName, chapterNum, link)
                    data.append({'chapter': float(chapterNum), 'pages': pages})
                data.sort(key=sort_chapter)
                return data, thumbnail, info
        except:
            traceback.print_exc()
            raise
        
    def getListStory(self):
        """Crawl the full story list page and store new stories in Mongo.

        Stories whose link MD5 hash already exists in the collection are
        skipped.  Updates the module-level totalCrawled / totalError
        counters (declared global below); a failure on one story is
        recorded in the errorFile pickle and crawling continues.
        """
        print 'call getListStory'
        url = "http://truyentranhtuan.com/danh-sach-truyen"
        global totalCrawled, totalError
        try:
            tree = commonlib.getXMLTree(url)
            if tree == None:
                print 'Error at getXMLTree getListStory'
                return;
            db = self.connection[DBNAME]
            collection = db['truyentranhtuan']
            lastLink = ''
            # Rows after the two header rows of the table following the
            # "DANH SÁCH TRUYỆN" ("story list") heading.  The pattern is
            # decoded so lxml gets a unicode XPath string (Python 2).
            for item in tree.xpath("//h1[contains(., 'DANH SÁCH TRUYỆN')]/following-sibling::*[1]//tr[position()>2]".decode('utf-8')):
                try:
                    sName = commonlib.getElementText(item.xpath(".//td[1]/a"))
                    sLink = commonlib.getAttribText(item.xpath(".//td[1]/a"), "href")
                    sFolder = sLink.replace("/", '')  # relative link -> folder name
                    if sLink != '': sLink = commonlib.urlJoin(url, sLink)
                    if sLink == '': continue
                    lastLink = sLink  # remembered for the error record below
                    sLastChapter = commonlib.getElementText(item.xpath(".//td[2]/a"))
                    sStatus = commonlib.getElementText(item.xpath(".//td[3]"))
                    # 'Hoàn thành' = "completed"
                    isFinished = 1 if sStatus == 'Hoàn thành' else 0
                    print '----------------------'
                    print 'Truyen: ' + sName
                    print 'Trang thai: ' + sStatus
                    print '----------------------'
                    hashUrl = commonlib.getMD5Hash(sLink)
                    if not collection.find_one({'hashUrl': hashUrl}):
                        content, thumbnail, info = self.getStoryInfo(sLink, sFolder);
                        collection.save({'hashUrl': hashUrl, 'title': sName, 'link': sLink, 'chuongmoinhat': sLastChapter, 'lastChapter': info['lastChapter'], 'logo': thumbnail, 'finished': isFinished,
                                         'tag': 'truyentranh', 'description': info['description'], 'author': info['author'], 'category': info['type'],
                                         'content': content})
                        totalCrawled += 1
                    else:
                        print '#$# Already exists in database'
                except:
                    # Record the failure and keep going with the next story.
                    totalError += 1
                    errList = self.loadPickle(self.errorFile)
                    errList.append({'link': lastLink, 'error': traceback.format_exc()})
                    self.savePickle(errList, self.errorFile)
                    continue
        except:
            traceback.print_exc()
            
    def savePickle(self, ob, fileName):
        """Pickle ob to fileName; errors are printed and swallowed."""
        try:
            fp = open(fileName, 'wb')
            pickle.dump(ob, fp)
            fp.close()
        except:
            traceback.print_exc()
        
    def loadPickle(self, fileName):
        """Unpickle and return the object stored in fileName, or [] when
        the file is missing/unreadable (the error is printed)."""
        ob = None
        try:
            fp = open(fileName, 'rb')
            ob = pickle.load(fp)
            fp.close()
        except:
            traceback.print_exc()
        if ob == None: return []
        return ob
    
if __name__ == '__main__':
    totalCrawled = 0
    totalError = 0
    print 'start crawler truyentranhtuan';
    crawler = TruyenTranhTuan(MONGO_SERVER, MONGO_PORT)
    crawler.getListStory()
    print 'total story crawled: {0}'.format(totalCrawled)
    print 'total story error while crawled: {0}'.format(totalError)
    print 'finished crawler truyentranhtuan at {0}'.format(datetime.datetime.now())
    os._exit(1)