# -*- encoding: utf-8 -*-
import pycommonlib as pyclib
import html2textlib
import datetime, os, traceback
import workerpool
import json, gzip, base64

from urlparse import urljoin
from lxml import etree

SITE_URL = 'http://m.truyentranhnhanh.com'
LOCAL_PATH = '/home/data'
FOLDER_STORY = 'bayvienngocrong'
PREFIX = ''
story = []

def processLink(job):
    try:
        link = job['link']
        title = job['title']
        folder = job['folder']        
        print 'Process ', pyclib.toAscii(title), ', ', link
        localPath = '{0}/{1}/{2}'.format(os.path.dirname(os.getcwd()), 'Comics', folder)
        print 'local path: ', localPath
        if not os.path.isdir(localPath): os.makedirs(localPath)
        # Check file .htm
        path_file_intro = '{0}/intro.htm'.format(localPath)
        #if os.path.isfile(path_file_intro): print 'Process ', pyclib.toAscii(title), ' completed.'; return
        file_intro = open(path_file_intro, 'w')
        # Get tree
        tree = pyclib.getXMLTree(link)    
        contentNode = tree.xpath('//article')
        if len(contentNode) == 0: print 'Sai xpath => ko lay duoc noi dung'; return
        primaryNode = contentNode[0]
        # Lay title
        title = ''
        titleNode = primaryNode.xpath('.//header[@id="topbloc"]/h2')
        if len(titleNode) > 0:
            title = pyclib.getStringWithNode(titleNode[0])
        print 'Title: ', pyclib.encodeUTF8(title)
        # Lay danh sach chuong
        chapters = []
        listNode = primaryNode.xpath('.//ul[@class="ulpost"]/li/a')
        for node in listNode:
            link = urljoin(SITE_URL, node.get('href'))
            name = pyclib.getStringWithNode(node)
            doc = {'link': link, 'name': name}
            chapters.append(doc)
        # Lay mo ta    
        descNode = primaryNode.xpath('.//div[@align="left"]')
        desc = ''
        if len(descNode) > 0:
            chtml = etree.tounicode(descNode[0], method='html')
            data, imgs = html2textlib.getContent(chtml, SITE_URL, explode='\n', output=False, stdOut=False)
            for item in data:
                if not item.has_key('type'): continue
                if item['type'] == 'image': continue
                if len(item['data']) > 0:
                    text = item['data']
                    text = ' '.join(text.split())
                    text = text.strip()
                    if len(text) > 0: desc += text + '<br />\n'
        print 'Desc: ', pyclib.toAscii(desc)
        if len(chapters) == 0: print 'Sai xpath không lấy được danh sách chương'; return
        # Write file html
        file_intro.write('''<html>
                                <head>
                                    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
                                    <link type="text/css" rel="stylesheet" href="../css/style.css" />
                                </head>
                                <body>''')
        file_intro.write("<h3>{0}</h3>\n".format(title))
        file_intro.write("{0}".format(desc))
        file_intro.write("Viet Software J.S.C chúc các bạn có những giây phút vui vẻ khi đọc truyện. <br />\n");
        file_intro.write("Mọi thông tin liên hệ: <a href='mailto:vietsoftwarejsc@gmail.com'>vietsoftwarejsc@gmail.com</a> <br />\n");
        file_intro.write("<body></html>");
        file_intro.close()
        
        pool = workerpool.WorkerPool(size=10)
        pool.map(processChapter, chapters)
        pool.wait()
        pool.shutdown()
        
        #for chapter in chapters:
        #    for j in range(0, 3):
        #        images = processChapter(chapter)
        #        if images != None and len(images) > 0:
        #            story.append(images); break                    
                    
        file_name = '{0}/story.data'.format(localPath)
        file_name2 = '{0}/story.txt'.format(localPath)
        # File chua ma hoa
        data = json.dumps(story)
        f = open(file_name2, 'w')
        f.write(data)
        f.close()
        # File ma hoa
        text = base64.b64encode(data)
        text += 'hindua88'
        f = gzip.open(file_name, 'wb')
        f.write(text)
        f.close()
        
        print 'File ', file_name, ' is created.'
    except:
        traceback.print_exc()      
        
def processChapter(chapter):
    try:
        link = chapter['link']
        name = chapter['name']
        for j in range(0, 3):
            tree = pyclib.getXMLTree(link)
            if tree!=None: break
        listNode = tree.xpath('//div[@class="noidung"]//img')
        items = []
        count = 0
        for node in listNode:
            count += 1
            url = node.get('src')
            if not url.startswith('http'):
                url = urljoin(link, node.get('src'))
            result, source, file_name, size = pyclib.saveImage(url, PREFIX, LOCAL_PATH, FOLDER_STORY)
            for k in range(0, 3):
                if result!=None:
                    print 'Page ', count, ' ', url, ' source: ', source
                    items.append({'source':  source, 'url': url, 'exists': True})
                    break
        if len(items) > 0:
            story.append({'name': name, 'images': items})
    except:
        traceback.print_exc()
        
def process():
    """Build the crawl job list and fan it out over a worker pool.

    Currently hard-wired to a single story (Dragon Ball); each job dict
    carries the story URL, display title and local folder name that
    processLink expects.
    """
    try:
        listJob = [{
            'link': 'http://m.truyentranhnhanh.com/truyentranh/316/dragon-ball',
            'title': 'Bảy viên ngọc rồng',
            'folder': 'bayvienngocrong',
        }]
        # Fan the jobs out over 10 workers and block until all finish.
        pool = workerpool.WorkerPool(size=10)
        pool.map(processLink, listJob)
        pool.wait()
        pool.shutdown()
    except:
        traceback.print_exc()

def convertStr(text):
    """ASCII-fold *text* and drop characters unwanted in folder names.

    Removes ':', '?', '+', spaces and '.' from the ASCII-converted
    string; returns None if conversion raises (error is printed).
    """
    try:
        ascii_text = pyclib.toAscii(text)
        forbidden = set([':', '?', '+', ' ', '.'])
        return ''.join(ch for ch in ascii_text if ch not in forbidden)
    except:
        traceback.print_exc()
        
# Script entry point: run the whole crawl once and stamp the finish time.
# Any exception escaping process() is printed rather than crashing the run.
if __name__ == '__main__':
    try:
        print 'Process crawler truyentranhnhanh'
        process()
        print 'Finished.', datetime.datetime.now()
    except:
        traceback.print_exc()