# -*- encoding: utf-8 -*-
import pycommonlib as pyclib
import html2textlib
import datetime, os, traceback, re
import workerpool
import simplejson as json
import gzip, base64

from urlparse import urljoin
from lxml import etree

from pymongo import Connection
# MongoDB connection and the two collections this crawler writes to.
conn = Connection('localhost', 27017)
DB = conn['story']
coll_category = DB['comics_category']  # one document per story (category)
coll_chapter = DB['comics_chapter']  # one document per downloaded chapter

# Crawl target and local storage layout.
SITE_URL = 'http://truyentranh8.com/'
#LOCAL_PATH = 'g:/Comics'
LOCAL_PATH = '/home/data'  # root folder for downloaded chapter images
FOLDER_STORY = 'thandongdatviet'  # per-story sub-folder under LOCAL_PATH
PREFIX = ''  # filename prefix forwarded to pyclib.saveImage

def check_exists(name):
    """Return True if a chapter with this name is already stored in MongoDB.

    A database error is logged and treated as "not found" (False) so the
    caller may attempt the save; previously a bare except returned None,
    which was only accidentally falsy.
    """
    try:
        return coll_chapter.find_one({'name': name}) is not None
    except Exception:
        traceback.print_exc()
        return False

def getOrderChapter(text):
    """Extract a chapter's order number from its display name.

    Returns the integer value of the first run of digits in *text*, or
    None when there are no digits (callers treat a falsy result as 1).
    """
    try:
        # Raw string for the regex; the group is digits-only, so a direct
        # int() works -- the old int(float(...)) round-trip was redundant.
        m = re.search(r'(\d+)', text)
        if m:
            return int(m.group(1))
    except Exception:
        traceback.print_exc()
        
def processLink(job):
    try:
        title = job['title']
        link = job['link']
        print 'Process ', pyclib.toAscii(title), ', ', link
        # Get tree
        tree = pyclib.getXMLTree(link)    
        contentNode = tree.xpath('//div[@id="ChapList"]')
        if len(contentNode) == 0: print 'Sai xpath => ko lay duoc noi dung'; return
        primaryNode = contentNode[0]
        
        
        # Lay danh sach chuong
        chapters = []
        listNode = primaryNode.xpath('.//ul/li[@itemprop="itemListElement"]//a')
        for node in listNode:
            text = node.get('href')
            if text != None and len(text) > 0:
                link = urljoin(SITE_URL, text.strip())
                name = pyclib.getStringWithNode(node)
                
                order = getOrderChapter(name)
                if not order: order = 1
                doc = {'link': link, 'name': name, 'order': order, 'cat_id': job['cat_id']}
                print 'Link: ', link, name
                chapters.append(doc)
         
        pool = workerpool.WorkerPool(size=5)
        pool.map(processChapterJob, chapters)
        pool.wait()
        pool.shutdown()
        
        #story = []
        ##for chapter in chapters:
        #order = 0
        #for i in range(len(chapters) -1 , -1, -1):
        #    chapter = chapters[i]
        #    for j in range(0, 3):
        #        name = chapter['name']
        #        name = name.replace('chap', 'Tập')
        #        images = processChapter(chapter)
        #        if images != None and len(images) > 0:
        #            #story.append(images);
        #            order = order + 1
        #            doc = {
        #                   'cat_id': job['cat_id'],
        #                   'order': order,
        #                   'name': chapter['name'],
        #                   'data': images,
        #                  }
        #            ret = coll_chapter.find_one({'name': chapter['name']})
        #            if not ret:
        #                coll_chapter.save(doc)        
        #                print 'Save success chapter in database!'
        #            else:
        #                print  'Chapter was exists in database'
        #            # Exists for
        #            break                    
        #if len(story) > 0:            
        #    job['story'] = story            
        #    collection.save(job)
        #else:
        #    print 'Không lấy được data từ truyện.'
        #print 'Finished process.'
    except:
        traceback.print_exc()
        
def processChapterJob(chapter):
    try:
        for j in range(0, 3):
            name = chapter['name']
            name = name.replace('chap', 'Tập')
            images = processChapter(chapter)
            if images != None and len(images) > 0:
                #story.append(images);
                order = chapter['order']
                doc = {
                       'cat_id': chapter['cat_id'],
                       'order': order,
                       'name': name,
                       'data': images,
                      }
                ret = coll_chapter.find_one({'name': name})
                if not ret:
                    coll_chapter.save(doc)        
                    print 'Save success chapter in database!'
                else:
                    print  'Chapter was exists in database'                
    except:
        traceback.print_exc()

def processChapter(chapter):
    try:
        link = chapter['link']
        name = chapter['name']
        print 'Process link: ', link
        tree, html = pyclib.getXMLTree(link, returnHTML=True)
        pregex = re.compile("lstImagesVIP =(.+) var max");
        html = ' '.join(html.split())
        m = pregex.search(html)
        count = 0
        if m:        
            text = m.group(1)
            arr = text.split(");")
            items = []
            for item in arr:
                count += 1
                pregex2 = re.compile('http(.+)jpg')
                m2 = pregex2.search(item)
                if m2:
                    url = 'http{0}jpg?imgmax=1600'.format(m2.group(1))
                    print 'Count ', count, ': ', url
                    result, source, file_name, size = pyclib.saveImage(url, PREFIX, LOCAL_PATH, FOLDER_STORY)
                    for j in range(0, 3):
                        if result!=None:
                            items.append({'source':  source, 'url': url, 'exists': True})
                            break
            if len(items) > 0:
                return items
    except:
        traceback.print_exc()
        
def process():
    """Build the single crawl job for 'Than Dong Dat Viet' and run it through
    a one-worker pool."""
    try:
        job = { 'link': 'http://truyentranh8.com/Than_Dong_Dat_Viet/', 
                'title': 'Thần đồng đất việt',
                'name': 'thandongdatviet', }

        # Reuse the existing category document when present; otherwise
        # create it and use the new id.
        existing = coll_category.find_one({'name': job['name']}, {})
        if existing:
            cat_id = existing['_id']
        else:
            cat_id = coll_category.save(job)
        job['cat_id'] = str(cat_id)

        pool = workerpool.WorkerPool(size=1)
        pool.map(processLink, [job])
        pool.wait()
        pool.shutdown()
    except:
        traceback.print_exc()
        
# Script entry point: run the crawl once and report the finish time.
if __name__ == '__main__':
    try:
        print 'Process crawler'
        process()
        print 'Finished.', datetime.datetime.now()
    except:
        traceback.print_exc()
