# -*- coding: utf-8 -*-
'''
Created on Jun 7, 2014
 
@author: GIANG NGUYEN
'''
 
 
import os
import sys
from urlparse import urljoin
import traceback
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import pymysql
 
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
 
class Gacsach():
     
    totalPageList = 0
    connection = None
     
    def __init__(self):
        self.url = 'http://gacsach.com/thu-vien-sach'
        self.totalPageList = self.getNumberOfPageListStory()
        self.connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='', charset='utf8')
        try:
            self.connection.select_db('gacsach')
        except:
            self.inittables()
        
    def inittables(self):
        cursor = self.connection.cursor()
        sql = '''CREATE DATABASE `gacsach` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
                USE `gacsach`;
                CREATE TABLE `gacsach` (
                    `id` VARCHAR (32) PRIMARY KEY,
                    `name` VARCHAR (150),
                    `url` VARCHAR (255),
                    `story_url` VARCHAR (255),
                    `author` VARCHAR (150),
                    `translator` VARCHAR (150),
                    `thumbnail` VARCHAR (255),
                    `total_chap` INT (11),
                    `path` VARCHAR (255),
                    `tag` VARCHAR (255),
                    `is_active` TINYINT (1) DEFAULT 1,
                    `description` TEXT
                ); 
                CREATE UNIQUE INDEX `id_idx` ON `gacsach`(`id`);
                CREATE INDEX `id_is_active_idx` ON `gacsach`(`id`, `is_active`);'''
        cursor.execute(sql)
        cursor.close()
     
    def getNumberOfPageListStory(self):
        tree = lib.Web.load(self.url).build_tree()
        lastPageNode = tree.xpath("//div[@class='item-list']/ul[@class='pager']/li[contains(@class, 'pager-last')]/a")
        if len(lastPageNode) > 0:
            href = lastPageNode[0].get('href')
            totalPage = lib.extractText("\?page=(\d+)", href, 1)
            return int(float(totalPage))
        return 0
    
    def getListChapter(self, url):
        tree = lib.Web.load(url).build_tree()
        data = []
        for node in tree.xpath("//footer[@class='book-navigation']/ul/li/a"):
            name = lib.stringify(node)
            link = node.get('href')
            if link != '':
                link = urljoin(url, link)
            data.append({'name': name, 'link': link})
        return data
    
    def getMetaOfStory(self, url):
        tree = lib.Web.load(url).build_tree()
        categoriesNode = tree.xpath("//h2[contains(., 'Danh mục sách')]/following-sibling::*[1]/li/a".decode('utf-8'))
        tag = []
        for node in categoriesNode:
            tag.append(lib.stringify(node))
        tagStr = ', '.join(tag)
        thumbNode = tree.xpath("//div[@class='detail-sach']//figure/img")
        thumbnail = thumbNode[0].get('src') if len(thumbNode) > 0 else ''
        thumbnail = urljoin(url, thumbnail) if thumbnail != '' else thumbnail
        mainUrlNode = tree.xpath("//h2[contains(., 'Đọc trực tuyến:')]/following-sibling::*[1]/div/a".decode('utf-8'))
        href = mainUrlNode[0].get('href') if len(mainUrlNode) > 0 else ''
        mainUrl = urljoin(url, href) if href != '' else ''
        translatorNode = tree.xpath("//h2[contains(., 'Dịch giả:')]/following-sibling::*[1]/li/a".decode('utf-8'))
        translatorList = []
        for node in translatorNode:
            translatorList.append(lib.stringify(node))
        translator = ', '.join(translatorList)
        return {'translator': translator, 'tag': tagStr, 'mainUrl': mainUrl, 'thumbnail': thumbnail}
        
    
    def getListStoriesByPage(self, page):
        url = self.url
        if (page > 1):
            url += '?page={0}'.format(page)
        data = []
        tree = lib.Web.load(url, cached=False).build_tree()
        cursor = self.connection.cursor()
        for item in tree.xpath("//div[@class='view-content']/table/tbody/tr"):
            try:
                nameNode = item.xpath("./td[1]/a")
                name = lib.stringify(nameNode[0])
                link = nameNode[0].get('href')
                if name == '' or link == '': continue
                link = urljoin(url, link)
                authorNode = item.xpath("./td[2]/a")
                author = lib.stringify(authorNode[0]) if len(authorNode) > 0 else ''
                
                meta = self.getMetaOfStory(link)
                meta['name'] = name
                meta['author'] = author
                meta['url'] = link
                
                print 'Name: ', name
                print 'Author: ', author
                print 'Translator: ', meta['translator']
                print 'Tag: ', meta['tag']
                print 'URL: ', meta['mainUrl']
                print 'thumbnail', meta['thumbnail']
                
                storyId = lib.md5(meta['mainUrl'])
                param = {'id': storyId,
                          'name': name,
                          'author': author,
                          'translator': meta['translator'],
                          'thumbnail': meta['thumbnail'],
                          'story_url': url,
                          'url': meta['mainUrl'],
                          'tag': meta['tag'],
                          }
                
                cursor.execute("SELECT COUNT(`id`) FROM `gacsach` WHERE `id`=%s", storyId)
                row = cursor.fetchone()
                isExists = False
                if len(row) > 0:
                    isExists = row[0] > 0
                
                if not isExists:
                    sql = "INSERT INTO `gacsach`({0}) VALUES({1})".format(', '.join(param.keys()), ', '.join([ '%s' ] * len(param.values())))
                    cursor.execute(sql, param.values())
                else:
                    logging.debug("Already exists story: {0} with id: {1}".format(name, storyId))
                data.append(meta)
            except:
                print lib.Etree.tostring(item)
                logging.error(traceback.print_exc())
        cursor.close()
        self.connection.commit()
        return data
    
    def getListStories(self):
        data = lib.CacheManager.get('/tmp/gacsach/list_stories')
        if data == None:
            logging.info("load data in network")
            data = []
            for page in range(1, self.totalPageList+1):
                adata = self.getListStoriesByPage(page)
                for tmp in adata:
                    data.append(tmp)
                
            logging.info("finished get all stories list")
            lib.CacheManager.put('/tmp/gacsach/list_stories', data, 864000)
        else:
            logging.info("get data in cache")
 
if __name__ == '__main__':
    # Script entry point: crawl the whole gacsach.com library once.
    logging.info("start crawler gacsach.com")
    crawler = Gacsach()
    crawler.getListStories()
    logging.info("Finished crawler")
    sys.exit(0)