# -*- coding: utf-8 -*-
'''
Created on Dec 28, 2013

@author: LONG HOANG GIANG
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from database.mysql import MySQL
from urlparse import urljoin
import argparse
import datetime
import glob
import json
import lib
import logging
import re
import traceback
import workerpool


# Global logger: DEBUG level, timestamped in day/month/year local time.
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

# Categories crawled from gacsach.com, as (display name, URL slug used as the
# DB `tag`, listing-page URL) tuples. Order here defines `order_no` when the
# `category` table is seeded by makeTables().
categories = [("Ngôn tình", "ngon-tinh-trung-quoc", "http://gacsach.com/ngon-tinh-trung-quoc.html"),
              ("Tiểu thuyết tình yêu", "tieu-thuyet-tinh-yeu", "http://gacsach.com/tieu-thuyet-tinh-yeu.html"),
              ("Sách Teen", "sach-teen", "http://gacsach.com/sach-teen.html"),
              ("Tự sáng tác", "tu-sang-tac", "http://gacsach.com/tu-sang-tac.html"),
              ("Kiếm hiệp - Dã sử", "kiem-hiep-da-su", "http://gacsach.com/kiem-hiep-da-su.html"),
              ("Truyện ma - Kinh dị", "truyen-ma-kinh-di", "http://gacsach.com/truyen-ma-kinh-di.html"),
              ("Trinh thám - Hình sự", "trinh-tham-hinh-su", "http://gacsach.com/trinh-tham-hinh-su.html"),
              ("Kĩ Năng Sống", "ki-nang-song", "http://gacsach.com/ki-nang-song.html"),
              ("Tâm lý - Giới tính", "tam-li-gioi-tinh", "http://gacsach.com/tam-li-gioi-tinh.html"),
              ("Văn hóa - Xã hội", "van-hoa-xa-hoi", "http://gacsach.com/van-hoa-xa-hoi.html"),
              ("Văn học Việt Nam", "van-hoc-viet-nam", "http://gacsach.com/van-hoc-viet-nam.html"),
              ("Văn học nước ngoài", "van-hoc-nuoc-ngoai", "http://gacsach.com/van-hoc-nuoc-ngoai.html"),
              ("Văn học cổ điển", "van-hoc-co-dien", "http://gacsach.com/van-hoc-co-dien.html"),
              ("Lịch sử - Hồi ký", "lich-su-hoi-ky", "http://gacsach.com/lich-su-hoi-ky.html"),
              ("Khoa học - Kỹ thuật", "khoa-hoc-ki-thuat", "http://gacsach.com/khoa-hoc-ki-thuat.html"),
              ("Kinh tế", "kinh-te", "http://gacsach.com/kinh-te.html")
              ]


class MyJob(workerpool.Job):
    """Workerpool job wrapping an arbitrary callable and its positional args.

    Lets the chapter downloader queue plain functions (see getListChapter)
    onto a workerpool.WorkerPool.
    """

    def __init__(self, func, *args):
        # Keep the callable and its arguments until a worker picks us up.
        self.func = func
        self.args = args

    def run(self):
        """Execute the wrapped callable on the worker thread."""
        func, args = self.func, self.args
        func(*args)

def makeTables(force=False):
    """Create the `category` and `story` tables if absent and seed categories.

    When *force* is truthy, both tables are dropped first so the schema is
    rebuilt from scratch. The `category` table is seeded from the module-level
    `categories` list only when it is empty, with `order_no` following list
    order (1-based).
    """
    if force:
        # Rebuild from scratch: discard any existing data.
        db.execute("DROP TABLE IF EXISTS `category`;")
        db.execute("DROP TABLE IF EXISTS `story`;")

    db.execute('''CREATE TABLE IF NOT EXISTS `category`(
                `id` INT PRIMARY KEY AUTO_INCREMENT, 
                `name` VARCHAR(50) NOT NULL, 
                `order_no` INT, 
                `url` VARCHAR(200), 
                `parent_id` INT DEFAULT 0,
                `tag` VARCHAR(50)
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
                ''')

    db.execute('''CREATE TABLE IF NOT EXISTS `story`(
                `id` VARCHAR(50) PRIMARY KEY,
                `name` VARCHAR(100) NOT NULL,
                `raw_name` VARCHAR(100) NOT NULL,
                `author` VARCHAR(100),
                `url` VARCHAR(100),
                `thumbnail` VARCHAR(150),
                `description` TEXT,
                `chapter_num` INT DEFAULT 0,
                `chapter_added` INT DEFAULT 0,
                `view_count` INT DEFAULT 100,
                `created_date` DATETIME,
                `lastupdate` TIMESTAMP,
                `path` VARCHAR(150),
                `tag` VARCHAR(100),
                `is_active` TINYINT
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8
    
                ''')

    # Seed categories once; skip if the table already has rows.
    if db.count('category') == 0:
        for order_no, (name, tag, url) in enumerate(categories, 1):
            db.insert("category", {'tag': tag, 'name': name, 'url': url, 'order_no': order_no})
    db.commit()
    
    
def getMetaStory(url):
    """Fetch a story's detail page and extract its metadata.

    Returns a dict with keys:
      'description' -- plain-text synopsis,
      'thumbnail'   -- absolute cover-image URL or '',
      'url'         -- absolute link to the online-reading page (mandatory),
      'tag'         -- comma-separated category slugs parsed from tag links.

    Raises on any parse failure or when no reading link is found; the
    traceback is printed before re-raising so callers can simply skip.
    """
    try:
        tree = lib.Web.load(url).build_tree()

        # Cover image: first styled <img> inside the article, made absolute.
        thumNode = tree.xpath("//div[@id='content']//article//img[contains(@class, 'image-style')]")
        thumbnail = thumNode[0].get('src') if thumNode else ''
        if thumbnail:
            thumbnail = urljoin(url, thumbnail)

        # Link to the "read online" page; a story without one is useless.
        storyLinkNode = tree.xpath("//div[@id='content']//article//h2[contains(., 'Đọc trực tuyến')]/following-sibling::*[1]//a".decode('utf-8'))
        storyLink = lib.normalizeStr(storyLinkNode[0].get('href')) if storyLinkNode else ''
        if storyLink:
            storyLink = urljoin(url, storyLink)
        if not storyLink:
            raise Exception('story url null')

        descriptionNode = tree.xpath("//div[@id='content']//div[@class='detail-sach']/following-sibling::*[1]")
        # BUG FIX: textmode was passed the thumbnail URL (a string) instead of
        # a boolean; every other call site uses textmode=True (see
        # getChapterContent). A story with no thumbnail previously got
        # textmode='' (falsy), silently changing the description format.
        description = lib.html2text(descriptionNode, textmode=True).strip()

        # Category slugs, extracted from each tag link's "/<slug>.html" href.
        tagList = []
        for tagNode in tree.xpath("//div[@id='content']//article//h2[contains(., 'Danh mục sách')]/following-sibling::ul[1]/li/a".decode('utf-8')):
            href = tagNode.get('href').strip()
            if href == '':
                continue
            tagList.append(lib.extractText(r"/(.+)\.html", href, 1))
        tag = ', '.join(tagList)
        return {'description': description, 'thumbnail': thumbnail, 'url': storyLink, 'tag': tag}
    except:
        traceback.print_exc()
        raise
    
def getStoryWithCat(cat):
    """Crawl every listing page of one category and insert each story row.

    *cat* is a (display name, slug, listing URL) tuple from `categories`.
    Follows the pager's "sau" (next) link until there is no next page.
    Stories whose detail page cannot be parsed are skipped.
    """
    url = cat[2]
    while True:
        logging.info("getStoryWithCat: "+url)
        tree = lib.Web.load(url).build_tree()
        for node in tree.xpath("//div[@class='view-content']/table/tbody/tr"):
            aStory = {'name': lib.stringify(node.xpath("./td[1]"))}
            aStory['raw_name'] = lib.toKd(aStory['name']).lower()
            hrefNode = node.xpath("./td[1]/a")
            if len(hrefNode) == 0:
                continue
            href = hrefNode[0].get('href').strip()
            if href == '':
                continue
            href = urljoin(url, href)
            aStory['author'] = lib.stringify(node.xpath("./td[2]"))
            # View counts use spaces as thousand separators (e.g. "1 234").
            aStory['view_count'] = int(float(lib.stringify(node.xpath("./td[3]")).replace(' ', '')))
            aStory['created_date'] = datetime.datetime.strptime(lib.stringify(node.xpath("./td[4]")), "%d/%m/%Y")
            try:
                sMeta = getMetaStory(href)
            except:
                # Detail page unparsable -- skip this story, keep crawling.
                continue

            # Normalize a truncated ".htm" suffix back to ".html".
            if re.search(r"\.htm$", sMeta['url']):
                sMeta['url'] += 'l'

            # Story id is derived from its canonical reading URL.
            aStory['id'] = lib.md5(sMeta['url'])
            aStory.update(sMeta)

            # BUG FIX: this debug dump was indented inside the metadata-merge
            # loop, printing once per metadata key; it now runs once per story.
            print('---------------------------------------------------')
            print(aStory['name'])
            print(aStory['url'])
            print(url)
            print('---------------------------------------------------')

            db.insert('story', aStory)
            db.commit()

        # Follow the pager's next link, if any.
        aNextNode = tree.xpath("//div[@class='item-list']/ul[@class='pager']/li/a[contains(., 'sau ›')]".decode('utf-8'))
        if len(aNextNode) == 0:
            break
        href = aNextNode[0].get('href').strip()
        if href == '':
            break
        url = urljoin(url, href)


def getChapterContent(url, path):
    """Download one chapter page, convert it to text and gzip it to *path*.

    Best-effort: on any failure the file is still written, containing an
    "<e>url</e>" marker so failed chapters can be identified later.
    """
    try:
        nodes = lib.Web.load(url).build_tree().xpath("//div[@class='node-content']")
        if nodes:
            root = nodes[0]
            # Strip the book-navigation footer (and following siblings)
            # before converting to text.
            lib.Etree.cleanNodeNextSibling(root.xpath(".//footer[contains(@id, 'book-navigation')]"), True)
            text = lib.html2text(root, textmode=True)
        else:
            text = ""
    except:
        text = "<e>{0}</e>".format(url)
    lib.gz_file_put_content(text, path)
    

def getListChapter(storyId, url):
    """Scrape a story's chapter menu and download any missing chapters.

    Chapters are saved as gzipped "<idx>.chap" files under
    STORY_STORE_PATH/<storyId>; downloads run on a 10-worker pool and only
    fetch chapters whose file does not already exist.

    Returns (previously_downloaded_count, total_chapter_count); the caller
    uses the difference as the number of chapters added by this run.
    """
    tree = lib.Web.load(url).build_tree()
    chapters = []
    # Manual counter (not enumerate): entries with empty hrefs are skipped
    # and must not consume an index, since idx names the file on disk.
    index = 1
    for node in tree.xpath("//div[@class='node-content']//ul[@class='menu clearfix']/li/a"):
        link = node.get('href').strip()
        # BUG FIX (dead code): the original had both `if link == '': continue`
        # and a redundant `if link != '':` guard; only the skip is needed.
        if link == '':
            continue
        chapters.append({'name': lib.stringify(node),
                         'url': urljoin(url, link),
                         'idx': index})
        index += 1

    rootPath = STORY_STORE_PATH + "/" + storyId
    lib.makedir(rootPath)

    # Count chapters already on disk BEFORE downloading, so the caller can
    # compute how many this run added.
    chapFiles = glob.glob1(rootPath + "/", "*.chap")

    pool = workerpool.WorkerPool(size=10)
    for chap in chapters:
        path = "{0}/{1}.chap".format(rootPath, chap['idx'])
        if not os.path.exists(path):
            pool.put(MyJob(getChapterContent, chap['url'], path))
    pool.shutdown()
    pool.wait()
    return len(chapFiles), len(chapters)
        

def processDetailStories():
    """Refresh chapter files and chapter counts for every story in the DB."""
    # BUG FIX: the start-of-run message previously said "finished",
    # making the log impossible to read.
    logging.info("started processDetailStories")
    rows = db.select('story')
    for row in rows:
        storyId = row[0]
        name = row[1]
        url = row[4]

        print('--------------------------------')
        print(name.encode('utf-8'))
        print(url.encode('utf-8'))
        print('--------------------------------')
        # chapter_added records how many chapters this run downloaded.
        currentChapNum, newChapNum = getListChapter(storyId, url)
        db.update('story',
                  {'chapter_num': newChapNum, 'chapter_added': newChapNum - currentChapNum},
                  {'id': storyId})
        db.commit()
    logging.info("finished processDetailStories")


def processCategories():
    """Crawl the story listings of every configured category."""
    logging.info("started processCategories")
    for category in categories:
        getStoryWithCat(category)
    logging.info("finished processCategories")

def checkData():
    """Mark stories that ended up with zero chapters as inactive."""
    for row in db.select('story', where={'chapter_num': 0}):
        db.update('story', {'is_active': 0}, {'id': row[0]})
    db.commit()

if __name__ == '__main__':

    # Defaults; both may be overridden by --path / --database below.
    STORY_STORE_PATH = "/home/longhoanggiang/data/gacsach"
    DATABASE = 'gacsach'
    # NOTE(review): DB credentials are hard-coded; consider config/env vars.
    db = MySQL('localhost', 'crawler', '@123456@', '')
    db.execute('''SET time_zone = "+07:00"''')
    db.execute("SET NAMES utf8")
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--make-table", help="create all table of database gacsach", action='store_true')
    parser.add_argument("--database", help="database in use, default is gacsach", default='gacsach')
    parser.add_argument("-s", "--story", help="update list story in database", action='store_true')
    parser.add_argument("-c", "--chapter", help="update chapters of story, save chapter in {path}, update chapter number", action='store_true')
    parser.add_argument("-p", "--path", help="indicator path to save in", default="/home/longhoanggiang/data/gacsach")
    parser.add_argument("-v", "--validate", help="validate data", action='store_true')
    args = parser.parse_args()
    exit_code = 0
    try:
        if args.path != '':
            path = args.path.strip()
            # Normalize: drop a trailing slash (but keep a bare "/").
            if path.endswith("/") and len(path) > 1:
                path = path[:-1]
            STORY_STORE_PATH = path
        dbName = args.database.strip()
        if dbName != '':
            DATABASE = dbName
        db.execute("CREATE DATABASE IF NOT EXISTS {_db}".format(_db=DATABASE))
        db.execute("USE {_db}".format(_db=DATABASE))

        if args.make_table:
            makeTables(True)
        if args.story:
            processCategories()
        if args.chapter:
            processDetailStories()
        if args.validate:
            checkData()
    except:
        # BUG FIX: failures were silently hidden behind the help text; log
        # the traceback so errors are diagnosable, and report failure to the
        # shell via the exit status.
        traceback.print_exc()
        parser.print_help()
        exit_code = 1
    finally:
        # BUG FIX: ensure the connection is closed even if help-printing
        # itself raises.
        db.close()

    logging.info("Finished")
    # BUG FIX: the script previously always exited with status 1, signalling
    # failure to cron/shell even on success.
    sys.exit(exit_code)

