# -*- coding: utf-8 -*-
'''
Created on Jun 27, 2014

@author: GIANG NGUYEN
'''
import os
import sys
from urlparse import urljoin
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
#sys.path.append('C:/longhoanggiang/pylib')
import lib
import logging
import sqlite3
import json
import workerpool

# Console logging for the whole crawler: DEBUG and up, timestamped.
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

# Shared result map filled by CrawlJob worker threads:
# md5(chapter url) -> rendered chapter HTML (see Crawler.getDetail).
crawlResult = {}

class CrawlJob(workerpool.Job):
    """Worker-pool job wrapping a callback.

    Runs ``callback(*args)`` on a pool thread; the callback must return
    a ``(detail, key)`` pair, which is stored in the module-level
    ``crawlResult`` dict as ``crawlResult[key] = detail``.
    """

    def __init__(self, callback, *args):
        # Remember the callable and its positional arguments for run().
        self.callback = callback
        self.args = args

    def run(self):
        # Execute the deferred call and publish its result in the
        # shared map under the key the callback reports.
        result, result_key = self.callback(*self.args)
        global crawlResult
        crawlResult[result_key] = result

class Crawler():
    """Scraper for gacsach.com stories.

    Downloads a story's chapter list, fetches every chapter in parallel
    through a workerpool (results land in the module-level
    ``crawlResult`` dict keyed by md5 of the chapter URL) and writes the
    rendered HTML into a local SQLite database.
    """

    def getChapters(self, url):
        """Return the chapter list of the story page at *url*.

        Each entry is a dict with 'name' (chapter title), 'url'
        (absolute chapter link), 'filename' (md5 of the link — the same
        key used in ``crawlResult``) and 'parent' (always 0 here).
        """
        tree = lib.Web.load(url).build_tree()
        data = []
        # Chapter links live in the story's book-navigation menu.
        for node in tree.xpath("//div[@class='content']//div[contains(@id, 'book-navigation')]//ul[@class='menu']/li/a"):
            link = node.get('href')
            if link != '':
                link = urljoin(url, link)
            # Entries without an href are skipped entirely.
            if link == '': continue
            name = lib.stringify(node)
            data.append({'name': name, 'url': link, 'filename': lib.md5(link), 'parent': 0})
        return data
    
    def getDetail(self, url):
        """Fetch one chapter page and return ``(html_content, md5(url))``.

        The returned key matches the 'filename' field produced by
        getChapters(), which is how callers pair results back up after
        the parallel crawl.
        """
        tree = lib.Web.load(url, cached=True).build_tree()
        nodeContent = tree.xpath("//section[@id='content']//div[@class='content']/article")
        # Strip the in-article navigation before extracting the text.
        lib.Etree.cleanNode("./nav", nodeContent)
        headNode = tree.xpath("//head/script[contains(., 'jQuery.extend')]")
        jQueryStr = lib.Etree.tostring(headNode)
        
        # The page embeds two parameters in a jQuery.extend() call:
        # "si" (lists of shifted character codes) and "sil" (the shift
        # offset).  Decoding yields strings that are removed from the
        # chapter text below — presumably anti-scrape watermark strings;
        # TODO confirm.
        # NOTE(review): eval() of scraped page content can execute
        # arbitrary code if the site is hostile or compromised; consider
        # ast.literal_eval / json instead.
        si = eval(lib.extractText(r'"si"\:(.+?),"', jQueryStr, 1))
        sil = int(lib.extractText(r'"sil":(\d+)', jQueryStr, 1))
        replaceArr = []
        for i in si:
            tmp = ""
            for j in range(0, len(i)):
                # unichr: Python 2 — build the decoded unicode string.
                tmp += str(unichr(i[j] - sil))
            replaceArr.append(tmp)
            
        def handelImgTag(node):
            # Callback for lib.getText: replace <img> nodes with a
            # fixed-width absolute-URL tag.  Returning None lets
            # lib.getText process the node normally.
            if node.tag == 'img':
                imgUrl = node.get('src')
                if imgUrl != '':
                    imgUrl = urljoin(url, imgUrl)
                    return "<img src=\"{0}\" width=\"100%\" />".format(imgUrl)
            return None
        
        content = lib.getText(nodeContent, paragraph_break=2, breakline_html=False, callback=handelImgTag).strip()
        content = content.replace("\n", "<br />")
        # Python 2: make sure we hold a UTF-8 byte string before the
        # byte-level replacements below.
        if type(content).__name__ == 'unicode':
            content = content.encode('utf-8')
        content = content.replace('www.gacsach.com', '')
        # Remove the decoded obfuscation/watermark strings.
        for ireplace in replaceArr:
            content = content.replace(ireplace, '')
        return content, lib.md5(url)
    
    def createTable(self, connection):
        """Create the udv_content table and its indexes (idempotent)."""
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS udv_content('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'hash' VARCHAR(32), 'name' VARCHAR(200), 'content' BLOB, 'is_header' INTEGER DEFAULT 0)")
        cursor.execute("CREATE INDEX IF NOT EXISTS hash_index ON udv_content(hash)")
        # NOTE(review): 'id' is the INTEGER PRIMARY KEY, so this extra
        # index is redundant in SQLite.
        cursor.execute("CREATE INDEX IF NOT EXISTS id_index ON udv_content(id)")
        cursor.close()
    
    def getStory(self, url, output):
        """Crawl one story at *url* into /longhoanggiang/database/<output>."""
        
        path = "/longhoanggiang/database/{0}".format(output)
        if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        
        chapters = self.getChapters(url)
        # Fetch all chapters concurrently; each CrawlJob stores its
        # result in crawlResult keyed by md5(chapter url).
        pool = workerpool.WorkerPool(size=10)
        
        for chapter in chapters:
            pool.put(CrawlJob(self.getDetail, chapter['url']))
        pool.shutdown()
        pool.wait()
        
        # Insert in the original chapter order, not completion order.
        for chapter in chapters:
            name = chapter['name']
#             detail = self.getDetail(chapter['url'])
            detail = crawlResult[lib.md5(chapter['url'])]
            detail = "<h4 id='title'>{0}</h4>{1}".format(name, detail)
            cursor = connection.cursor()
            cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
            cursor.close()
            print name
            print detail
        
        connection.commit()
        connection.close()
    
    def getMutipleStory(self, urls, output):
        """Crawl several stories (one per url in *urls*) into one database.

        NOTE(review): near-duplicate of getStory(); consider folding the
        two together.  ``crawlResult`` is never cleared between urls, so
        the shared map keeps growing across stories.
        """
        path = "/longhoanggiang/database/{0}".format(output)
        if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        for url in urls:
            chapters = self.getChapters(url)
            pool = workerpool.WorkerPool(size=10)
            for chapter in chapters:
                pool.put(CrawlJob(self.getDetail, chapter['url']))
            pool.shutdown()
            pool.wait()
            # Persist in chapter order, pulling results from the shared map.
            for chapter in chapters:
                name = chapter['name']
#                 detail = self.getDetail(chapter['url'])
                detail = crawlResult[lib.md5(chapter['url'])]
                detail = "<h4 id='title'>{0}</h4>{1}".format(name, detail)
                cursor = connection.cursor()
                cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
                cursor.close()
                print name
                print detail
        
        connection.commit()
        connection.close()
    
    def getDocLink(self, url):
        """Resolve a story overview page to its read-online URL.

        Follows the first link in the block after the "Đọc trực tuyến"
        ("read online") heading.
        """
        tree = lib.Web.load(url, cached=True).build_tree()
        node = tree.xpath("//h2[contains(., 'Đọc trực tuyến')]/following-sibling::div[1]//a".decode('utf-8'))[0]
        link = node.get('href')
        if link == '':
            # NOTE(review): bare `raise` with no active exception is a
            # bug in Python 2 (TypeError, or re-raise of an unrelated
            # earlier exception); should raise an explicit exception.
            raise
        return urljoin(url, link)
    
    def crawlMultiStories(self, startPage=0, endPage=1):
        """Crawl every story on catalogue pages startPage..endPage
        (inclusive) into one SQLite database, replacing any existing
        database file of the same name."""
        output = "/longhoanggiang/database/ngontinh{0}{1}".format(startPage, endPage)
        if os.path.exists(output):
            os.unlink(output)
        connection = sqlite3.connect(output)
        self.createTable(connection)
        global crawlResult
        data = []
        # Pass 1: collect (name, read-online url) for every story
        # listed on the requested catalogue pages.
        for page in range(startPage, endPage + 1):
            url = "http://gacsach.com/ngon-tinh-trung-quoc.html?order=totalcount&sort=desc&page={0}".format(page)
            tree = lib.Web.load(url, cached=True).build_tree()
            for node in tree.xpath("//div[@id='thuvien']//table[contains(@class, 'views-table')]/tbody/tr/td[1]/a"):
                name = lib.stringify(node)
                link = node.get('href')
                if link == '': continue
                link = urljoin(url, link)
                docLink = self.getDocLink(link)
                print name, link, docLink
                data.append({'name': name, 'url': docLink})
            
        # Pass 2: crawl each story's chapters in parallel and persist
        # them, preceded by an is_header row carrying the story title.
        for story in data:
            pool = workerpool.WorkerPool(size=5)
            chapters = self.getChapters(story['url'])
            if len(chapters) == 0: continue
            for chapter in chapters:
                pool.put(CrawlJob(self.getDetail, chapter['url']))
            pool.shutdown()
            pool.wait()
            
            cursor = connection.cursor()
            cursor.execute("INSERT INTO udv_content('name', 'is_header') VALUES(?, 1)", [story['name'].decode('utf-8')])
            for chapter in chapters:
                name = chapter['name']
                detail = crawlResult[lib.md5(chapter['url'])]
                detail = "<strong>{0}</strong><br /><hr /><br />{1}".format(name, detail)
                cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
                print name
                print detail[:200]
            cursor.close()
            # Reset the shared result map so the next story does not
            # keep the previous chapters alive in memory.
            crawlResult = {}
            
        connection.commit()
        connection.close()
    
    
if __name__ == '__main__':
    # Interactive entry point: crawl one story, or several stories when
    # the input is a '|'-separated list of URLs.
    c = Crawler()

    url = raw_input("Enter url: ")
    output = raw_input("Enter database name: ")
    if url.startswith("http:"):
        # Fall back to a default database name when none was given.
        if not output:
            output = 'khongten'
        if '|' in url:
            # Multiple URLs: crawl them all into one database.
            c.getMutipleStory(url.split('|'), output)
        else:
            c.getStory(url, output)
    else:
        logging.error("Invalid URL")
    logging.info("Finished crawl")
    # NOTE(review): exits with status 1 even on success; kept as-is in
    # case the invoking script depends on it, but 0 would be correct.
    os._exit(1)
    