# -*- coding: utf-8 -*-
'''
Created on Jun 27, 2014

@author: GIANG NGUYEN
'''
import os
import sys
from urlparse import urljoin
import workerpool
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json

# Root-logger setup: DEBUG level, "LEVEL :: dd/mm/YYYY HH:MM:SS :: message" lines.
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

# Shared result store filled by CrawlJob workers: maps md5(chapter url) -> chapter HTML.
crawlResult = {}

class CrawlJob(workerpool.Job):
    '''Worker-pool job: runs callback(*args) and stores the result in the
    module-level crawlResult dict under the key the callback returns.'''

    def __init__(self, callback, *args):
        # callback must return a (detail, key) pair (see Crawler.getDetail);
        # args are forwarded to it unchanged.
        self.callback = callback
        self.args = args

    def run(self):
        result = self.callback(*self.args)
        # Crawler.getDetail returns None when it hits an exception; bail out
        # instead of crashing the worker thread on "detail, key = None".
        if result is None:
            return
        detail, key = result
        global crawlResult
        crawlResult[key] = detail

class Crawler():
    
    outputFolder = ""
    
    def getChapters(self, url):
        data = []
        _url = url
        resp = lib.Web.load(_url, cached=True)
        tree = resp.build_tree()
        cookie = resp.get_cookie()
        urls = []
        alias = lib.extractText(r"/truyen/([^/]+)", url, 1)
        storyId = lib.extractText(r"/(\d+)\.html", url, 1)
        
        print alias, storyId
        
        for aNode in tree.xpath("//div[@class='chuong_page']//a"):
            link = aNode.get('href')
            if link == '': continue
            link = urljoin(url, link)
            urls.append(link)
        if len(urls) == 0:
            urls.append(url)
            
        apiUrl = "http://mong.vn/index.php"    
        for pageNum in range(1, len(urls)+1):
            print pageNum
            resp = lib.Web.load(apiUrl, {"act": "pagination", "alias": alias, "module": "truyen", "page": pageNum, "truyen": storyId}, cookie=cookie, reqAjax=True)
            tree = resp.build_tree()
            for node in tree.xpath("//table/tbody//td/a"):
                link = node.get('href')
                if link != '':
                    link = urljoin(url, link)
                if link == '': continue
                name = lib.stringify(node)
                print name, link
                data.append({'name': name, 'url': link, 'filename': lib.md5(link), 'parent': 0})
        data.reverse()
        return data
    
    def handelImage(self, contentNode, url, should_download=True):
        imagePath = self.outputFolder + "/imgages"
        if not os.path.exists(imagePath):
            os.makedirs(imagePath, 0777)
        for imageNode in contentNode.xpath(".//img"):
            link = imageNode.get('src')
            if link == '': continue
            link = urljoin(url, link)
            if (should_download):
                dlink = lib.Web.simpleDownload(link, imagePath, "images")
                imageNode.set('src', dlink)
            else:
                imageNode.set('width', '100%')
    
    def getDetail(self, url):
        try:
            tree = lib.Web.load(url, cached=True).build_tree()
            content = ""
            for xpathStr in ["//div[@class='chuong_noidung']"]:
                print xpathStr
                nodeContent = tree.xpath(xpathStr)
                if len(nodeContent) == 0 : continue
                nodeContent = nodeContent[0]
                self.handelImage(nodeContent, url, False)
                content = lib.Etree.tostring(nodeContent)
                print "> Content length: {0}".format(len(content))
                if len(content) < 500:
                    print content
            return content, lib.md5(url)
        except:
            logging.error("Error {0} with URL: {1}".format(sys.exc_info()[1], url))
    
    def createTable(self, connection):
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS udv_content('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'hash' VARCHAR(32), 'name' VARCHAR(200), 'content' BLOB, 'is_header' INTEGER DEFAULT 0)")
        cursor.execute("CREATE INDEX IF NOT EXISTS hash_index ON udv_content(hash)")
        cursor.execute("CREATE INDEX IF NOT EXISTS id_index ON udv_content(id)")
        cursor.close()
    
    def getStory(self, url, output=None):
        if output == None:
            output = lib.extractText(r"http://mong.vn/truyen/([^/]+)/", url, 1)
        path = "/longhoanggiang/database/{0}".format(output)
        self.outputFolder = "/longhoanggiang/images/{0}".format(output)
        if os.path.exists(path):
            os.unlink(path)
        if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        
        chapters = self.getChapters(url)
        pool = workerpool.WorkerPool(size=2)
        
        for chapter in chapters:
            pool.put(CrawlJob(self.getDetail, chapter['url']))
        pool.shutdown()
        pool.wait()
        
        for chapter in chapters:
            name = chapter['name']
            detail = crawlResult[lib.md5(chapter['url'])]
            detail = "<strong>{0}</strong><br /><hr /><br />{1}".format(name, detail)
            cursor = connection.cursor()
            cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
            cursor.close()
            print name
            print detail[0:200]
        
        connection.commit()
        connection.close()
        logging.info("saved database in {0}".format(path))
    
    def getMutipleStory(self, urls, output=None):
        if output == None or output == '':
            output = raw_input("Enter database name: ")
        path = "/longhoanggiang/database/{0}".format(output)
        self.outputFolder = "/longhoanggiang/images/{0}".format(output)
        if os.path.exists(path):
            os.unlink(path)
        if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        for url in urls:
            chapters = self.getChapters(url)
            pool = workerpool.WorkerPool(size=2)
            for chapter in chapters:
                pool.put(CrawlJob(self.getDetail, chapter['url']))
            pool.shutdown()
            pool.wait()
            for chapter in chapters:
                name = chapter['name']
#                 detail = self.getDetail(chapter['url'])
                detail = crawlResult[lib.md5(chapter['url'])]
                if detail == '' or len(detail) < 10: continue
                detail = "<strong>{0}</strong><br /><hr /><br />{1}".format(name, detail)
                cursor = connection.cursor()
                cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
                cursor.close()
                print name
                print detail
        
        connection.commit()
        connection.close()
    
if __name__ == '__main__':

    c = Crawler()
    url = raw_input("Enter url: ")
    # Accept both http and https; several story URLs may be joined with '|'.
    if url.startswith(("http://", "https://")):
        if url.find('|') > -1:
            c.getMutipleStory(url.split('|'))
        else:
            c.getStory(url)
    else:
        logging.error("Invalid URL")
    logging.info("Finished crawl")
    # Hard-exit to kill any lingering worker-pool threads. Status 0 = success;
    # the original exited with 1, which reads as failure to the shell.
    os._exit(0)
    