# -*- coding: utf-8 -*-
'''
Created on Jun 27, 2014

@author: GIANG NGUYEN
'''
import os
import sys
from urlparse import urljoin
import workerpool
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json

# Log everything from DEBUG up, day-first timestamps (dd/mm/yyyy).
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

# Shared result store: CrawlJob.run() writes each chapter's content here,
# keyed by md5 of the chapter URL; Crawler.getStory() reads it back after
# the worker pool has drained.
crawlResult = {}

class CrawlJob(workerpool.Job):
    """Worker-pool job that runs a crawl callback and records its result.

    The callback must return a (detail, key) pair; the detail is stored
    in the module-level crawlResult dict under that key.
    """

    def __init__(self, callback, *args):
        # Defer the work: just remember the callable and its positional args.
        self.callback = callback
        self.args = args

    def run(self):
        # Execute the deferred call and publish the result for getStory().
        payload, result_key = self.callback(*self.args)
        global crawlResult
        crawlResult[result_key] = payload

class Crawler():
    """Scrapes a serialized story (chapter index + chapter bodies) from a
    vnthuquan.net-style page and persists the content into a local SQLite
    database. Relies on the project-local ``lib`` helpers for HTTP, HTML
    parsing and text extraction.
    """
    
    # Session cookie captured on the first page load and replayed on every
    # later request (the site appears to require it for the AJAX endpoint).
    cookieStr = ''
    # Desktop Firefox UA string; not visibly passed to lib.Web.load in this
    # file -- presumably consumed inside lib. TODO confirm.
    userAgent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
    
    def getChapters(self, url):
        """Fetch the story index page and return the chapter list.

        Returns a list of dicts with keys 'name', 'url' (absolutized),
        'filename' (md5 of the url) and 'referer' (the index page url).
        Side effect: caches the session cookie in self.cookieStr.
        """
        data = []
        _url = url
        response = None
        # First request establishes the session cookie; later calls reuse it.
        if self.cookieStr == '':
            response = lib.Web.load(_url, cached=True)
            self.cookieStr = response.get_cookie()
        else:
            response = lib.Web.load(_url, cookie=self.cookieStr, cached=True)
        tree = response.build_tree(attr_width='100%')
        # 'Mục Lục' is Vietnamese for 'Table of Contents': select the table
        # immediately following the TOC header cell and walk its anchors.
        # (.decode('utf-8') yields a unicode xpath under Python 2.)
        for node in tree.xpath("//td[.='Mục Lục']/../../following-sibling::table[1]//td//a".decode('utf-8')):
            # The chapter target lives in the parent's onclick handler,
            # e.g. noidung1('<relative url>'), not in the anchor's href.
            link = node.getparent().get('onclick')
            if link == '': continue
            link = lib.extractText(r"noidung1\('(.+)'\)", link, 1)
            link = urljoin(url, link)
            name = lib.stringify(node)
            print name, link
            data.append({'name': name, 'url': link, 'filename': lib.md5(link), 'referer': url})
        return data
    
    def getDetail(self, url, referer):
        """Download one chapter through the site's AJAX endpoint.

        Returns (content, md5(url)); the hash is the key under which
        CrawlJob stores the result in crawlResult.
        """
        html = lib.Web.load(url, referer=referer, cookie=self.cookieStr, reqAjax=True).get_html()
        # Drop the site's literal separator marker ('tach noi dung' is
        # Vietnamese, roughly 'split content' -- presumably a chunk divider).
        html = html.replace('--!!tach_noi_dung!!--', '')
        # Skip the first 34 characters -- looks like a fixed response
        # preamble before the real markup. TODO confirm the offset.
        html = "<body>" + html[34:]
        content = lib.getArticleContent(html)
        t = lib.buildTreeFromHtml(content)
        # Make images scale with the viewport.
        for img in t.xpath(".//img"):
            img.set('width', '100%')
        # ', xem tiếp: ' ('read on:') marks the trailing next-chapter
        # navigation; find its enclosing table and remove it.
        hetNode = lib.getPNode(t.xpath("//span[contains(., ', xem tiếp: ')]".decode('utf-8')), 'table')
        lib.cleanNextNodeOrSelf(hetNode, True)
#         content = ""
        bodyNode = t.find('body')
#         for childNode in bodyNode.iterchildren():
#             content += lib.Etree.tostring(childNode)
        content = lib.getText(bodyNode, breakline_html=True, callback=None)
        print content
        # Normalise to a UTF-8 byte string (Python 2: unicode -> str).
        if type(content).__name__ == 'unicode':
            content = content.encode('utf-8')
        return content, lib.md5(url)
    
    def createTable(self, connection):
        """Create the udv_content table and its indexes if they don't exist."""
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS udv_content('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'hash' VARCHAR(32), 'name' VARCHAR(200), 'content' BLOB, 'is_header' INTEGER DEFAULT 0)")
        cursor.execute("CREATE INDEX IF NOT EXISTS hash_index ON udv_content(hash)")
        cursor.execute("CREATE INDEX IF NOT EXISTS id_index ON udv_content(id)")
        cursor.close()
    
    def getStory(self, url, output=None):
        """Crawl every chapter of the story at ``url`` into a SQLite file.

        output: database file name; prompted for interactively when None.
        Side effects: creates /longhoanggiang/database/<output>, prints
        progress, and reads the module-level crawlResult dict.
        """
        if output == None:
            output = raw_input("Enter database: ")
        path = "/longhoanggiang/database/{0}".format(output)
        if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        
        chapters = self.getChapters(url)
        # Single worker: chapter downloads effectively run sequentially.
        pool = workerpool.WorkerPool(size=1)
        
        for chapter in chapters:
            pool.put(CrawlJob(self.getDetail, chapter['url'], chapter['referer']))
        pool.shutdown()
        pool.wait()
        
        # Collect results back in original chapter order; crawlResult is
        # keyed by md5 of the chapter url (set by getDetail/CrawlJob).
        for chapter in chapters:
            name = chapter['name']
            detail = crawlResult[lib.md5(chapter['url'])]
            cursor = connection.cursor()
            # Content is JSON-encoded, compressed via lib.compressStr and
            # stored as a BLOB (Python 2 buffer()).
            cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
            cursor.close()
            print name
            print detail[0:200]
        
        connection.commit()
        connection.close()
        logging.info("saved database in {0}".format(path))
        
    
if __name__ == '__main__':

    # Entry point: prompt for a story index URL and crawl it into SQLite.
    c = Crawler()
    url = raw_input("Enter url: ")
    # Accept https as well as http (the original only matched "http:").
    if url.startswith(("http:", "https:")):
        c.getStory(url)
    else:
        # Fixed typo: was "Inivalid URL".
        logging.error("Invalid URL")
    logging.info("Finished crawl")
    # os._exit skips interpreter cleanup but guarantees the process dies
    # even if non-daemon workerpool threads are still alive. Exit with 0:
    # the original exit code 1 wrongly reported failure to the shell.
    os._exit(0)
    