# -*- coding: utf-8 -*-
'''
Created on Aug 5, 2014

@author: TRAM ANH
'''
import os
import sys
from urlparse import urljoin
import traceback
from os import renames
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json
import workerpool
from Cheetah.Template import Template

# Log everything at DEBUG with a "LEVEL :: timestamp :: message" prefix.
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

# Cheetah template wrapping a crawled chapter; $contents is substituted with
# the extracted page HTML in Crawler.crawlDetail().
templateDef = """
<html>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="initial-scale=1, maximum-scale=1">
<link rel="stylesheet" type="text/css" href="css/style-tutorialspoint.css" />
<style>
body { font-size: 1.3em; }
</style>
</head>
<body>
$contents
</body>
</html>
""" 

# Shared result map filled by CrawlJob workers: page hash (md5 of URL) -> rendered HTML.
# NOTE(review): written from worker threads without a lock; a single dict item
# assignment is atomic under the CPython GIL, but confirm if pool size > 1.
crawlResult = {}

class CrawlJob(workerpool.Job):
    
    def __init__(self, callback, *args):
        self.callback = callback
        self.args = args
        
    def run(self):
        print self.args
        detail, key = self.callback(*self.args)
        global crawlResult
        crawlResult[key] = detail
        print 'DONE'

class Crawler():
    
    urlList = []
    output = '/longhoanggiang/new-database/{0}'
    
    def addSite(self, name, url):
        for item in self.urlList:
            if item['url'] == url: return
        self.urlList.append({'name': name, 'url': url})
            
    def setOutput(self, output):
        self.output = self.output.format(output)
        if not os.path.exists(os.path.dirname(self.output)):
            os.makedirs(os.path.dirname(self.output), 0777)
    
    def crawlChapters(self):
        data = []
        for site in self.urlList:
            url = site['url']
            tree = lib.Web.load(url, cached=True).build_tree()
            for xpathStr in ["//div[@id='leftcol']/ul[@class='menu']/li[contains(., 'Useful Resources')]/../following-sibling::*[1]",
                             "//table[@class='main']//td[@class='content']//b[contains(., 'Useful References')]/following-sibling::b[1]"]:
                print xpathStr
                removeNode = tree.xpath(xpathStr)
                print len(removeNode)
                if len(removeNode) > 0:
#                     lib.cleanNextNodeOrSelf(removeNode[0], True)
                    print lib.Etree.tostring(removeNode[0])
                    for node in removeNode[0].getnext():
                        print lib.Etree.tostring(node)
                    removeNode[0].getparent().remove(removeNode[0])
            idata = []
            xpathLst = ["//div[@id='leftcol']/ul[@class='menu']//a", "//div[@class='left']/ul[@class='menu']//a[@class='left']"]
            for xpathStr in xpathLst:
                nodes = tree.xpath(xpathStr)
                if len(nodes) == 0: continue
                for node in nodes:
                    name = lib.stringify(node)
                    link = node.get('href')
                    if link == '': continue
                    link = urljoin(url, link)
                    idata.append({'name': name, 'url': link, 'hash': lib.md5(link)})
                    print name, link
                if len(self.urlList) == 1:
                    data = idata
                else:
                    data.append(idata)
                break
        return data
    
    def crawlDetail(self, url, key=None):
        if key == None: key = lib.md5(url)
        tree = lib.Web.load(url, cached=True).build_tree()
        xpathLst = ["//div[@class='content']", "//table[@class='main']//table[@class='middle']//tr/td"]
        for xpathStr in xpathLst:
            contentNode = tree.xpath(xpathStr)
            if len(contentNode) == 0: continue
            if len(contentNode) > 0:
                contentNode = contentNode[0]
                lib.Etree.cleanNode(".//div[@class='topgooglead']", contentNode)
                for ixpath in [".//script", ".//div[contains(., 'Advertisements')]", "./div[@class='pre-btn']", "./div[@class='nxt-btn']", "./div[@class='print-btn']", "./div[@class='pdf-btn']", ".//img[@src='/images/next.gif']/..", ".//img[@src='/images/previous.gif']/..",
                               ".//img[@src='/images/print.gif']/..", ".//img[@src='/images/add-this.gif']/..", ".//hr"]:
                    for node in contentNode.xpath(ixpath):
                        node.getparent().remove(node)
                contentHtml = lib.Etree.tostring(contentNode)
                t = Template(templateDef, searchList=[{'contents': contentHtml}])
                html = str(t)
#                 print html
                lib.file_put_content(html, "E:/test/test.html")
                return html, key
            return None, key
                
    def start(self):
        if len(self.urlList) == 0: return
        if os.path.exists(self.output): os.unlink(self.output)
        connection = sqlite3.connect(self.output)
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE g_categories('name' VARCHAR(100), 'hash' VARCHAR(32), 'parent' VARCHAR(32), 'isLeaf' INTEGER DEFAULT 1, 'display' INTEGER AUTOINCREAMENT)")
        cursor.execute("CREATE TABLE g_content('hash' VARCHAR(32), 'content' TEXT)")
        cursor.execute("CREATE TABLE g_bookmark('name' VARCHAR(50), 'hash' VARCHAR(32))")
        cursor.execute("CREATE INDEX IF NOT EXISTS categories_hash_index ON g_categories(hash)")
        cursor.execute("CREATE INDEX IF NOT EXISTS categories_hash_leaf_index ON g_categories(hash, isLeaf)")
        cursor.execute("CREATE INDEX IF NOT EXISTS content_hash_index ON g_content(hash)")
        connection.commit()
        #### start crawler ####
        
        chapters = self.crawlChapters()
        print chapters
        
        
        pool = workerpool.WorkerPool(size=1)
           
        if len(self.urlList) == 1:
            for chapter in chapters:
                pool.put(CrawlJob(self.crawlDetail, chapter['url'], chapter['hash']))
        elif len(self.urlList) > 1:
            for data in chapters:
                for chapter in data:
                    pool.put(CrawlJob(self.crawlDetail, chapter['url'], chapter['hash']))
        pool.shutdown()
        pool.wait()
        if len(self.urlList) == 1:
            for chapter in chapters:
                name = chapter['name']
                detail = crawlResult.get(chapter['hash'], None)
                if detail == None: continue
                cursor.execute("INSERT INTO g_categories('name', 'hash') VALUES(?, ?)", [name.decode('iso-8859-1'), chapter['hash']])
                cursor.execute("INSERT INTO g_content('hash', 'content') VALUES(?, ?)", [chapter['hash'], detail.decode('iso-8859-1')])
                print name
                print detail
        elif len(self.urlList) > 1:
            for i in range(0, len(chapters)):
                catItem = self.urlList[i]
                catName = catItem['name']
                catHash = lib.md5(catItem['url'])
                print catName, catHash
                cursor.execute("INSERT INTO g_categories('name', 'hash', 'isLeaf') VALUES(?, ?, ?)", [catName.decode('iso-8859-1'), catHash, 0])
                data = chapters[i]
                for chapter in data:
                    name = chapter['name']
                    detail = crawlResult.get(chapter['hash'], None)
                    if detail == None: continue
                    cursor.execute("INSERT INTO g_categories('name', 'hash', 'parent') VALUES(?, ?, ?)", [name.decode('iso-8859-1'), chapter['hash'], catHash])
                    cursor.execute("INSERT INTO g_content('hash', 'content') VALUES(?, ?)", [chapter['hash'], detail.decode('iso-8859-1')])
                    print name
                    print detail
        
        #### end crawler ####
        cursor.close()
        connection.commit()
        connection.close()
        

if __name__ == '__main__':
    
    c = Crawler()
#     c.addSite('Ruby Programming', 'http://www.tutorialspoint.com/ruby/ruby_classes.htm')
    c.addSite('HTML5 Tutorial', 'http://www.tutorialspoint.com/html5/index.htm')
#     c.setOutput('ruby1')
#     c.start()    
    print c.crawlChapters()
#     print c.crawlDetail('http://www.tutorialspoint.com/html5/html5_editor.htm')
    
    logging.info("Finished")
    os._exit(1)
    
    
    

