# -*- coding: utf-8 -*-
'''
Created on Jul 7, 2014

@author: LONG HOANG GIANG
'''
import os
import sys
from urlparse import urljoin
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')




def createTable(connection):
    """Create the ``udv_content`` table and its indexes if they do not exist.

    :param connection: an open ``sqlite3`` connection; the schema is created
        in-place and the cursor is always released, even on error.
    """
    cursor = connection.cursor()
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS udv_content('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'hash' VARCHAR(32), 'name' VARCHAR(200), 'content' BLOB, 'is_header' INTEGER DEFAULT 0)")
        cursor.execute("CREATE INDEX IF NOT EXISTS hash_index ON udv_content(hash)")
        # NOTE(review): 'id' is an INTEGER PRIMARY KEY (rowid alias), so this
        # index is redundant -- kept to preserve the existing schema exactly.
        cursor.execute("CREATE INDEX IF NOT EXISTS id_index ON udv_content(id)")
    finally:
        # Bug fix: the original leaked the cursor if any execute() raised.
        cursor.close()

def getDetail(url):
    """Fetch *url* and return the cleaned article HTML as a string.

    Extracts the first ``<div class="section">`` from the page, runs it
    through ``lib.getArticleContent`` and forces every embedded image to
    100% width so it fits the target display.

    :param url: absolute URL of the article page.
    :raises ValueError: if the page has no ``<div class="section">``.
    """
    tree = lib.Web.load(url, cached=False).build_tree(base_url=url)
    sectionNodes = tree.xpath("//div[@class='section']")
    if not sectionNodes:
        raise ValueError("no <div class='section'> found at {0}".format(url))
    # Bug fix: xpath() returns a *list* of nodes; tostring() expects a single
    # element, so serialize the first match instead of the whole list.
    content = lib.getArticleContent(lib.Etree.tostring(sectionNodes[0]))
    t = lib.buildTreeFromHtml(content)
    for img in t.xpath(".//img"):
        img.set('width', '100%')
    return lib.Etree.tostring(t)

def crawl(database):
    path = "/longhoanggiang/database/{0}".format(database)
    if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path), 0777)
    if os.path.exists(path): os.unlink(path)
    connection = sqlite3.connect(path)
    createTable(connection)
    tree = lib.Web.load(url, cached=False).build_tree()
    xpathLst = ["//div[@class='book']/div[@class='toc']//span[@class='chapter']/a"]
    for xpathStr in xpathLst:
        nodes = tree.xpath(xpathStr)
        for node in nodes:
            catName = lib.stringify(node)
            print catName
            cursor = connection.cursor()
            cursor.execute("INSERT INTO udv_content('name', 'is_header') VALUES(?, ?)", [catName.decode('utf-8'), 1])
            cursor.close()
            for inode in node.xpath("./../following-sibling::ul/li/span/a"):
                link = urljoin(url, inode.get('href'))
                name = lib.stringify(inode).strip()
                if name == '': continue
                print name, link
                detail = getDetail(link)
                print detail
                cursor = connection.cursor()
                cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [name.decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
                cursor.close()
        if len(nodes) > 0: break
    connection.commit()
    connection.close()
    logging.info("saved database in {0}".format(path))
        
        
if __name__ == '__main__':

    # Module-level global read by crawl() when no base_url is passed.
    url = 'http://www.diveintopython.net/toc/index.html'

    crawl('diveintopython.ampp')

#     print getDetail('http://www.homeandlearn.co.uk/java/opening_files.html')

    logging.info("Finished")
    # Bug fix: the original called os._exit(1), reporting *failure* to the
    # shell on a successful run. Exit 0 signals success; os._exit is kept
    # to terminate immediately without running cleanup handlers.
    os._exit(0)