# -*- coding: utf-8 -*-
'''
Created on Jul 13, 2014

@author: LONG HOANG GIANG
'''
import os
import sys
from urlparse import urljoin
import workerpool
sys.path.append('C:/longhoanggiang/pyLib')
# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')

crawlResult = {}

class CrawlJob(workerpool.Job):
    """Worker-pool job wrapping a crawl callback.

    The callback must return a ``(detail, key)`` pair; ``detail`` is
    stored in the module-level ``crawlResult`` dict under ``key`` so the
    main thread can collect everything after the pool drains.
    """

    def __init__(self, callback, *args):
        self.callback = callback
        self.args = args

    def run(self):
        # Invoke the callback and publish its result; item assignment on
        # the shared dict needs no ``global`` declaration.
        detail, key = self.callback(*self.args)
        crawlResult[key] = detail
        
def crawlCategories(url):
    data = []
    while True:
        tree = lib.Web.load(url, cached=True).build_tree()
        for node in tree.xpath("//div[@id='main']/article[contains(@id, 'post')]"):
            title = lib.stringify(node.xpath(".//h2[@class='story_title']"))
            nodeContent = node.xpath(".//div[@class='entry-content']")[0]
            lib.Etree.cleanNodeNextSibling(nodeContent.xpath(".//div[@class='post-ratings']"), True)
            content = lib.getText(nodeContent)
            data.append({'title': title, 'content': content})
            print title
            print '----------------'
            print content
            print '----------------'
            
        nextNode = tree.xpath("//div[@class='wp-pagenavi']/a[@class='nextpostslink']")
        if len(nextNode) == 0: break
        url = urljoin(url, nextNode[0].get('href'))
    return data

def createTable(connection):
    """Create the crawl schema on *connection* if it does not exist yet.

    Tables: ``categories`` (id, name) and ``contents`` (id, catId, name,
    content BLOB, hash), plus lookup indexes on the id/catId columns.
    Safe to call repeatedly (all statements use IF NOT EXISTS).

    :param connection: an open ``sqlite3`` connection; the caller owns
        it and is responsible for committing.
    """
    cursor = connection.cursor()
    try:
        cursor.execute("CREATE TABLE IF NOT EXISTS categories('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'name' VARCHAR(150))")
        cursor.execute("CREATE TABLE IF NOT EXISTS contents('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'catId' INTEGER, 'name' VARCHAR(200), 'content' BLOB, 'hash' VARCHAR(32))")
        cursor.execute("CREATE INDEX IF NOT EXISTS categories_id_index ON categories(id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS contents_id_index ON contents(id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS contents_catId_index ON contents(catId)")
    finally:
        # Close the cursor even if a statement fails, so the connection
        # is not left holding a dangling cursor.
        cursor.close()
        
def crawl(url):
    output = '/longhoanggiang/crawl/cuoibebung/data.sqlite'
    if not os.path.exists(os.path.dirname(output)): os.makedirs(os.path.dirname(output), 0777)
    connection = sqlite3.connect(output)
    createTable(connection)
    tree = lib.Web.load(url).build_tree()
    cursor = connection.cursor()
    for node in tree.xpath("//div[@class='sidebar-inside']//section[contains(@id, 'categories')]//li/a"):
        name = lib.stringify(node)
        if name == '': continue
        link = urljoin(url, node.get('href'))
        print name, link
        if not isinstance(name, unicode): name = name.decode('utf-8')
        cursor.execute("INSERT INTO `categories`(`name`) VALUES(?)", [name])
        catId = cursor.lastrowid
        items = crawlCategories(link)
        for item in items:
            cursor.execute("INSERT INTO `contents`(`catId`, `name`, `content`) VALUES(?, ?, ?)", [catId, item['title'].decode('utf-8'), buffer(lib.compressStr(json.dumps(item['content'])))])
        
    cursor.close()
    connection.commit()
    connection.close()
    
    
if __name__ == '__main__':

    crawl('http://www.cuoibebung.com/')

    logging.info("Finished")
    # Hard-exit so any lingering worker-pool threads cannot keep the
    # process alive. Status 0 signals success — the original exited
    # with 1, which shells and schedulers interpret as failure.
    os._exit(0)



    