# -*- coding: utf-8 -*-
'''
Created on Sep 1, 2014

@author: TRAM ANH
'''
import os
import sys
from urlparse import urljoin
import workerpool
# sys.path.append('C:/longhoanggiang/pyLib')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import lib
import logging
import sqlite3
import json

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s', datefmt='%d/%m/%Y %H:%M:%S')




class Crawler():
    
    _url = 'http://vinawap.mobi/stories/truyen-nguoi-lon/'
    
    def getDetail(self, url, chapterName, storyName):
        data = []
        breakWhile = False
        while not breakWhile:
            try:
                tree = lib.Web.load(url, cached=True).build_tree()
                contentNode = tree.xpath("//div[@class='content']//div[@itemprop='articleBody']")
#                 content = lib.getText(contentNode[0]).strip()
                content = lib.Etree.tostring(contentNode)
                print content[:200]
                data.append(content)
                pageNodes = tree.xpath("//div[@class='page']/*")
                for node in pageNodes:
                    if node.tail == None or node.tail.strip() == '': continue
                    nextNode = node.getnext()
                    if nextNode == None: 
                        breakWhile = True
                        break
                    url = urljoin(url, nextNode.get('href'))
                if len(pageNodes) == 0: break
            except:
                logging.error("Error at url: {0}".format(url))
                raise
        result = []
        if len(data) == 1:
            content = "<strong>{0}</strong><br /><hr /><br />{1}".format(chapterName, data[0])
            result.append({'name': chapterName, 'content': content})
        else:
            idx = 1
            for c in data:
                cName = "{0} - {1} ({2})".format(storyName, chapterName, idx) if storyName != chapterName else "{0} - ({1})".format(chapterName, idx)
                content = "<strong>{0}</strong><br /><hr /><br />{1}".format(cName, c)
                result.append({'name': cName, 'content': content})
                idx += 1
        return result
    
    def getChapters(self, url, storyName):
        tree = lib.Web.load(url, cached=True).build_tree()
        data = []
        for node in tree.xpath("//p[contains(., 'TRỌN BỘ')]/../following-sibling::*[1]/ul/li/a".decode('utf-8')):
            name = lib.stringify(node)
            link = urljoin(url, node.get('href'))
            if 'tin-tuc' in link: continue
            data.append({'name': name, 'url': link})
        data.reverse()
        if len(data) == 0:
            data.append({'name': storyName, 'url': url})
        return data
    
    def getStories(self, startPage=1, endPage=2):
        
        path = "/longhoanggiang/database/truyen-nguoi-lon-vinawap-{0}{1}".format(startPage, endPage)
        if os.path.exists(path):
            os.unlink(path)
        connection = sqlite3.connect(path)
        self.createTable(connection)
        cursor = connection.cursor()
        
        
        check = {}
        for page in range(startPage, endPage + 1):
            url = self._url if page == 1 else self._url + "page/{0}/".format(page)
            tree = lib.Web.load(url, cached=True).build_tree()
            for node in tree.xpath("//div[@class='listupdate liststory']/ul/li/a"):
                name = lib.stringify(node)
                link = node.get('href')
                if link == '': continue
                link = urljoin(url, link)
                key = lib.md5(link)
                if check.get(key, False): continue
                check[key] = True
                print name, link
                cursor.execute("INSERT INTO udv_content('hash', 'name', 'is_header') VALUES(?, ?, 1)", [lib.md5(link), name.decode('utf-8')])
                chapters = self.getChapters(link, name)
                for chapter in chapters:
                    check[lib.md5(chapter['url'])] = True
                    contents = self.getDetail(chapter['url'], chapter['name'], name)
                    for c in contents:
                        detail = c['content']
                        cursor.execute("INSERT INTO udv_content('name', 'content') VALUES(?, ?)", [c['name'].decode('utf-8'), buffer(lib.compressStr(json.dumps(detail)))])
        
        cursor.close()
        connection.commit()
        connection.close()
    
    def createTable(self, connection):
        cursor = connection.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS udv_content('id' INTEGER PRIMARY KEY AUTOINCREMENT, 'hash' VARCHAR(32), 'name' VARCHAR(200), 'content' BLOB, 'is_header' INTEGER DEFAULT 0)")
        cursor.execute("CREATE INDEX IF NOT EXISTS hash_index ON udv_content(hash)")
        cursor.execute("CREATE INDEX IF NOT EXISTS id_index ON udv_content(id)")
        cursor.close()
         
            
if __name__ == '__main__':

    c = Crawler()
    c.getStories(13, 25)

    logging.info("Finished")
    # os._exit skips interpreter cleanup (presumably to kill any lingering
    # workerpool threads — confirm); status 0 now reports success instead
    # of the previous erroneous failure code 1.
    os._exit(0)