#!/usr/bin/python
#-*-coding:utf-8-*-
# 简易小说采集爬虫
# Author: stimjoe<stimjoe@gmail.com>
# Date  : 2010-07-14
import re
import sqlite3
import os.path
import string
import time
from threading import Thread
from Queue import Queue
from download import httpfetch
# Base URL of the scrape target site.
urlbase = 'http://www.book8.com/'
# Regex for chapter links on a book index page: captures (chapter id, chapter title).
rule_lists = '<a href="([0-9]+).html">(.+)</a>'
# Current book id; set by the driver loop at the bottom of the file and
# read by the worker threads.
bid = ''
def createDatabase(bid, conn1):
    """(Re)initialise the per-book database schema.

    Drops any existing chapter-list table and recreates it empty, and
    creates the chapter-content table if it does not exist yet.

    bid   -- book id (currently unused; kept for interface compatibility)
    conn1 -- open sqlite3 connection for this book's database
    """
    c = conn1.cursor()
    # Recreate the chapter-list table from scratch so stale rows from a
    # previous crawl of the same book do not linger. (The original
    # probed sqlite_master by hand; DROP TABLE IF EXISTS is equivalent.)
    c.execute('DROP TABLE IF EXISTS List')
    c.execute('''CREATE TABLE IF NOT EXISTS List (  id INTEGER  PRIMARY KEY, Cid INTEGER, Title TEXT(60) );''')
    # Chapter contents are preserved across runs (no DROP here).
    c.execute('''CREATE TABLE IF NOT EXISTS Book (  id INTEGER  PRIMARY KEY, Title TEXT ,text text );''')
    # Commit the schema changes.
    conn1.commit()
    c.close()


def get_list():
    """Return all book ids to crawl from the master database.

    Reads the BookList table of ./book.db (resolved relative to the
    current working directory) and returns a list of (bid,) row tuples.
    """
    conn = sqlite3.connect(os.path.abspath('./book.db'))
    conn.text_factory = str
    try:
        c = conn.cursor()
        c.execute('SELECT bid FROM BookList')
        rows = c.fetchall()
        c.close()
        return rows
    finally:
        # BUG FIX: the original never closed this connection (leak).
        conn.close()

def read_Book_list(bid):
    """Fetch the chapter index page of a book and return it as UTF-8 text.

    bid -- book id as a decimal string, e.g. '8313'.

    The site shards books into directories of 1000, so book 8313 lives
    under /Book/8/8313/index.html.
    """
    # int() replaces the deprecated py2-only string.atoi.
    bbid = int(bid, 10) // 1000
    url = urlbase + 'Book/' + str(bbid) + '/' + bid + '/index.html'
    headers = {"Accept": "*/*", "Referer": "http://www.book8.com/", "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"}
    req = ''
    # Retry up to three times on transient fetch failures.
    for _ in range(3):
        try:
            req = httpfetch(url, headers, report=False)
            break
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit). After 3 failures req stays ''.
            continue
    # Pages are served as GBK; normalise to UTF-8 for downstream parsing.
    return req.decode('gbk', 'ignore').encode('utf-8')

def parse_book_list(html, pattern=None):
    """Extract (chapter id, chapter title) pairs from an index page.

    html    -- index page markup (UTF-8 text from read_Book_list)
    pattern -- optional regex override with two capture groups;
               defaults to the module-level rule_lists.
    """
    if pattern is None:
        pattern = rule_lists
    return re.findall(pattern, html)

def data_store_lists(lasts, conn1):
    """Insert newly discovered chapters into the List table.

    lasts -- list of (chapter id, title) tuples from parse_book_list
    conn1 -- open sqlite3 connection for this book's database

    Chapters titled '本章为防采集章节' (the site's anti-scraping decoys)
    are skipped; already-known chapter ids are left untouched.
    """
    c = conn1.cursor()
    stored = 0
    for last in lasts:
        # Skip decoy chapters. (Replaces the archaic `cmp(...) <> 0`.)
        if last[1] != '本章为防采集章节':
            c.execute('SELECT * FROM List WHERE Cid = ?;', (last[0],))
            if c.fetchone():
                # BUG FIX: the original printed `name[0]` where `name`
                # was undefined (NameError); report the duplicate id.
                print(last[0])
            else:
                c.execute('INSERT INTO List (Cid,Title) VALUES (?,?);', (last[0], last[1]))
                # BUG FIX: the original counter started at 1 and also
                # counted duplicates, over-reporting the stored total.
                stored = stored + 1
    conn1.commit()
    c.close()
    # Console is GBK (Windows); the source literal is UTF-8.
    print('成功入库:'.decode('utf-8', 'ignore').encode('gbk') + str(stored) + '章'.decode('utf-8', 'ignore').encode('gbk'))

def read_book_content(bid, cid):
    """Fetch a single chapter page and return it as UTF-8 text.

    bid -- book id (decimal string)
    cid -- chapter id (decimal string)
    """
    # int() replaces the deprecated py2-only string.atoi.
    bbid = int(bid, 10) // 1000
    # e.g. http://www.book8.com/Book/8/8313/123456.html
    url = urlbase + 'Book/' + str(bbid) + '/' + bid + '/' + cid + '.html'
    print(url)
    headers = {"Accept": "*/*", "Referer": "http://www.book8.com/",
               "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"}
    req = ''
    # Retry up to three times on transient fetch failures.
    for _ in range(3):
        try:
            req = httpfetch(url, headers, report=False)
            break
        except Exception:
            # Narrowed from a bare except; after 3 failures req stays ''.
            continue
    # Pages are GBK; normalise to UTF-8.
    return req.decode('gbk', 'ignore').encode('utf-8')

           
def parse_book_content(html):
    """Pull the chapter title and body text out of a chapter page.

    Returns a dict with keys 'title' and 'content', both run through
    clear() to strip entities and stray markup.
    """
    body = sect(html, '<div id="content">', '<div align=center id="BookSee_Btm">')
    raw_title = re.findall(r'<div id="title">.+<br>(.+)</div>', html)[0]
    return {
        'content': clear(body),
        'title': clear(raw_title),
    }
def data_store_content(con, cid, conn1):
    """Store one chapter's text in the Book table, replacing any old row.

    con   -- dict with 'title' and 'content' (from parse_book_content)
    cid   -- primary-key id for the chapter row
    conn1 -- open sqlite3 connection for this book's database
    """
    c = conn1.cursor()
    if con['content'] is not None or con['title'] is not None:
        c.execute('SELECT * FROM Book where id = ?', (cid,))
        if c.fetchone():
            # Replace a previously stored copy of this chapter.
            c.execute('DELETE FROM Book where id = ?', (cid,))
        c.execute('INSERT INTO Book (id,Title,text) VALUES (?,?,?);', (cid, con['title'], con['content'],))
        # BUG FIX: the original committed and closed the cursor only on
        # the first-insert path, so re-crawled chapters were never
        # committed and the cursor leaked.
        conn1.commit()
    c.close()
    print(cid)
    
def get_data_list(bid, conn1):
    """Return every row of the List table as (id, Cid, Title) tuples.

    bid   -- book id (unused; kept for interface compatibility)
    conn1 -- open sqlite3 connection for this book's database
    """
    c = conn1.cursor()
    c.execute('SELECT * FROM List ;')
    rows = c.fetchall()
    # BUG FIX: the original wrote `c.close` without parentheses, so the
    # cursor was never actually closed.
    c.close()
    return rows
def clear(text):
    """Strip HTML entities and stray markup fragments from scraped text.

    Removes 'amp;', '&nbsp;', '&gt;', '&lt;' and '</div>' from text.
    """
    # str.replace instead of the deprecated py2 string.replace, and the
    # parameter no longer shadows the builtin `str`. The original also
    # repeated the '&lt;' replacement; once is enough.
    for junk in ('amp;', '&nbsp;', '&gt;', '&lt;', '</div>'):
        text = text.replace(junk, '')
    return text


def sect(html, start, end, cls=''):
    """Return the substring of html between the markers start and end.

    If both markers are wrapped in parentheses (i.e. look like regex
    groups) a case-insensitive regex search is used; otherwise a plain
    case-insensitive substring search. Returns None when html is empty
    or either marker is missing. If cls is non-empty the extracted text
    is additionally run through clear().
    """
    if len(html) == 0:
        return
    # Regex extraction: both markers look like '(...)' groups.
    # (chr(40)/chr(41) are '(' and ')'.)
    if start[:1] == chr(40) and start[-1:] == chr(41) and end[:1] == chr(40) and end[-1:] == chr(41):
        reHTML = re.search(start + '(.*?)' + end, html, re.I)
        if reHTML == None:
            return
        reHTML = reHTML.group()
        # Locate the markers again inside the matched slice to trim them.
        intStart = re.search(start, reHTML, re.I).end()
        intEnd = re.search(end, reHTML, re.I).start()
        R = reHTML[intStart:intEnd]
    # Plain (case-insensitive) substring extraction.
    else:
        # Position of the start marker; give up if absent.
        intStart = html.lower().find(start.lower())
        if intStart == -1:
            return
        # Position of the end marker, searched only after the start marker.
        intEnd = html[intStart + len(start):].lower().find(end.lower())
        if intEnd == -1:
            return
        R = html[intStart + len(start):intStart + intEnd + len(start)]
    if cls != '':
        # BUG FIX: clear() takes a single argument; the original passed
        # cls as a second positional argument and raised TypeError.
        R = clear(R)
    return R

def make(bid):
    """Build the chapter list for one book and queue every chapter.

    Opens (or creates) ./db/<bid>.db, resets its schema, fills the List
    table from the book's online index page, then enqueues each List
    row id onto the shared work queue q for the worker threads.
    """
    db_path = os.path.abspath('./db/' + bid + '.db')
    conn1 = sqlite3.connect(db_path)
    conn1.text_factory = str
    createDatabase(bid, conn1)
    chapters = parse_book_list(read_Book_list(bid))
    data_store_lists(chapters, conn1)
    for row in get_data_list(bid, conn1):
        q.put(row[0])

# Shared work queue of chapter row ids; fed by make(), drained by the
# worker threads running thread_fetch().
q = Queue()
# Number of concurrent fetcher threads.
MAXC = 8

def proc_info(cid, bid, conn1):
    """Fetch, parse and store one chapter.

    cid   -- row id in the List table (as queued by make())
    bid   -- book id (decimal string)
    conn1 -- open sqlite3 connection for this book's database
    """
    # BUG FIX: the original indexed the *builtin* `id` function
    # (`id[1]`, `id[0]`), raising TypeError on every item. The queue
    # only carries the List row id, so look the chapter id (Cid) up
    # from the List table here.
    c = conn1.cursor()
    c.execute('SELECT Cid FROM List WHERE id = ?;', (cid,))
    row = c.fetchone()
    c.close()
    con = parse_book_content(read_book_content(bid, str(row[0])))
    data_store_content(con, str(cid), conn1)
    
def thread_fetch():
    """Worker thread: consume chapter row ids from q forever.

    Each worker keeps its own sqlite connections (sqlite objects must
    not be shared across threads), one per book database. Runs forever;
    the threads are daemons, so q.join() in the driver governs shutdown.
    """
    global bid
    # BUG FIX: the original opened a single connection at thread start
    # using the module-global `bid`, which is still '' when the threads
    # are spawned -- it connected to './db/.db'. Resolve the database
    # per work item instead, caching one connection per book.
    # NOTE(review): `bid` can still advance to the next book while old
    # items are queued; a per-item (bid, cid) queue payload would fix
    # that race for good.
    conns = {}
    while True:
        topic = q.get()
        book = bid
        if book not in conns:
            conns[book] = sqlite3.connect(os.path.abspath('./db/' + book + '.db'))
            conns[book].text_factory = str
        proc_info(topic, book, conns[book])
        q.task_done()

# Spin up MAXC daemon worker threads; they block on q.get() until
# make() enqueues chapter row ids.
for i in range(MAXC):
	t = Thread(target=thread_fetch)
	t.setDaemon(True)
	t.start()

# Crawl every book listed in ./book.db, one after another.
# NOTE(review): `bid` is a module global also read by the workers;
# items from one book may still be queued when bid moves on to the
# next book -- confirm books are meant to be processed one at a time.
for bookid in get_list():
    bid = str(bookid[0])
    make(bid)
# Block until every queued chapter has been processed.
q.join()
    