#!/usr/bin/python
#-*-coding:utf-8-*-
# Simple novel-scraping crawler (简易小说采集爬虫)
# Author: Lukin<mylukin@gmail.com>
# Date  : 2008-09-25
import re
import sqlite3
import os.path
import string
import time
from threading import Thread
from Queue import Queue
from download import httpfetch
# Shared work queue of book ids: filled by getall(), drained by the worker threads.
q = Queue()
# Number of concurrent fetcher threads.
MAXC = 8
# Path to the sqlite database holding the BookList table.
basedb = './book.db'
# Root URL of the site being scraped.
urlbase = 'http://www.book8.com/'
# Regexes for extracting metadata fields from a book's info page
# (the pages are GBK-encoded HTML, re-encoded to UTF-8 before matching).
rule_title = r'<h1>(.+)</h1>'
rule_author = r'<td width="25%">作&nbsp;&nbsp;&nbsp; 者：(.*)</td> <td width="25%">全文长度'
rule_clong = r'<td width="25%">全文长度：(.*)字</td>'
rule_type = r'<td width="25%">类&nbsp;&nbsp;&nbsp; 别：(.+)</td> <td width="25%">作&nbsp;&nbsp;&nbsp; 者'
rule_lasttime = r'</td> </tr> <tr> <td>最后更新：(.+)</td> <td>文章状态'
rule_pic = r'<a href=".*" target="_blank"><img src="(.*)" border="0" width="100" height="125" align="right" hspace="5" vspace="5" /></a>'
rule_last = r'<span class="hottext">最近章节：</span><a href="/Book/[0-9]+/[0-9]+/([0-9]+).html">(.+)</a><br />'
# Plain-string markers delimiting the synopsis block (used with sect()).
hottxt_start = '内容简介：</span>'
hottxt_end   = '<script type="text/javascript">'
# Module-level connection; note the worker threads open their own
# connections, since sqlite objects must not cross threads.
conn = sqlite3.connect(os.path.abspath(basedb))
# Return raw bytestrings (not unicode) from queries.
conn.text_factory = str


def thread_fetch():
    """Worker loop: pull book ids off the shared queue forever.

    Each thread opens its own sqlite connection because sqlite
    objects cannot be shared across threads.  The loop never exits;
    the threads are daemons and die with the main thread.
    """
    conn = sqlite3.connect(os.path.abspath(basedb))
    conn.text_factory = str
    # Original mixed 4-space and tab indentation here; normalized to
    # 4 spaces to match the rest of the file.
    while True:
        topic = q.get()
        proc_info(topic, conn)
        q.task_done()


def get_list():
    """Return every book id from the BookList table.

    Returns a list of 1-tuples, e.g. [(7929,), (8001,), ...], as
    produced by cursor.fetchall().
    """
    conn = sqlite3.connect(os.path.abspath(basedb))
    conn.text_factory = str
    c = conn.cursor()
    c.execute('SELECT bid FROM BookList')
    # Renamed from `q`, which shadowed the module-level work queue.
    rows = c.fetchall()
    c.close()
    return rows

def read_info(bid):
    """Fetch the info page for book id *bid* and return it as UTF-8 bytes.

    Returns an empty string's transcoding if all three fetch attempts
    fail (best-effort, matching the original behaviour).
    """
    # Pages are sharded into directories of 1000 books each,
    # e.g. bid '7929' -> /Bookinfo/7/7929.htm
    # (int() replaces the deprecated string.atoi).
    bbid = int(bid, 10) // 1000
    url = urlbase + 'Bookinfo/' + str(bbid) + '/' + bid + '.htm'
    headers = {"Accept": "*/*", "Referer": "http://www.book8.com/", "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"}
    req = ''
    # Up to three attempts; network errors are swallowed and retried
    # (narrowed from a bare except, which also caught KeyboardInterrupt).
    for _ in range(3):
        try:
            req = httpfetch(url, headers, report=False)
            break
        except Exception:
            continue
    # The site serves GBK; normalise to UTF-8 before parsing/storage.
    return req.decode('gbk', 'ignore').encode('utf-8')

def parse_info(html):
    """Extract book metadata from an info-page HTML string (UTF-8 bytes).

    Returns a dict with keys: title, author, clong, pic, lasttime,
    type, lastid, lastname, hottxt.  Raises IndexError if any
    mandatory field is missing from the page.
    """
    info = {}
    info['title'] = clear(re.findall(rule_title, html)[0])
    info['author'] = re.findall(rule_author, html)[0]
    info['clong'] = re.findall(rule_clong, html)[0]
    # The cover picture is optional; run the regex once instead of twice.
    pics = re.findall(rule_pic, html)
    info['pic'] = pics[0] if pics else '0'
    info['lasttime'] = re.findall(rule_lasttime, html)[0]
    info['type'] = re.findall(rule_type, html)[0]
    # rule_last has two groups: (chapter id, chapter title).
    last = re.findall(rule_last, html)
    info['lastid'] = last[0][0]
    info['lastname'] = clear(last[0][1])
    # Synopsis sits between two fixed plain-string markers.
    info['hottxt'] = clear(sect(html, hottxt_start, hottxt_end))
    return info

def sect(html, start, end, cls=''):
    """Return the substring of *html* between *start* and *end*.

    If both *start* and *end* are wrapped in parentheses they are
    treated as regular expressions; otherwise a case-insensitive
    plain-string search is used.  Returns None when *html* is empty
    or either delimiter is not found.  If *cls* is non-empty the
    extracted text is passed through clear() before returning.
    """
    if len(html) == 0:
        return
    # '(' / ')' written literally (the original obscured them as chr(40)/chr(41)).
    if start[:1] == '(' and start[-1:] == ')' and end[:1] == '(' and end[-1:] == ')':
        # Regex mode: find the smallest start...end span, then trim the
        # delimiters off with separate searches inside that span.
        m = re.search(start + '(.*?)' + end, html, re.I)
        if m is None:
            return
        span = m.group()
        lo = re.search(start, span, re.I).end()
        hi = re.search(end, span, re.I).start()
        R = span[lo:hi]
    else:
        # Plain-string mode, case-insensitive.
        lo = html.lower().find(start.lower())
        if lo == -1:
            return
        hi = html[lo + len(start):].lower().find(end.lower())
        if hi == -1:
            return
        R = html[lo + len(start):lo + hi + len(start)]
    if cls != '':
        # BUG FIX: the original called clear(R, cls), but clear() takes a
        # single argument, so any non-empty cls raised TypeError.
        R = clear(R)
    return R

def clear(s):
    """Strip a handful of HTML entity fragments from *s* and return it.

    Note: 'amp;' (not '&amp;') is removed on purpose, so '&amp;'
    collapses to a bare '&' -- this matches the original behaviour.
    """
    # Parameter renamed (was `str`, shadowing the builtin); str.replace
    # replaces the deprecated string.replace; the original's duplicated
    # '&lt;' line was redundant and is dropped.
    for frag in ('amp;', '&nbsp;', '&gt;', '&lt;'):
        s = s.replace(frag, '')
    return s

def data_store(info,bid,conn):
    c = conn.cursor()
    tries = 0
    while tries<10:
    	try:
    		c.execute('UPDATE BookList SET title = ?,author = ?,clong = ? , pic= ? ,lastid = ? ,lastname = ? , hottxt =? ,type = ? ,lasttime = ? WHERE bid = ?;',(info['title'],info['author'],info['clong'], info['pic'] ,info['lastid'], info['lastname'] , info['hottxt'] , info['type'] , info['lasttime'] ,bid))
    		break
    	except:
    		tries += 1
    		time.sleep(5)
    		continue
    conn.commit()
    c.close()
    print '成功更新:'.decode('utf-8','ignore').encode('gbk'),info['title'].decode('utf-8','ignore').encode('gbk')


def proc_info(bid, conn):
    """Fetch, parse, and store the info page for book id *bid*.

    Uses the caller-supplied sqlite *conn* (one per worker thread).
    """
    html = read_info(bid)
    info = parse_info(html)
    data_store(info, bid, conn)
    # The duplicate success print that used to follow here is removed:
    # data_store() already prints the identical message, so each book
    # was being announced twice.

def getall():
    """Queue every book id from the database for the worker threads."""
    for row in get_list():
        q.put(str(row[0]))

# Fill the queue, start MAXC daemon worker threads, then block until
# every queued id has been processed (q.task_done in the workers pairs
# with q.join here).  Daemon threads die with the main thread.
# Tab indentation normalized to the file's 4-space convention.
getall()
for i in range(MAXC):
    t = Thread(target=thread_fetch)
    t.setDaemon(True)
    t.start()
q.join()