# -*- coding: utf-8 -*-
import scrapy
import time
import re
from bs4 import BeautifulSoup
from bookspider.items import BookspiderItem

class BooktxtSpider(scrapy.Spider):
    """Crawl www.booktxt.net's full-novel index and yield one
    BookspiderItem per book detail page.

    Pages on this site are GBK-encoded, so the raw response body is
    decoded explicitly before being handed to BeautifulSoup.
    """
    name = 'booktxt'
    allowed_domains = ['www.booktxt.net']
    start_urls = ['https://www.booktxt.net/xiaoshuodaquan/']

    def parse(self, response):
        """Parse the category index page and follow every book link.

        Yields one scrapy.Request per book, handled by parse_detail.
        """
        # BUG FIX: the original called response.body.decode('gbk') and
        # threw the result away, then parsed the raw bytes.  Decode once
        # and parse the decoded text so GBK pages are read correctly.
        html = response.body.decode('gbk', errors='replace')
        soup = BeautifulSoup(html, 'html.parser')

        for category in soup.select('#main div ul'):
            for book in category.find_all('li'):
                link = book.a
                # Guard against malformed <li> entries with no usable
                # anchor; the original raised TypeError on them.
                if link is None or not link.has_attr('href'):
                    continue
                yield scrapy.Request(url=link['href'], callback=self.parse_detail)

    def parse_detail(self, response):
        """Parse one book detail page and yield a populated BookspiderItem.

        Extracts the title, author, description, category and full-text
        download URL from the page's #maininfo section.
        """
        # NOTE: the original called time.sleep(0.1) here.  A blocking
        # sleep stalls Scrapy's entire single-threaded reactor; use the
        # DOWNLOAD_DELAY setting to throttle requests instead.
        html = response.body.decode('gbk', errors='replace')
        soup = BeautifulSoup(html, 'html.parser')

        # Analyze page elements to get author, title, description and
        # the full-text download address.
        name = soup.select('.box_con #maininfo #info h1')[0].string
        # The author line looks like "作者：XXX"; partition never raises,
        # unlike the original split('：')[1] which IndexError'd on a
        # malformed line.
        author = soup.select('.box_con #maininfo #info p')[0].text.partition('：')[2]
        desc = soup.select('.box_con #maininfo #intro p')[0].string
        down_url = soup.select('.box_con #maininfo #info p font[color]')[0].a['href']
        # Breadcrumb is "... > <category>小说 > ..."; take the middle
        # segment and strip the trailing "小说" characters to leave the
        # bare category name.
        breadcrumb = soup.select('.box_con .con_top')[0].text
        booktype = re.search(r'.+ > (.+) > .+', breadcrumb).group(1).rstrip('小说 ')

        item = BookspiderItem()
        item['name'] = name
        item['author'] = author
        item['showUrl'] = response.url
        item['booktype'] = booktype
        item['describe'] = desc
        item['downUrl'] = down_url
        # Word count and completion state are not available on this
        # page; left as None for downstream pipelines to fill in.
        item['wordNum'] = None
        item['state'] = None

        yield item