# Scrapy spider that crawls Douban book-tag listing pages
# (https://book.douban.com/tag/...) and extracts per-book metadata
# (title, author, press, publish date, price, rating, rating count,
# one-line introduction) into BooksNovelItem objects.
import scrapy

from ..items import BooksNovelItem


class DoubanSpider(scrapy.Spider):
    """Crawl Douban book listing pages for one tag and yield BooksNovelItem objects.

    Walks the paginated listing for a single tag (currently 诗歌 / poetry),
    extracting title, author, press, publish date, price, rating, rating
    count and a short introduction for each book.
    """

    name = 'douban'
    allowed_domains = ['douban.com']

    # Offset of the next listing page to request; Douban listing pages
    # step by 20 entries, up to start=1000.
    page = 0

    # Listing URL template for the tag being crawled (诗歌 / poetry).
    # Alternate tags, kept for quick switching:
    #   小说 (fiction):    https://book.douban.com/tag/%E5%B0%8F%E8%AF%B4?start={}&type=T
    #   文学 (literature): https://book.douban.com/tag/%E6%96%87%E5%AD%A6?start={}&type=T
    #   散文 (essays):     https://book.douban.com/tag/%E6%95%A3%E6%96%87?start={}&type=T
    base_url = 'https://book.douban.com/tag/%E8%AF%97%E6%AD%8C?start={}&type=T'

    def start_requests(self):
        """Issue the first listing-page request.

        NOTE(review): the previous version built a login payload with
        hard-coded credentials (phone number + password) that was never
        sent; it has been removed. If authenticated crawling is needed,
        load credentials from settings or the environment — never commit
        them to source.
        """
        yield scrapy.Request(self.base_url.format(0), callback=self.parse,
                             dont_filter=True)

    def parse(self, response, **kwargs):
        """Extract every book entry from one listing page, then paginate.

        Yields one BooksNovelItem per //div[@class="info"] node, then (while
        the offset is <= 1000) schedules the next listing page.
        """
        for info in response.xpath('//div[@class="info"]'):
            # Build a fresh item per book: reusing one mutable item across
            # yields lets later mutations leak into items already handed to
            # the pipeline.
            item = BooksNovelItem()
            # extract_first() returns None when the node is missing; guard
            # with `or ''` so the chained str methods cannot raise.
            item['book_name'] = (info.xpath("./h2/a/@title").extract_first() or '').strip()
            # Publication line, "/"-separated: typically
            # "author / press / publish date / price", but the field count
            # varies (e.g. author names containing "/", or missing price).
            some_info = (info.xpath("./div[@class='pub']/text()").extract_first() or '').strip().split("/")
            item['grade'] = info.xpath(
                "./div[@class='star clearfix']/span[@class='rating_nums']/text()").extract_first()
            # Rating count arrives like "(1234人评价)" — strip the parens.
            item['count'] = ((info.xpath(
                "./div[@class='star clearfix']/span[@class='pl']/text()").extract_first() or '')
                             .replace("(", "").replace(")", "").strip())
            item['introduction'] = ((info.xpath("./p/text()").extract_first() or '')
                                    .replace("\n", "").strip())
            item['author'] = some_info[0]
            if len(some_info) == 4:
                # Normal case: author / press / date / price.
                item['press'] = some_info[1]
                item['publish_time'] = some_info[2]
                item['price'] = some_info[3]
            elif len(some_info) == 3:
                # Price missing.
                item['press'] = some_info[1]
                item['publish_time'] = some_info[2]
            elif len(some_info) == 5:
                # Author field itself contained a "/": fields shift right.
                item['press'] = some_info[2]
                item['publish_time'] = some_info[3]
                item['price'] = some_info[4]
            yield item

        # Paginate once per response. (Previously this sat inside the item
        # loop, so each of the ~20 books on a page bumped self.page by 20
        # and scheduled its own overlapping next-page request.)
        if self.page <= 1000:
            self.page += 20
            yield scrapy.Request(url=self.base_url.format(self.page),
                                 callback=self.parse)
