import scrapy
from scrapy.cmdline import execute
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from booknet.items import BooknetItem


class ReadSpider(CrawlSpider):
    """Crawl the dushu.com book category 1163 listing pages and follow each
    book's detail page to assemble a complete ``BooknetItem``.

    ``parse_item`` scrapes the listing (title/cover/author/status) and issues
    a follow-up request per book; ``parse_second`` fills in the remaining
    fields (price, publisher, ISBN, dates, synopses) and yields the item.
    """

    name = "read"
    allowed_domains = ["www.dushu.com"]
    start_urls = ["https://www.dushu.com/book/1163.html"]

    # Match both the first page (book/1163.html) and paginated pages
    # (book/1163_2.html, ...); each matched page is handled by parse_item.
    rules = (Rule(LinkExtractor(allow=r"book/1163(_\d+)?\.html"), callback="parse_item", follow=True),)

    def parse_item(self, response):
        """Parse one listing page: build a partial item per book and request
        the book's detail page to complete it.

        Yields ``scrapy.Request`` objects targeting each book's detail URL,
        carrying the partially-filled ``BooknetItem`` in ``meta["book"]``.
        """
        base_url = "https://www.dushu.com"

        for div in response.xpath('//div[@class="book-info"]'):
            title = div.xpath('./h3/a/@title').extract_first()
            href = div.xpath('./h3/a/@href').extract_first()
            # Guard: without a detail link the item cannot be completed, and
            # concatenating None to base_url would raise TypeError and abort
            # the whole callback. Skip malformed entries instead.
            if not href:
                continue
            cover = div.xpath('.//img/@data-original').extract_first()
            author = div.xpath('./p[1]/text()').extract_first()
            status = div.xpath('./p[3]/span/text()').extract_first()
            book = BooknetItem(title=title, cover=cover, author=author, status=status)
            # Visit the detail page, handing the partial item along via meta.
            yield scrapy.Request(url=base_url + href, callback=self.parse_second, meta={"book": book})

    def parse_second(self, response):
        """Parse a book detail page and complete the item passed via meta.

        Yields the finished ``BooknetItem``.
        """
        # Price and publisher from the left-hand info panel.
        item1 = response.xpath('//*[@id="ctl00_c1_bookleft"]')
        price = item1.xpath('./p/span/text()').extract_first()
        publisher = item1.xpath('./table//tr[2]/td[2]/text()').extract_first()

        # ISBN and publication date from the details table.
        # NOTE(review): "ibsn_code" (sic) matches the field name declared on
        # BooknetItem — keep the misspelled key so the item stays valid.
        item2 = response.xpath('//div[@class="book-details"]/table')
        ibsn_code = item2.xpath('.//tr[1]/td[2]/text()').extract_first()
        publisher_time = item2.xpath('.//tr[1]/td[4]/text()').extract_first()

        # Author bio and content synopsis.
        item3 = response.xpath('//div[@class="bookdetails-left"]')
        remark = item3.xpath('./div[4]/div/div/text()').extract_first()
        author_remark = item3.xpath('./div[5]/div/div/text()').extract_first()

        # Fill the remaining fields on the item carried over from parse_item.
        book = response.meta.get("book")
        book['price'] = price
        book['publisher'] = publisher
        book['publisher_time'] = publisher_time
        book['ibsn_code'] = ibsn_code
        book['author_remark'] = author_remark
        book['remark'] = remark
        yield book


if __name__ == "__main__":
    # Allow running this file directly; equivalent to `scrapy crawl read`.
    execute("scrapy crawl read".split())

