# -*- coding: utf-8 -*-
import scrapy
from scrapy.http.response.html import HtmlResponse
from ..items import DoubanbookItem,ItemType

class BookSpider(scrapy.Spider):
    """Crawl Douban book listings for the "programming" (编程) tag.

    For each book on a listing page that has both a title and a rating,
    yield a populated ``DoubanbookItem``; then follow the "next page"
    link until pagination runs out.
    """

    name = 'book'  # spider name, used by `scrapy crawl book`
    allowed_domains = ['douban.com']  # restrict the crawl to this domain
    # First listing page for the tag (start=0 is page one, type=T = by default order).
    start_urls = ['https://book.douban.com/tag/%E7%BC%96%E7%A8%8B?start=0&type=T']
    #start_urls = ['https://movie.douban.com/']

    def parse(self, response: HtmlResponse):
        """Parse one listing page.

        Yields:
            DoubanbookItem: one per book that has both a title and a rating.
            scrapy.Request: a request for the next listing page, if any.
        """
        subjects = response.xpath('//li[@class="subject-item"]')
        self.logger.debug('request User-Agent: %s',
                          response.request.headers.get('User-Agent'))

        for subject in subjects:
            title = subject.xpath('.//h2/a/text()').extract()
            rate = subject.xpath('./div[@class="info"]//span[@class="rating_nums"]/text()').extract()
            if not (title and rate):
                # Some entries (e.g. unrated books) have no rating_nums span.
                # Skip them instead of yielding a half-filled item — the
                # original code yielded `item` unconditionally, leaking items
                # that lacked the title/rate fields.
                self.logger.debug('skipping subject: title=%s rate=%s', title, rate)
                continue
            item = DoubanbookItem()
            item['itemName'] = ItemType.DOUB_BOOK.value
            item['title'] = title[0].strip()
            item['rate'] = rate[0].strip()
            yield item

        # Pagination: href of the <link> element inside the "next" control.
        next_tag = response.xpath('//div[@id="content"]//div[@class="paginator"]/span[@class="next"]/link/@href').extract_first()
        if next_tag is not None:
            # The href is site-relative, so prepend the host.
            yield scrapy.Request(url='https://book.douban.com' + next_tag, callback=self.parse)

    def parse_page(self, response):
        """Fill detail-page fields onto an item passed via ``response.meta``.

        NOTE(review): this method was originally defined *inside* ``parse()``
        and was therefore unreachable dead code — nothing in this spider
        schedules it as a callback or sets ``meta["list"]``, and the fields it
        writes (haibaio/content/zhongzi) are not produced anywhere else here.
        It looks like copy-paste leftover from a movie-site spider. Hoisted to
        class level so it is at least a proper method — confirm whether it
        should be wired up to a detail-page Request or removed entirely.
        """
        # Retrieve the partially-filled item handed over by the listing callback.
        item = response.meta["list"]
        item["haibaio"] = response.xpath("//div[@id='Zoom']//p[1]//img[1]/@src").extract_first()
        item["content"] = response.xpath("//div[@id='Zoom']//p[1]/text()").extract()
        item["zhongzi"] = response.xpath("//div[@id='Zoom']//table//a/text()").extract_first()
        # Hand the completed item to the item pipeline for persistence.
        yield item





