# -*- coding: utf-8 -*-
import scrapy


class DoubanspiderSpider(scrapy.Spider):
    """Spider that crawls the Douban Top-250 movie chart.

    For every film it yields a dict with the title, rating score, one-line
    introduction ("quote") and poster image URL, then follows the pagination
    link until the last page.
    """

    # Spider name used by `scrapy crawl doubanspider`
    name = 'doubanspider'

    # allowed_domains = ['https://movie.douban.com/top250']   # restrict crawling to this domain
    # Entry-point URL(s) for the crawl
    start_urls = ['https://movie.douban.com/top250/']

    # Browser-like headers: Douban rejects requests with the default
    # Scrapy User-Agent, so every request must carry these.
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
    }

    def start_requests(self):
        """Override the default so initial requests carry the custom headers."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)

    def parse(self, response):
        """Extract one item per movie on the page, then follow the next-page link.

        Yields:
            dict: keys ``film_name`` (str or None), ``score`` (list[str]),
                ``Introduction`` (list[str]) and ``img`` (list[str]) — the
                list-vs-scalar mix is kept as-is for downstream pipelines.
            scrapy.Request: for the next page, when one exists.
        """
        # Each movie entry lives in a <div class="item"> container.
        items = response.xpath("//div[@class='item']")
        for item in items:
            yield {
                "film_name": item.xpath(
                    'div[@class="info"]/div[@class="hd"]/a/span[@class="title"]/text()'
                ).extract_first(),   # two "title" spans exist; take the first (original-language title)
                "score": item.xpath(
                    'div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[@class="rating_num"]/text()'
                ).extract(),
                "Introduction": item.xpath(
                    'div[@class="info"]/div[@class="bd"]/p[@class="quote"]/span[@class="inq"]/text()'
                ).extract(),
                "img": item.xpath(
                    'div[@class="pic"]/a/img/@src'
                ).extract()
            }

        # The pagination widget exposes a relative href (e.g. "?start=25&filter=")
        # inside <span class="next">; it is absent on the last page.
        next_urls = response.css('div.paginator span.next a::attr(href)').extract()
        if next_urls:
            # urljoin resolves the relative href against the current page URL,
            # avoiding the fragile hard-coded base-URL concatenation.
            next_url = response.urljoin(next_urls[0])
            self.logger.info('Following next page: %s', next_url)
            yield scrapy.Request(next_url, callback=self.parse, headers=self.headers)
