import re

import scrapy


class Top250Spider(scrapy.Spider):
    """Spider for the Douban Top 250 movie chart.

    For every list page it yields:
      * one ``{"type": "info", ...}`` item per movie (consumed by a
        pipeline that writes a CSV file),
      * one image-download request per movie poster, whose response is
        handled by :meth:`parse_img`,
      * one request for the next list page, until ``start=225``.
    """

    name = 'top250'
    allowed_domains = ['douban.com', 'doubanio.com']
    start_urls = ['https://movie.douban.com/top250?start=0&filter=']

    def start_requests(self):
        """Generate the initial requests.

        Overriding this method lets the seed URLs go through the normal
        scheduler with duplicate filtering enabled (the default), instead
        of the parent class's behaviour.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one Top-250 list page.

        Yields movie-info dicts, poster-download requests, and a request
        for the following list page (if any).
        """
        print("-----回调函数被执行----", response.url)

        # NOTE: derive the next page from the URL of the response being
        # processed rather than from a class/instance counter — responses
        # may arrive out of order, so a shared counter would be unreliable.
        # Example URL: https://movie.douban.com/top250?start=75&filter=
        ret = re.match(r".*start=(\d+).*", response.url)
        start_number = int(ret.group(1)) if ret else 0

        # One <li> per movie inside the ranked list.
        li_list = response.xpath("//ol[@class='grid_view']/li")
        for li_temp in li_list:
            # extract_first() returns None when the node is missing.
            img_src = li_temp.xpath(".//img/@src").extract_first()
            title = li_temp.xpath(".//span[@class='title'][1]/text()").extract_first()
            rating_num = li_temp.xpath(".//span[@class='rating_num']/text()").extract_first()
            people_num = li_temp.xpath(".//div[@class='star']/span[4]/text()").extract_first()

            print("---->", img_src, title, rating_num, people_num)

            # Item handed to the pipeline, which saves it to the CSV file.
            yield {
                "type": "info",
                "img_src": img_src,
                "title": title,
                "rating_num": rating_num,
                "people_num": people_num
            }

            # Schedule the poster download only when both the image URL and
            # the title were actually extracted: scrapy.Request(url=None)
            # raises ValueError, and a None title would break the filename
            # built in parse_img. New Request objects are dedup-filtered by
            # default, so an already-seen URL is not fetched again.
            if img_src and title:
                yield scrapy.Request(url=img_src, callback=self.parse_img,
                                     cb_kwargs={"img_name": title})

        # Follow the next list page; valid offsets are 0, 25, ..., 225.
        start_number += 25
        if start_number <= 225:
            next_page_url = "https://movie.douban.com/top250?start=%d&filter=" % start_number
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def parse_img(self, response, img_name):
        """Handle a downloaded poster image.

        ``img_name`` arrives via ``cb_kwargs`` from :meth:`parse`. Yields
        an item carrying the raw image bytes for the pipeline to persist.
        """
        print("------图片处理的回调函数------")
        yield {
            "type": "img",
            "img_name": img_name + ".jpg",
            "img_bytes": response.body
        }
