import re

import scrapy
from scrapy_redis.spiders import RedisSpider


class Top250Spider(RedisSpider):
    """Distributed spider for Douban Movie Top250 listing pages.

    Inherits from RedisSpider: seed URLs are pushed into Redis (key
    ``<name>:start_urls`` by convention) instead of being hard-coded, so
    multiple nodes running the same code do not re-crawl the same seeds.
    """

    name = 'top250'
    allowed_domains = ['douban.com', 'doubanio.com']

    # With RedisSpider there is no need for start_urls: in a distributed
    # deployment every node runs identical code, and if each started from the
    # same hard-coded URLs the seed pages would be fetched redundantly.
    # start_urls = ['https://movie.douban.com/top250?start=0&filter=']

    # Paging offset pattern, e.g. https://movie.douban.com/top250?start=50&filter=
    # Compiled once at class level instead of on every parse() call.
    _START_RE = re.compile(r"https://movie.douban.com/top250\?start=(\d+).*")

    def parse(self, response):
        """Parse one listing page.

        Yields, in order:
          * one ``{"type": "info", ...}`` dict per movie (consumed by the
            CSV pipeline),
          * one image-download Request per movie that has both a poster URL
            and a title,
          * a Request for the next listing page while ``start`` <= 225
            (the site's 10 pages cover start = 0..225 in steps of 25).
        """
        print("-----回调函数被执行----", response.url)

        # Recover the current paging offset from the URL; the seed URL may
        # lack a start parameter, in which case we treat it as page 0.
        match = self._START_RE.match(response.url)
        start_num = int(match.group(1)) if match else 0

        # One <li> per movie inside the ranked list.
        for li_temp in response.xpath("//ol[@class='grid_view']/li"):
            img_src = li_temp.xpath(".//img/@src").extract_first()
            title = li_temp.xpath(".//span[@class='title'][1]/text()").extract_first()
            rating_num = li_temp.xpath(".//span[@class='rating_num']/text()").extract_first()
            people_num = li_temp.xpath(".//div[@class='star']/span[4]/text()").extract_first()

            print("---->", img_src, title, rating_num, people_num)

            # Item handed to the pipeline that writes the CSV file.
            yield {
                "type": "info",
                "img_src": img_src,
                "title": title,
                "rating_num": rating_num,
                "people_num": people_num
            }

            # Fetch the poster image. extract_first() may return None for a
            # malformed entry: Request(url=None) raises ValueError and
            # None + ".jpg" raises TypeError in parse_img, so guard both.
            if img_src and title:
                yield scrapy.Request(url=img_src, callback=self.parse_img,
                                     cb_kwargs={"img_name": title})

        # Schedule the next listing page (25 movies per page, last page at
        # start=225).
        start_num += 25
        print("------------------------------------------->>>>start_num=", start_num)
        if start_num <= 225:
            next_page_url = "https://movie.douban.com/top250?start=%d&filter=" % start_num
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def parse_img(self, response, img_name):
        """Yield the downloaded poster bytes for the file-writing pipeline.

        ``img_name`` is the movie title passed via cb_kwargs by parse().
        """
        print("------图片处理的回调函数------")
        yield {
            "type": "img",
            "img_name": img_name + ".jpg",
            "img_bytes": response.body
        }