import scrapy
from scrapy import Selector, Request
from scrapy.http import HtmlResponse
from scrapy2024.items import MovieItem


class DoubanSpider(scrapy.Spider):
    """Spider that scrapes the Douban Top-250 movie chart.

    Crawls the 10 paginated list pages, builds a partially-filled
    ``MovieItem`` per movie, then follows each movie's detail page to
    add its runtime and rating count before emitting the item.
    """

    # Spider name, used as `scrapy crawl douban`.
    name = "douban"
    # Restrict crawling to the Douban movie domain.
    allowed_domains = ["movie.douban.com"]

    def start_requests(self):
        """Yield requests for the 10 Top-250 list pages (25 movies each)."""
        # Pagination is driven by a `start` offset: 0, 25, 50, ..., 225.
        for page in range(10):
            # If a proxy is needed (e.g. a local VPN), pass it per-request:
            # meta={'proxy': 'http://127.0.0.1:1080'} or 'socks5://127.0.0.1:xxxx'.
            yield Request(url=f"https://movie.douban.com/top250?start={page * 25}")

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse one list page and follow each movie's detail page.

        For every movie entry a ``MovieItem`` is pre-filled with title,
        rank and subject, then handed to :meth:`parse_detail` via
        ``cb_kwargs`` so the detail page can complete it.
        """
        sel = Selector(response)
        # Each <li> in the ordered list is one movie entry.
        list_items = sel.xpath('//*[@id="content"]/div/div[1]/ol/li')
        for li in list_items:
            movie_item = MovieItem()
            movie_item['title'] = li.xpath('./div/div[2]/div[1]/a/span[1]/text()').extract_first()
            movie_item['rank'] = li.xpath('./div/div[2]/div[2]/div/span[2]/text()').extract_first()
            movie_item['subject'] = li.xpath('./div/div[2]/div[2]/p[2]/span/text()').extract_first()

            # URL of this movie's detail page.
            detail_url = li.css('div.info > div.hd > a::attr(href)').extract_first()
            if not detail_url:
                # No detail link found: emit the partial item instead of
                # crashing on Request(url=None), which raises ValueError.
                yield movie_item
                continue

            # Without an explicit callback Scrapy would route the detail
            # response back to `parse`; it needs its own parser instead.
            yield Request(
                url=detail_url,
                callback=self.parse_detail,
                cb_kwargs={'item': movie_item})  # pass the partially-built item along

    def parse_detail(self, response: HtmlResponse, **kwargs):
        """Parse a movie detail page and emit the completed item."""
        movie_item = kwargs.get('item')
        sel = Selector(response)
        # Runtime in minutes, taken from the `v:runtime` microdata attribute.
        movie_item['duration'] = sel.xpath('//*[@id="info"]/*[@property="v:runtime"]/@content').extract_first()
        # NOTE(review): Douban's markup appears to use class "rating_people"
        # for the ratings-count link; "rating_sum" may always yield None —
        # verify against a live detail page before relying on this field.
        movie_item['num_evaluated'] = sel.xpath('//*[@class="rating_sum"]/a/span/text()').extract_first()
        yield movie_item
