# -*- coding: utf-8 -*-#
"""
@File : douban.py
@Description :
@Author : Le.Qing
@Create Time : 2025-05-06 11:12
"""
import json
import time

import scrapy
from scrapy.http import HtmlResponse


# Custom spider class; inherits from scrapy.Spider.
class DoubanSpider(scrapy.Spider):
    # Unique spider name, used by `scrapy crawl douban`.
    name = 'douban'
    # Domains the spider is allowed to crawl.
    allowed_domains = ['douban.com']
    # Initial URL (first page of the Top-250 chart).
    start_urls = ['https://movie.douban.com/top250']

    def start_requests(self):
        """Yield one request per page of the Douban Top-250 chart.

        The chart lists 250 movies, 25 per page, so 10 requests cover it.
        Scrapy schedules them near-simultaneously as soon as the crawl
        starts; throttle via settings if needed, e.g.::

            CONCURRENT_REQUESTS = 16  # default 8; raise for more concurrency
            DOWNLOAD_DELAY = 0.5      # delay (seconds) between requests to avoid bans

        :return: generator of scrapy.Request objects
        """
        for page in range(10):
            url = 'https://movie.douban.com/top250?start={}&filter='.format(page * 25)
            # Use the spider's logger instead of print() so output honors
            # Scrapy's LOG_LEVEL and log formatting configuration.
            self.logger.info('Requesting %s', url)
            yield scrapy.Request(
                url=url,
                method='GET',
                callback=self.parse_response
            )

    # Data-extraction callback; receives the response object from the engine.
    def parse_response(self, response: HtmlResponse, **kwargs):
        """Extract one item per movie from a Top-250 listing page.

        :param response: downloaded listing page
        :yield: dict with keys ``title``, ``rating_num`` and ``quote``
                (each value may be ``None`` if the element is absent)
        """
        page_no = response.xpath('//span[@class="thispage"][1]/text()').get()
        self.logger.info('Parsing page %s', page_no)
        for li in response.xpath('//ol[@class="grid_view"]/li'):
            # First <span class="title"> is the Chinese title; the optional
            # second one holds the original title prefixed with "\xa0/\xa0".
            title = li.xpath('.//span[@class="title"][1]/text()').get()
            alt_title = li.xpath('.//span[@class="title"][2]/text()').get()
            if alt_title:
                title += '/' + alt_title.replace('\xa0/\xa0', '')
            item = {
                "title": title,
                "rating_num": li.xpath('.//span[@class="rating_num"]/text()').get(),
                "quote": li.xpath('.//p[@class="quote"]/span/text()').get(),
            }
            # Each item is handed to the item pipelines (process_item), e.g.
            # for persistence to a database.
            yield item


if __name__ == '__main__':
    # Convenience entry point: launch the crawl through Scrapy's CLI,
    # equivalent to running `scrapy crawl douban` from the shell.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'douban'])
