from urllib.parse import urljoin
import scrapy
from scrapy import cmdline
from scrapy.http import HtmlResponse
from dbdy_test.items import DbdyTestItem


class Dbtop250Spider(scrapy.Spider):
    """Crawl the Douban Movie Top 250.

    `parse` scrapes the list pages (title / rating / quote / detail URL) and
    follows each movie's detail page; `movie_detail` adds the synopsis and
    yields the finished item.
    """

    name = "dbtop250"
    allowed_domains = ["movie.douban.com"]
    start_urls = ["https://movie.douban.com/top250"]

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse one Top-250 list page.

        Yields a Request to each movie's detail page (carrying the partly
        filled item via ``meta``) and, when a "next page" link is present,
        a Request for the following list page.
        """
        for li in response.xpath('//*[@id="content"]/div/div[1]/ol/li'):
            item = DbdyTestItem()
            item['title'] = li.xpath('./div/div[2]/div[1]/a/span[1]/text()').get()
            item['rating'] = li.xpath('./div/div[2]/div[2]/div/span[2]/text()').get()
            item['quote'] = li.xpath('./div/div[2]/div[2]/p[2]/span/text()').get()
            item['detail_url'] = li.xpath('./div/div[1]/a/@href').get()
            if item['detail_url']:
                yield scrapy.Request(url=item['detail_url'],
                                     callback=self.movie_detail,
                                     meta={'item': item})  # pass item to callback
            else:
                # No detail link found: yield what we have rather than
                # crashing on Request(url=None) and losing the item.
                yield item

        # Follow the "next page" link, if any, so the whole Top 250 is crawled.
        # Evaluate the XPath once instead of twice.
        next_href = response.xpath(
            '//*[@id="content"]/div/div[1]/div[2]/span[3]/a/@href').get()
        if next_href:
            yield scrapy.Request(response.urljoin(next_href), callback=self.parse)

    # def start_requests(self):
    #     """Alternative: generate all page URLs up front (controls page count)."""
    #     for page in range(0, 10):
    #         next_url = f"https://movie.douban.com/top250?start={page*25}&filter="
    #         yield scrapy.Request(next_url, callback=self.parse)

    def movie_detail(self, response):
        """Extract the synopsis from the detail page and yield the item."""
        item = response.meta['item']
        # Bug fix: the original assigned the raw SelectorList; extract the
        # text nodes and join them into a single string.
        item['info_data'] = ''.join(
            response.xpath('//*[@id="link-report-intra"]/span[1]/span/text()').getall()
        ).strip()
        yield item


if __name__ == '__main__':
    # Launch the spider via the Scrapy CLI; append '--nolog' to the
    # argument list to suppress log output.
    cmdline.execute(['scrapy', 'crawl', 'dbtop250'])
