import re

import scrapy

from ..items import MovieItem

"""
1、scrapy 调试指令 scrapy shell + 网址  
他会把响应对象保存  
我们后续就只要对响应对象进行操作来看代码写对没而不是写一段代码就运行一遍程序
减少项目在编写调试时对网址的请求数量 有效避免被封ip的风险
    
"""

class Ssr1MoviesSpider(scrapy.Spider):
    """Spider for https://ssr1.scrape.center.

    Crawls the listing pages, follows every movie detail link found on
    each page, and yields one populated ``MovieItem`` per movie.
    """

    name = "ssr1_movies"
    allowed_domains = ["ssr1.scrape.center"]
    # range(1, 2) crawls only page 1; widen the range to crawl more pages.
    start_urls = [f"https://ssr1.scrape.center/page/{i}" for i in range(1, 2)]

    def parse(self, response):
        """Extract every movie detail URL on a listing page and follow it.

        :param response: response for one listing page
        :return: generator of ``scrapy.Request``, one per detail page,
                 dispatched to :meth:`parse_detail`
        """
        detail_urls = re.findall(r'/detail/\d+', response.text)
        for url in detail_urls:
            # BUG FIX: the original used ``return`` here, which exited after
            # the first URL so only one movie per page was ever scraped.
            # ``yield`` follows every detail link on the page.
            yield scrapy.Request(
                url='https://ssr1.scrape.center' + url,
                callback=self.parse_detail,
            )

    def parse_detail(self, response):
        """Parse one movie detail page into a ``MovieItem``.

        :param response: response for one detail page
        :return: generator yielding a single populated ``MovieItem``
        """
        item = MovieItem()
        item['id'] = response.url.split('/')[-1]
        item['title'] = response.xpath('//h2[@class="m-b-sm"]/text()').get()
        item['category'] = '、'.join(
            response.xpath('//div[@class="categories"]//span/text()').getall()
        )
        item['addr'] = response.xpath('//div[contains(@class,"info")]/span[1]/text()').get()
        item['timer'] = response.xpath('//div[contains(@class,"info")]/span[3]/text()').get()
        item['date'] = response.xpath('//div[contains(@class,"info")][2]/span/text()').get()
        item['drama'] = response.xpath('//div[@class="drama"]/p/text()').get()
        item['score'] = response.xpath('//p[contains(@class,"score ")]/text()').get()
        item['cover'] = response.xpath('//img[@class="cover"]/@src').get()
        # BUG FIX: the original only printed the item; yielding it hands it
        # to Scrapy's item pipelines / feed exporters so it is actually saved.
        yield item

