import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import MovieItem

"""
通过命令 scrapy genspider -t crawl ssr1_movies_c ssr1.scrape.center生成
继承CrawlSpider 能够快速的帮我们找到符合规则的链接地址  
放到调对象 度器 再交给下载器进行请求 得到响应再交给Spider进行解析


"""

class Ssr1MoviesCSpider(CrawlSpider):
    """Crawl spider for movie listings on ssr1.scrape.center.

    CrawlSpider extracts links matching ``rules`` from every response it
    receives, schedules them, and routes the fetched detail pages to
    ``parse_item``.
    """

    name = "ssr1_movies_c"
    # allowed_domains = ["ssr1.scrape.center"]  # Restricts crawling to this
    # domain (off-domain links such as ads are dropped). With it enabled the
    # rules below can be looser; without it they must be written precisely.
    start_urls = [f"https://ssr1.scrape.center/page/{i}" for i in range(1, 11)]

    # LinkExtractor pulls every link matching ``allow`` (a regex) out of each
    # listing-page response. follow=False: do not extract further links from
    # the detail pages themselves.
    rules = (Rule(LinkExtractor(allow=r"/detail/\d+"), callback="parse_item", follow=False),)
    # Alternative: select links by XPath instead of a URL regex:
    # rules = (Rule(LinkExtractor(restrict_xpaths=r"//a[@class='name']"), callback="parse_item", follow=False),)

    def parse_item(self, response):
        """Parse one movie detail page (/detail/<id>) into a MovieItem.

        :param response: Scrapy response for a detail page.
        :return: populated ``MovieItem``.
        """
        item = MovieItem()
        # The numeric movie id is the last path segment of the detail URL.
        item['id'] = response.url.split('/')[-1]
        item['title'] = response.xpath('//h2[@class="m-b-sm"]/text()').get()
        item['category'] = '、'.join(response.xpath('//div[@class="categories"]//span/text()').getall())
        item['addr'] = response.xpath('//div[contains(@class,"info")]/span[1]/text()').get()
        item['timer'] = response.xpath('//div[contains(@class,"info")]/span[3]/text()').get()
        item['date'] = response.xpath('//div[contains(@class,"info")][2]/span/text()').get()
        item['drama'] = response.xpath('//div[@class="drama"]/p/text()').get()
        item['score'] = response.xpath('//p[contains(@class,"score ")]/text()').get()
        item['cover'] = response.xpath('//img[@class="cover"]/@src').get()
        return item


# class Ssr1MovieCSpider(CrawlSpider):
#     name = "ssr1_movies_c"
#     # allowed_domains = ["ssr1.scrape.center"]
#     start_urls = [f"https://ssr1.scrape.center/page/{i}" for i in range(1, 2)]
#
#     # 链接提取器，会自动在每个响应进行链接提取
#     # allow 正则匹配规则
#     # callback 得到链接响应后面的解析方法
#     # rules = (Rule(LinkExtractor(allow=r"/detail/\d+"), callback="parse_item", follow=False),)
#     rules = (Rule(LinkExtractor(restrict_xpaths=r"//a[@class='name']"), callback="parse_item", follow=False),)
#
#     def parse_item(self, response):
#         item = MovieItem()
#         item['id'] = response.url.split('/')[-1]
#         item['title'] = response.xpath('//h2[@class="m-b-sm"]/text()').get()
#         item['category'] = '、'.join(response.xpath('//div[@class="categories"]//span/text()').getall())
#         item['addr'] = response.xpath('//div[contains(@class,"info")]/span[1]/text()').get()
#         item['timer'] = response.xpath('//div[contains(@class,"info")]/span[3]/text()').get()
#         item['date'] = response.xpath('//div[contains(@class,"info")][2]/span/text()').get()
#         item['drama'] = response.xpath('//div[@class="drama"]/p/text()').get()
#         item['score'] = response.xpath('//p[contains(@class,"score ")]/text()').get()
#         item['cover'] = response.xpath('//img[@class="cover"]/@src').get()
#         return item
