import scrapy
from video_scrapy.items import DetailsItem


# Example spider (uses the "daily anime" category as the sample target).
class DetailsSpiderSpider(scrapy.Spider):
    """Scrape basic video info from listing pages, then follow each
    detail page to collect the play-page URL of every episode."""

    name = "details_spider"
    allowed_domains = ["girigirilove.com"]

    def __init__(self, page_size=1, model=1, search_get=None, *args, **kwargs):
        """
        :param page_size: number of listing pages to crawl (mode 1).
        :param model: mode selector — 1: automatic bulk crawl,
                      3: targeted keyword search; anything else is a no-op.
        :param search_get: search keyword used by mode 3.
        """
        # Forward *args/**kwargs so Scrapy's from_crawler machinery
        # (which passes extra keyword arguments) keeps working.
        super().__init__(*args, **kwargs)
        # Arguments supplied via `scrapy crawl -a ...` arrive as strings,
        # so coerce the numeric ones; otherwise `self.model == 1` never matches.
        self.page_size = int(page_size)      # number of listing pages to crawl
        self.model = int(model)              # mode selector
        self.search_get = search_get         # search keyword (mode 3)

    def start_requests(self):
        """Dispatch the initial requests according to the selected mode."""
        # Mode 1: fully automatic bulk crawl over `page_size` listing pages.
        if self.model == 1:
            for page_number in range(1, self.page_size + 1):
                url = f"https://anime.girigirilove.com/show/2--------{page_number}---/"
                yield scrapy.Request(
                    url=url,
                    callback=self.parse,
                    dont_filter=True,
                    meta={"page_number": page_number},
                )

        # Mode 3: targeted keyword search (first results page only).
        elif self.model == 3:
            # BUG FIX: the original referenced an undefined `page_number` here
            # (NameError) and omitted the meta entry that `parse` reads.
            url = f"https://anime.girigirilove.com/show/2---{self.search_get}-----1---/"
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={"page_number": 1},
            )

        else:
            return None

    def parse(self, response):
        """Extract basic video info (title / detail-page URL / episode count)
        from a listing page and follow each detail page."""
        if response.status == 200:
            data_list = response.xpath(
                "//div[@class='box-width wow fadeInUp']"
                "/div[@class='flex wrap border-box public-r']/div"
            )
            for data in data_list:
                # Video title.
                title = data.xpath(".//a[@class='public-list-exp']/@title").get()
                # Detail page, e.g. https://anime.girigirilove.com/GV26625/ from /GV26625/
                href = ('https://anime.girigirilove.com'
                        + data.xpath(".//a[@class='public-list-exp']/@href").get())
                # Episode-count label.
                episode = data.xpath(
                    ".//a[@class='public-list-exp']"
                    "/span[@class='public-list-prb hide ft2']/text()"
                ).get()

                data_dict = {
                    # .get() keeps this safe if a request was ever issued
                    # without the page_number meta entry.
                    "page_number": response.meta.get("page_number"),
                    "title": title,        # video title
                    "href": href,          # detail-page link
                    "episode": episode,    # episode-count label
                }
                print(data_dict)
                yield scrapy.Request(url=data_dict['href'], callback=self.video_url, meta=data_dict)

        else:
            print('网络请求失败！')

    def video_url(self, response):
        """Collect the play-page URL of every episode and emit a DetailsItem."""
        # Copy so we do not mutate the shared response.meta mapping in place.
        data_dict = dict(response.meta)
        episode_s = response.xpath(
            '//div[@class="anthology wow fadeInUp animated"]'
            '/div[@class="anthology-list top20 select-a"]'
            '//ul[@class="anthology-list-play size"]/li/a/@href'
        ).getall()
        # Build the absolute URL of every episode's play page.
        data_dict['episode_urls'] = [
            'https://anime.girigirilove.com' + href for href in episode_s
        ]

        # Hand the item off to the pipeline.
        details_item = DetailsItem()
        details_item['page_number'] = data_dict['page_number']
        details_item['title'] = data_dict['title']
        details_item['episode'] = data_dict['episode']
        details_item['episode_urls'] = data_dict['episode_urls']
        # BUG FIX: the original nested single quotes inside a single-quoted
        # f-string — a SyntaxError on every Python version before 3.12.
        print(f"{data_dict['title']}已提交管道！")
        yield details_item
