from DrissionPage import ChromiumPage
import scrapy
from video_scrapy.items import VideoScrapyItem



class VideoSpiderSpider(scrapy.Spider):
    """Resolve episode pages to their m3u8 playlist, then download every .ts segment.

    Two operating modes:
      * ``json_list`` — a list of ``{'title': ..., 'episode_urls': [...]}`` dicts;
        one request chain is started per episode URL.
      * ``r_url`` — a single episode page URL (no title is available in this mode).
    """

    name = "video_spider"
    allowed_domains = ["girigirilove.com"]

    # BUG FIX: the constructor was misspelled ``__int__`` so it never ran and
    # ``self.json_list`` / ``self.r_url`` were never set, crashing start_requests.
    def __init__(self, json_list=None, r_url=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.json_list = json_list  # list of {'title': ..., 'episode_urls': [...]}
        self.r_url = r_url          # single episode page URL (fallback mode)

    @staticmethod
    def _resolve_episode(episode_url):
        """Open an episode page in a browser and extract the m3u8 playlist URL.

        Returns ``(m3u8_url, last_number)``: ``last_number`` is the episode
        number taken from the trailing ``-<n>`` of *episode_url*; ``m3u8_url``
        is the ``url`` query parameter of the player iframe's ``src``, or
        ``None`` when that parameter is missing.
        """
        from urllib.parse import urlparse, parse_qs

        page = ChromiumPage()
        try:
            page.get(episode_url)
            # The real playlist URL is carried in the player iframe's src.
            src = page.wait.ele_displayed('x://td[@id="playleft"]/iframe').attr('src')
        finally:
            # Always close the browser, even if the iframe never appears.
            page.quit()

        # Episode number is the last '-'-separated token of the page URL.
        last_number = episode_url.rstrip('/').split('-')[-1]

        query_params = parse_qs(urlparse(src).query)
        m3u8_url = query_params['url'][0] if 'url' in query_params else None
        return m3u8_url, last_number

    def start_requests(self):
        """Request the m3u8 playlist for every configured episode."""
        if self.json_list is not None:
            # BUG FIX: original tested ``is None`` here and then iterated
            # ``self.json_list`` anyway — a guaranteed TypeError.
            for entry in self.json_list:
                title = entry['title']                      # series title
                for episode_url in entry['episode_urls']:   # play-page URLs
                    m3u8_url, last_number = self._resolve_episode(episode_url)
                    if m3u8_url is None:
                        print("未找到url参数")
                        continue
                    # BUG FIX: meta previously hard-coded both values to None,
                    # losing the title/episode info for the pipeline.
                    yield scrapy.Request(
                        url=m3u8_url,
                        callback=self.parse,
                        meta={'title': title, 'last_number': last_number},
                    )
        elif self.r_url is not None:
            m3u8_url, last_number = self._resolve_episode(self.r_url)
            if m3u8_url is None:
                print("未找到url参数")
                return
            # BUG FIX: the original referenced an undefined ``title`` here
            # (NameError). No title exists in single-URL mode; pass None.
            yield scrapy.Request(
                url=m3u8_url,
                callback=self.parse,
                meta={'title': None, 'last_number': last_number},
            )

    def parse(self, response):
        """Parse the m3u8 playlist and request every .ts segment listed in it."""
        title = response.meta['title']              # series title
        last_number = response.meta['last_number']  # episode number

        for line in response.text.split('\n'):
            segment = line.strip()                  # .ts segment file name
            if not segment.endswith('.ts'):
                continue
            # BUG FIX: segment URLs were joined against a hard-coded base path
            # for one specific show. m3u8 segment paths are relative to the
            # playlist URL, so resolve them against response.url instead.
            yield scrapy.Request(
                url=response.urljoin(segment),
                callback=self.parse_1,
                meta={'number_': segment, 'title': title, 'last_number': last_number},
            )

    def parse_1(self, response):
        """Package a downloaded .ts segment into an item for the pipeline."""
        item = VideoScrapyItem()
        item['title'] = response.meta['title']              # series title
        item['last_number'] = response.meta['last_number']  # episode number
        item['file_name'] = response.meta['number_']        # segment name, used for the output filename
        item['video_con'] = response.body                   # raw segment bytes
        # BUG FIX: the original f-string nested single quotes inside a
        # single-quoted literal — a SyntaxError before Python 3.12.
        print(f"{response.meta['number_']}提交管道")
        yield item