import json
import os
import re

import scrapy
from scrapy import Spider
from twisted.internet.defer import Deferred


class VodplaySpider(scrapy.Spider):
    """Crawl ysgc.tv for one show: search it, walk its episode play pages,
    resolve the m3u8 playlists and save the raw .ts segments under
    ``movies/<title>/<episode>/``; on spider close, merge each episode's
    segments into an mp4 with ffmpeg's concat demuxer.
    """

    name = "vodplay"
    # allowed_domains = ["www.ysgc.tv", "v.cdnlz3.com"]
    # start_urls = ["https://www.ysgc.tv/vodsearch/-------------.html?wd=大奉打更人"]
    start_urls = ["https://www.ysgc.tv/vodsearch/-------------.html?wd=清明上河图密码"]
    download_dir = 'movies'  # root directory for all downloaded shows
    title = None  # show title, filled in by parse() from the first search hit

    def parse(self, response):
        """
        Parse the search-result page: extract the detail-page URL and title.
        :param response: search-result page
        :return: Request for the detail page, or None when nothing matched
        """
        div = response.xpath('//div[@class="public-list-div public-list-bj"]')
        self.title = div.xpath('./a/@title').get()
        href = div.xpath('./a/@href').get()
        print(self.title, href)

        # Guard against an empty search result; without it we would create a
        # bogus 'movies/None' directory and request 'https://www.ysgc.tvNone'.
        if not self.title or not href:
            self.logger.warning('no search result on %s', response.url)
            return None

        # Prepare the directory for the downloaded show, e.g. movies/<title>
        os.makedirs(rf'{self.download_dir}/{self.title}', exist_ok=True)

        return scrapy.Request(
            url=f'https://www.ysgc.tv{href}',
            callback=self.parse_detail
        )

    def parse_detail(self, response):
        """
        Parse the detail page: collect the per-episode play-page URLs.
        :param response: detail page
        :return: one Request per episode (limited to the first two)
        """
        # '\.' matches the literal dot of '.html' (an unescaped '.' would
        # match any character there).
        urls = re.findall(r'/vodplay/\d+-1-\d+\.html', response.text)
        print('共有%d集' % len(urls))
        # Only the first two episodes are fetched (demo limit).
        for page, url in enumerate(urls[:2], start=1):
            yield scrapy.Request(
                url=f'https://www.ysgc.tv{url}',
                callback=self.parse_info,
                meta={'page': page}
            )

    def parse_info(self, response):
        """
        Parse one episode page: pull the player_aaaa JSON blob and request
        the newffzy.php endpoint that reveals the m3u8 address.
        :param response: episode play page; meta['page'] is the episode number
        :return: Request for the player endpoint, or None when not found
        """
        # Per-episode directory, e.g. movies/<title>/1
        os.makedirs(rf'{self.download_dir}/{self.title}/{response.meta["page"]}', exist_ok=True)

        # re.search + guard instead of findall()[0]: a layout change must not
        # crash the whole spider with a bare IndexError.
        match = re.search(r'var player_aaaa=(.*?)</script>', response.text)
        if match is None:
            self.logger.warning('player_aaaa not found on %s', response.url)
            return None
        vodplay_data = json.loads(match.group(1))

        url = vodplay_data['url']
        thumb = vodplay_data['vod_pic_thumb']

        return scrapy.Request(
            url=f'https://www.ysgc.tv/static/player/newffzy.php?url={url}&thumb={thumb}',
            callback=self.parse_m3u8,
            meta=response.meta
        )

    def parse_m3u8(self, response):
        """
        Parse the newffzy.php response and extract the index.m3u8 address.
        :param response: player endpoint response
        :return: Request for index.m3u8, or None when not found
        """
        match = re.search(r"url: '(.*?)'", response.text)
        if match is None:
            self.logger.warning('m3u8 url not found on %s', response.url)
            return None
        m3u8_url = match.group(1)
        print(m3u8_url)

        return scrapy.Request(
            url=m3u8_url,
            callback=self.parse_data_m3u8,
            meta=response.meta
        )

    def parse_data_m3u8(self, response):
        """
        Parse index.m3u8, which points at the real playlist (mixed.m3u8).
        :param response: index.m3u8 content
        :return: Request for the real playlist, or None when not found
        """
        match = re.search(r'.*?\.m3u8', response.text)
        if match is None:
            self.logger.warning('no .m3u8 reference in %s', response.url)
            return None
        # The real playlist is addressed relative to index.m3u8.
        data_m3u8_url = response.request.url.replace('index.m3u8', match.group(0))

        return scrapy.Request(
            url=data_m3u8_url,
            callback=self.parse_ts,
            meta=response.meta
        )

    def parse_ts(self, response):
        """
        Parse mixed.m3u8 and request every .ts segment, e.g.
        https://v.cdnlz3.com/20241228/31535_96db027a/2000k/hls/6644d566710000001.ts
        Also writes input.txt, the concat list consumed by ffmpeg in close().
        :param response: mixed.m3u8 content
        :return: one Request per .ts segment (limited to the first 50)
        """
        ts_list = re.findall(r'.*?\.ts', response.text)
        print('片段个数：', len(ts_list))

        # Record the segments to merge later (ffmpeg concat list format).
        with open(fr'{self.download_dir}/{self.title}/{response.meta["page"]}/input.txt', 'w', encoding='utf-8') as fp:
            for ts in ts_list[:50]:  # demo limit: first 50 segments
                # file '<path to merge>'
                fp.write(f"file '{ts}'\n")
                # Segment URLs are relative to the playlist location.
                data_ts_url = response.request.url.replace('mixed.m3u8', ts)
                yield scrapy.Request(
                    url=data_ts_url,
                    callback=self.parse_data,
                    meta=response.meta
                )

    def parse_data(self, response):
        """
        Save one .ts segment to disk, e.g. movies/<title>/1/6644d5667....ts
        :param response: binary segment response; meta['page'] is the episode
        :return: None
        """
        name = response.request.url.split('/')[-1]
        print(name)
        with open(fr'{self.download_dir}/{self.title}/{response.meta["page"]}/{name}', 'wb') as fp:
            fp.write(response.body)

    def close(self, reason: str) -> Deferred[None] | None:
        """
        Called automatically by the engine when the spider is about to close:
        merge each episode's .ts segments into an mp4 via ffmpeg.
        (Declared as a proper instance method; the original `def close(spider,
        reason)` only worked because the bound method's implicit first argument
        is the spider itself.)
        :param reason: why the spider closed
        :return: None
        """
        # Nothing was found/downloaded: skip merging instead of chdir-crashing.
        if self.title is None:
            return None

        os.chdir(fr'{self.download_dir}/{self.title}')

        for episode in os.listdir():
            # ffmpeg -f concat (join) -i <list file> -c copy -threads 4 <output>
            # Paths are quoted so titles containing spaces survive the shell.
            os.system(f'ffmpeg -f concat -i "{episode}/input.txt" -c copy -threads 4 "{episode}_{self.title}.mp4"')
        return None
