import asyncio
import json
import os.path
import random
import re
import subprocess
import time
from asyncio import StreamReader

import aiohttp
import requests
from lxml import etree

# import execise_proxy_ip

# Root of the movie site whose front page carries the ranking sidebar.
url_index = "https://dandanzan.net"
# Per-movie play-list API path, assembled as:
#   url_index + url_movie_item_1 + <movie_id> + url_movie_item_2
url_movie_item_1 = "/fetch_plays/"
url_movie_item_2 = "/chao_qing"
# CDN base that hosts the index.m3u8 playlists (paths scraped from play pages
# are appended to this).
url_lib = "https://baidu.sd-play.com/"


# 获取IP数组
# ips = execise_proxy_ip.get_ips()


# 获取电影排行榜列表
# Scrape the front page's ranking sidebar.
async def get_movie_list():
    """Return a dict mapping each ranked movie's title to its id string.

    The id is the basename of the movie's href with the ".html" suffix
    removed; it is later fed to the play-list API.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url_index, ssl=False) as response:
            page = etree.HTML(await response.content.read())
    anchors = page.xpath(
        '//div[@class="sidebar"]/div[@class="lists lists-rank lists-title-lines-1"]/div[@class="lists-content"]/ul/li/h2/a')
    title_to_id = {}
    for anchor in anchors:
        title = anchor.xpath("./@title")[0]
        href = anchor.xpath("./@href")[0]
        # ".../12345.html" -> "12345" (drop the 5-char ".html" suffix)
        title_to_id[title] = os.path.basename(href)[:-5]
    return title_to_id


# 获取所有电影的超清列表链接
# Fetch the super-definition (超清) play list for one movie and create its
# download directory.
async def get_chaoqing_movie_list(movie_title, movie_id):
    """Fetch the super-definition play list for *movie_id*.

    Returns a dict with keys ``video_plays``, ``movie_name`` and
    ``movie_id``, or -1 when the HTTP request fails.
    """
    list_url = url_index + url_movie_item_1 + str(movie_id) + url_movie_item_2
    async with aiohttp.ClientSession() as session:
        async with session.get(list_url, ssl=False) as response:
            # Check the status BEFORE parsing the body (the original parsed
            # first and checked afterwards).
            if response.status != 200:
                return -1
            ret = await response.text()
            # The endpoint returns JSON; parse it with json.loads instead of
            # eval() — eval of server-controlled text is arbitrary code
            # execution, and also chokes on JSON true/false/null.
            data = json.loads(ret)
            # exist_ok avoids a crash if the directory already exists or two
            # tasks race on the same title.
            os.makedirs(movie_title, exist_ok=True)
            return {
                "video_plays": data.get("video_plays"),
                "movie_name": movie_title,
                "movie_id": movie_id,
            }


# 获取视频的index_m3u8接口的返回值
# Resolve a movie's play page to its index.m3u8 URL and save the playlist.
async def get_index_m3u8(title_and_href):
    """Download ``<movie_name>/index.m3u8`` for one movie.

    Returns -1 on any HTTP failure or when no playlist path can be found in
    the play page; on success writes the file and returns None.
    """
    movie_name = title_and_href["movie_name"]
    play_data = title_and_href["video_plays"][0]["play_data"]
    async with aiohttp.ClientSession() as session:
        async with session.get(play_data, ssl=False) as response:
            # Status first; the original read/parsed the body before checking.
            if response.status != 200:
                return -1
            ret = await response.text("utf8")
            # The play page embeds a path like /2022xxxx/a/b/c/index.m3u8.
            matches = re.findall(r"/\d.*?/\w.*?/\w.*?/\w.*?/index.m3u8", ret, re.S)
            if not matches:  # layout changed / movie unavailable — no IndexError
                return -1
            index_m3u8 = url_lib + matches[0]
            # Reuse the existing session instead of opening a second one.
            async with session.get(index_m3u8, ssl=False) as res:
                if res.status != 200:
                    return -1
                # Only write the file once the download is known-good (the
                # original wrote it, then deleted it on a bad status).
                with open(f"{movie_name}/index.m3u8", "wb") as f:
                    f.write(await res.content.read())


async def get_key_ts_by_index_m3u8(title_and_id):
    """Parse ``<movie_name>/index.m3u8``: download its AES key and collect ts URLs.

    Returns ``(ts_list, movie_name)`` on success, or -1 when the playlist is
    missing, contains no key/segments, or the key download fails.
    """
    movie_name = title_and_id["movie_name"]
    if not os.path.exists(f"{movie_name}/index.m3u8"):
        return -1
    with open(f"{movie_name}/index.m3u8") as f:
        m3u8 = f.read()
    ts_list = re.findall(r"https:.*?\.ts", m3u8)
    keys = re.findall(r"https:.*?\.key", m3u8)
    # Guard a malformed/empty playlist instead of crashing with IndexError.
    if not keys or not ts_list:
        return -1
    key = keys[0]
    key_name = os.path.basename(key)
    async with aiohttp.ClientSession() as session:
        async with session.get(key, ssl=False) as response:
            # Don't write a bogus key file on an HTTP error.
            if response.status != 200:
                return -1
            with open(f"{movie_name}/{key_name}", "wb") as f:
                f.write(await response.content.read())
    return ts_list, movie_name


async def get_ts(ts, movie_name):
    """Download one .ts segment into the movie's directory.

    Returns -1 (skip) when the segment file already exists. Uses aiohttp:
    the original called the blocking ``requests.get`` inside a coroutine,
    which stalls the event loop and serialises every segment download.
    """
    ts_name = os.path.basename(ts)
    if os.path.exists(f"{movie_name}/{ts_name}"):
        return -1
    async with aiohttp.ClientSession() as session:
        async with session.get(ts, ssl=False) as res:
            content = await res.content.read()
    with open(f"{movie_name}/{ts_name}", "wb") as f:
        f.write(content)
        print(f"{ts_name}写入完成")


async def ts_to_mp4_by_ffmpeg(title_chaoqing_href_list):
    """Rewrite the playlist to reference local files, then merge with ffmpeg.

    Fixes over the original:
    * ``f.write(await m3u8)`` awaited a plain ``str`` — a guaranteed
      TypeError at runtime; the await is removed.
    * The stripped CDN prefix was one hard-coded dated URL; generalized to
      stripping ANY ``https://.../`` prefix so every segment/key line becomes
      a bare local file name (segments and the key are saved by basename).
    * ``os.chdir`` inside a concurrently-running coroutine changes the CWD
      for every other task in the process; replaced by
      ``subprocess.run(..., cwd=movie_name)`` with an argument list (no
      shell string interpolation of the movie name).
    """
    movie_name = title_chaoqing_href_list["movie_name"]
    with open(f"{movie_name}/index.m3u8", "r") as f:
        m3u8 = f.read()
    # Turn every absolute https URL in the playlist into its basename.
    m3u8 = re.sub(r"https://\S+/", "", m3u8)
    with open(f"{movie_name}/index.m3u8", "w") as f:
        f.write(m3u8)
    print(m3u8)
    subprocess.run(
        ["ffmpeg", "-allowed_extensions", "ALL",
         "-protocol_whitelist", "file,http,crypto,tcp",
         "-i", "index.m3u8", "-c", "copy", f"{movie_name}.mp4"],
        cwd=movie_name,
    )
    print(movie_name + "下载完毕")


async def main():
    """Pipeline: ranking → play lists → index.m3u8 → keys + ts segments → mp4."""
    # 1. Movie ranking: {title: movie_id}
    title_and_id_list = await get_movie_list()

    # 2. Super-definition play list per movie; drop failed requests (-1).
    tasks_item_1 = [asyncio.create_task(get_chaoqing_movie_list(title, id))
                    for title, id in title_and_id_list.items()]
    # asyncio.wait raises ValueError on an empty set — guard each wave.
    if tasks_item_1:
        await asyncio.wait(tasks_item_1)
    title_super_definition_href_list = [
        task.result() for task in tasks_item_1 if task.result() != -1
    ]

    # 3. Fetch each movie's index.m3u8 to disk.
    tasks_item_2 = [asyncio.create_task(get_index_m3u8(title_and_href))
                    for title_and_href in title_super_definition_href_list]
    if tasks_item_2:
        await asyncio.wait(tasks_item_2)

    # 4. Download the AES keys and collect each movie's .ts segment URLs.
    tasks_item_3 = [asyncio.create_task(get_key_ts_by_index_m3u8(title_and_id))
                    for title_and_id in title_super_definition_href_list]
    if tasks_item_3:
        await asyncio.wait(tasks_item_3)
    ts_list_and_name = [task.result() for task in tasks_item_3
                        if task.result() != -1]

    # 5. Download all segments, one concurrent wave per movie.
    for ts_list, movie_name in ts_list_and_name:
        tasks_item_4 = [asyncio.create_task(get_ts(ts, movie_name))
                        for ts in ts_list]
        if tasks_item_4:
            await asyncio.wait(tasks_item_4)

    # 6. Merge each movie's segments into an .mp4 with ffmpeg.
    tasks_item_5 = [asyncio.create_task(ts_to_mp4_by_ffmpeg(title_super_definition_href))
                    for title_super_definition_href in title_super_definition_href_list]
    # BUG FIX: the original awaited tasks_item_4 here — the wrong wave, and a
    # NameError whenever no movie produced any segments.
    if tasks_item_5:
        await asyncio.wait(tasks_item_5)


if __name__ == '__main__':
    # Time the whole pipeline and report the elapsed seconds on completion.
    start_time = time.time()
    asyncio.run(main())
    elapsed = time.time() - start_time
    print(f"下载完毕,耗时：{elapsed}")
