import requests,re,asyncio,aiohttp,aiofiles
from bs4 import BeautifulSoup

#负责保存文件,requests与open整合
def save(url,name,mode=None,encoding=None):
    resp = requests.get(url)
    with open('{}'.format(name),mode=mode,encoding=encoding) as f:
        f.write(resp.content)

#负责requests工作
# Thin wrapper around requests for fetching a page.
def down(url):
    """GET *url* and return the requests.Response object.

    The original body built empty headers/params/data dicts that were
    never passed to the request; they have been removed.
    """
    return requests.get(url)

def get_iframm_src(res):
    """Extract the iframe m3u8 url embedded in the page source *res*.

    Joins every '"url":"..."' value that precedes an '"url_next"' key,
    trims surrounding whitespace, and un-escapes the JSON-style slashes.
    Returns an empty string when nothing matches.
    """
    matches = re.findall(r'"url":"(.*?)","url_next"', res)
    joined = ''.join(matches)
    return joined.strip().replace('\\/', '/')

async def aio_download_ts(url, name, session):
    """Fetch one .ts segment from *url* and store it as video2/<name>."""
    print('downloadts')
    async with session.get(url) as resp:
        body = await resp.content.read()
        # Write the downloaded segment to the local video2/ directory.
        async with aiofiles.open(f'video2/{name}', mode='wb') as f:
            await f.write(body)
    print(name, 'success')

async def aio_download(url):
    """Read the second-level playlist m3u82.m3u8 and download every .ts
    segment concurrently with one shared aiohttp session.

    Args:
        url: Base url prepended to each segment path found in the playlist.
    """
    print('download')
    async with aiohttp.ClientSession() as session:
        tasks = []
        async with aiofiles.open('m3u82.m3u8', mode='r', encoding='utf-8') as f:
            async for line in f:
                line = line.strip()
                # Skip tag/comment lines and blanks (the original indexed
                # line[0], which raises IndexError on an empty line).
                if not line or line.startswith("#"):
                    continue
                # line is already stripped, so no trailing newline leaks
                # into the request url (the original kept it).
                ts_url = url + line
                # Last path component is the segment file name (the original
                # hard-coded split index 5 and chopped the newline with [:-1]).
                name = line.split('/')[-1]
                task = asyncio.create_task(aio_download_ts(ts_url, name, session))
                print(ts_url, name, type(name))
                tasks.append(task)
        if tasks:  # asyncio.wait() raises ValueError on an empty collection
            await asyncio.wait(tasks)
def main(url):
    """End-to-end scrape: page source -> first m3u8 -> second m3u8 -> ts segments.

    Args:
        url: The episode page to scrape.

    Raises:
        ValueError: if the first playlist contains no media line.
    """
    # 1. Fetch the main page source.
    res = down(url).text

    # 2. Resolve the iframe's first-level m3u8 url and save that playlist.
    first_m3u8_url = get_iframm_src(res)
    save(url=first_m3u8_url, name='m3u81.m3u8', mode='wb')

    # 3. The first playlist points at the real (second-level) playlist:
    #    take the first non-comment line and join it to the CDN host.
    second_m3u8_url = None
    with open('m3u81.m3u8', mode='r', encoding='utf-8') as f:
        for line in f:
            if line.startswith("#"):
                continue
            second_m3u8_url = ('https://s1.fsvod1.com' + line).strip()
            print(second_m3u8_url)
            break
            # e.g. https://s1.fsvod1.com/20220310/yaZTFWvY/1200kb/hls/index.m3u8
            #      https://s1.fsvod1.com/20220310/yaZTFWvY/1200kb/hls/UV13VRy5.ts
    if second_m3u8_url is None:
        # The original fell through to a NameError here; fail with a clear message.
        raise ValueError('no media line found in m3u81.m3u8')

    # NOTE(review): the original hit a 404 until the url was normalized to a
    # plain stripped string — keep the .strip() above.
    save(name='m3u82.m3u8', mode='wb', url=second_m3u8_url)  # 3.2 save the second playlist

    # 4. Base url for the .ts segments (the playlist lines are absolute paths).
    second_m3u8_url_up = second_m3u8_url.replace('/20220310/yaZTFWvY/1200kb/hls/index.m3u8', '')

    # 5. Download all segments concurrently; asyncio.run replaces the manual
    #    new_event_loop / set_event_loop / run_until_complete dance.
    asyncio.run(aio_download(second_m3u8_url_up))






if __name__ == '__main__':
    # Entry point: scrape the episode page and download its video segments.
    target_url = 'http://www.niumatv.com/play/5563-2-1.html'
    main(target_url)