#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@author zyx
@since 2022/2/18 08:42
@file: m3u8_1905.py
@desc: 协程爬取1905电影
"""
import asyncio
import os
from urllib.parse import urljoin

import aiohttp
import requests

# Browser-like User-Agent so the video CDN does not reject the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/98.0.4758.102 Safari/537.36'
}

# Scrape a 1905.com movie via its HLS (m3u8) playlist.
# Player page: https://www.1905.com/vod/play/516604.shtml
# NOTE(review): the path embeds a timestamp + signature segment, so this
# exact URL has almost certainly expired -- refresh it from the player page.
m3u8_url = 'https://m3u8i.vodfile.m1905.com/202202190027/5b34abb9e548d649e16a1819f483af96' \
           '/movie/2019/03/28/m201903286TO0ZJYJNYCKNTNY/E6EA61B864A09818E3F40C9DC.m3u8'

# timeout= guards against hanging forever on a dead CDN node.
m3u8_text = requests.get(url=m3u8_url, headers=headers, timeout=10).text
m3u8_text = m3u8_text.strip()
# print(m3u8_text)

# A playlist line is a tag ('#EXT...'), a media URI, or blank.  Keep only
# media URIs.  The original used split('\n') and only skipped '#' lines, so
# interior blank lines (and '\r' on CRLF playlists) slipped through and
# urljoin() turned them into bogus duplicates of the playlist URL.
ts_url_list = [
    urljoin(m3u8_url, line)
    for line in (raw.strip() for raw in m3u8_text.splitlines())
    if line and not line.startswith('#')
]

# Windows caps the default number of open files at ~509, so limit
# download concurrency with a semaphore.
# https://www.cnblogs.com/jiyu-hlzy/p/15236547.html
sem = asyncio.Semaphore(10)


async def get_req(url):
    """Fetch one ts segment, holding `sem` to bound concurrency.

    Args:
        url: absolute URL of the segment to download.

    Returns:
        (url, body_bytes) -- the URL is passed through so the done-callback
        can derive the output file name.
    """
    async with sem:
        # One short-lived session per request keeps this simple; for very
        # large playlists a single shared ClientSession would be cheaper.
        async with aiohttp.ClientSession() as sess:
            # sess.get() is itself an async context manager; the extra
            # `await` in the original (`async with await sess.get(...)`)
            # was redundant.
            async with sess.get(url=url, headers=headers) as resp:
                resp_data = await resp.read()
    return url, resp_data


def download(t):
    """Done-callback: write a finished segment download to tsfiles/.

    Args:
        t: a completed asyncio Task whose result is the (url, resp_data)
           tuple produced by get_req().
    """
    try:
        # result() re-raises any exception the task hit; the original let
        # that propagate out of the callback and get swallowed by the loop.
        url, resp_data = t.result()
    except Exception as e:
        # One failed segment should not abort the rest of the crawl;
        # report it and move on.
        print(f'segment download failed: {e}')
        return
    file_name = url.split('/')[-1]
    # Create the output directory on demand -- the original raised
    # FileNotFoundError when tsfiles/ did not already exist.
    os.makedirs('tsfiles', exist_ok=True)
    ts_path = f'tsfiles/{file_name}'
    with open(ts_path, 'wb') as fp:
        fp.write(resp_data)
    print(f'{file_name}下载成功')


# Schedule one download task per segment; `download` fires as each finishes.
# NOTE: crawling too fast occasionally produces
#   "Cannot connect to host xxxxx:443 ssl:default [None]"
# -- lower the Semaphore value if that happens.
def _schedule(url):
    # Attach the write-to-disk callback at scheduling time so the
    # comprehension below stays a one-liner.
    fut = asyncio.ensure_future(get_req(url))
    fut.add_done_callback(download)
    return fut


tasks = [_schedule(ts_url) for ts_url in ts_url_list]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
