import argparse
import asyncio
import json
import os
import queue
import shutil
import sys
from urllib.parse import urljoin

import aiohttp
import ffmpeg
import requests
from fake_useragent import UserAgent


# CLI — this downloader currently only supports .m3u8 playlists
parser = argparse.ArgumentParser(description='视频下载器目前仅支持 .m3u8文件')

# Directory the videos are downloaded into
parser.add_argument("-d", "--dir", type=str, default=None, help="download video dir; 视频下载文件夹")
# Input file listing one video per line in the form: name,m3u8-url
parser.add_argument("-f", "--file", type=str, default=None, help="m3u8 url file; 存放.m3u8链接文件 格式: 视频名,m3u8链接\n")
# Concurrent download count (default 50; iQiyi needs fewer — see __main__)
parser.add_argument("-t", "--thread", type=int, default=None, help="download thread; 并发下载数,默认50, 下载爱奇艺视频时需要限制并发数少于10\n")
# Parsed command-line arguments
args = parser.parse_args()

ua = UserAgent()

# Base request headers shared by every HTTP call; handle_headers() adds the
# site-specific Origin/Referer fields on top of these.
header = {
	'User-Agent': ua.random,
	'Accept': '*/*',
	'Accept-language': 'zh-CN,zh;q=0.9',
	'Connection': 'keep-alive',
	'sec-ch-ua-mobile': '?0',
	'Sec-Fetch-Dest': 'empty',
	'Sec-Fetch-Mode': 'cors',
	'Sec-Fetch-Site': 'cross-site',
}
# Where to find the playlist link for each supported site:
#   Tencent Video (v.qq.com): the `vurl` value inside bosskv
#   Youku:                    the .m3u8 link
#   iQiyi:                    the /dash link (JSON; see handle_aqiyi_m3u8)
#   Mango TV (mgtv):          the .m3u8 link

# Parsed playlists: each entry holds a queue of ts-segment URLs plus paths
ts_info_queue = queue.Queue()
# Video entries (name / url / target dir) awaiting playlist parsing
video_info_queue = queue.Queue()
# Directories whose downloaded segments await merging into one file
concat_queue = queue.Queue()


# 读取文件
def read_file(file_path):
	with open(file_path, 'r') as f:
		return f.readlines()


# 创建目录
def create_dir(name):
	if not os.path.exists(name):
		os.makedirs(name)
		print(name, '=== 目录创建成功')
	else:
		print(name, '=== 目录存在')

# Build request headers with the site-specific Origin/Referer for *url*
def handle_headers(url):
	"""Return the shared ``header`` dict updated for the site *url* belongs to.

	Note: this (intentionally, matching the original design) mutates the
	module-level ``header`` dict and returns it. The stale ``Origin`` and
	``Referer`` left by a previous call are dropped first — otherwise one
	site's Referer leaks into requests for a different site (e.g. an mgtv
	Referer sent to Youku's CDN).
	"""
	header.pop('Origin', None)
	header.pop('Referer', None)
	# Mango TV
	if 'mgtv.com' in url:
		header['Origin'] = 'https://www.mgtv.com'
		header['Referer'] = 'https://www.mgtv.com'
	# Tencent Video
	elif 'tc.qq.com' in url:
		header['Origin'] = 'https://v.qq.com'
		header['Referer'] = 'https://v.qq.com'
	# Youku
	elif 'cibntv.net' in url:
		header['Origin'] = 'https://v.youku.com'
	# iQiyi
	elif 'iqiyi.com' in url:
		header['Origin'] = 'https://www.iqiyi.com'
	return header


# Extract the embedded m3u8 playlist from iQiyi's /dash JSON response
def handle_aqiyi_m3u8(content):
	"""Return the first non-null ``m3u8`` value from the JSON in *content*.

	The response shape used here is ``data.program.video[*].m3u8``.
	Raises ``Exception`` with a descriptive message when the JSON cannot be
	parsed, the expected keys are missing, or no entry carries an m3u8 —
	the original silently returned ``None`` in that last case, which only
	surfaced later as a confusing AttributeError in the caller.
	"""
	try:
		video_info = json.loads(content)['data']['program']['video']
		for video in video_info:
			m3u8 = video.get('m3u8')
			if m3u8 is not None:
				return m3u8
	except Exception as e:
		raise Exception(f'爱奇艺数据解析失败, 错误: {e}')
	raise Exception('爱奇艺数据解析失败, 错误: 未找到 m3u8 数据')


# Parse the .m3u8 playlist of one queued video and enqueue its ts segments
def handle_ts_url():
	"""Pop one entry from ``video_info_queue``, fetch and parse its m3u8
	playlist, and put a ``{'ts_list', 'path', 'video_url'}`` record on
	``ts_info_queue``.

	Fixes over the original hand-rolled URL splicing:
	- ``urljoin`` resolves relative segment paths against the playlist URL,
	  correctly handling query strings, ``../`` components and absolute
	  http(s) lines (which pass through unchanged);
	- each line is stripped, so CRLF playlists no longer leave a trailing
	  ``\\r`` inside the segment URL;
	- ``#EXT-…`` tag lines that happen to contain "ts" are skipped instead
	  of being enqueued as bogus segment URLs.
	"""
	video_info = video_info_queue.get()
	url = video_info.get('video_url').strip()
	print(f'正在解析: {url}')
	# Site-specific request headers
	header = handle_headers(url)
	response = requests.get(url=url, headers=header)

	if response.status_code != 200:
		print(f'文件解析失败: {url}, 响应码: {response.status_code}, 响应数据: {response.text}')
		return

	content = response.content.decode('utf-8')
	# iQiyi wraps the playlist inside a JSON document
	if 'iqiyi.com' in url:
		content = handle_aqiyi_m3u8(content)

	ts_video = queue.Queue()
	index = 0
	for line in content.split('\n'):
		line = line.strip()
		# Keep only segment lines; skip blank lines and #EXT-… tags
		if 'ts' not in line or line.startswith('#'):
			continue
		ts_video.put({
			'ts_url': urljoin(url, line),
			'index': index
		})
		index += 1

	ts_info_queue.put({
		'ts_list': ts_video,
		'path': video_info.get('path'),
		'video_url': url
	})


# Read the link-list file, queue every video, report whether any is iQiyi
def init_video_info(file_path, dir_name) -> bool:
	"""Queue one ``{'path','video_name','video_url'}`` record per valid
	"name,url" line of *file_path* and create each video's ts directory
	under *dir_name*. Returns True when ANY queued link is an iQiyi link
	(the caller uses this to cap concurrency).

	Bug fixed: the original had ``finally: return flag`` INSIDE the loop,
	which returned after processing only the first line of the file, so at
	most one video was ever downloaded.
	"""
	flag = False
	for line in read_file(file_path):
		try:
			if ',' in line and 'http' in line:
				# Split on the FIRST comma only — m3u8 URLs may contain commas
				video_name, m3u8_url = line.split(',', 1)
				video_name = video_name.strip()
				# Directory the ts segments are downloaded into
				video_path = os.path.join(dir_name, video_name)
				create_dir(video_path)
				video_info_queue.put({
					'path': video_path,
					'video_name': video_name,
					'video_url': m3u8_url
				})
				# Any iQiyi link in the batch is enough to trigger the cap
				if 'iqiyi' in m3u8_url:
					flag = True
		except Exception as e:
			print(f'm3u8链接处理失败: {line}, ({str(e)})')
	return flag

# Remux the merged stream into an .mp4 container, then drop the source file
def convert_to_mp4(input_file, output_file):
	"""Copy the audio/video streams of *input_file* (no re-encoding) into
	``<output_file>.mp4`` via ffmpeg, then delete *input_file*."""
	stream = ffmpeg.input(input_file)
	stream = stream.output(output_file + '.mp4', vcodec='copy', acodec='copy', loglevel='quiet')
	stream.run()
	print(f'{output_file} ==> 格式转换完成!')
	# Remove the now-redundant application/octet-stream source file
	if not os.path.exists(input_file):
		print(f"文件 {input_file} 不存在")
		return
	os.remove(input_file)
	print(f"文件 {input_file} 已被删除")

# Merge all downloaded ts segments of one video and convert the result
async def concat_ts_into_mp4(input_dir, output_file, semaphore):
	"""Concatenate every ``.ts`` file in *input_dir* (sorted, i.e. in
	zero-padded index order) into *output_file*, delete *input_dir*, then
	remux the result to mp4. *semaphore* bounds concurrent merges."""
	segment_paths = [os.path.join(input_dir, name) for name in os.listdir(input_dir) if name.endswith('.ts')]
	segment_paths.sort()
	print(f'\n正在合并视频: {output_file}')
	async with semaphore:
		# Append each segment's raw bytes to the output in order
		with open(output_file, 'wb') as merged:
			for segment in segment_paths:
				with open(segment, 'rb') as part:
					shutil.copyfileobj(part, merged)
		# The segment directory is no longer needed once merged
		if not os.path.exists(input_dir):
			print(f"目录 {input_dir} 不存在")
		else:
			shutil.rmtree(input_dir)
			print(f"目录 {input_dir} 及其内容已被删除")
		convert_to_mp4(output_file, output_file)


# Synchronous fallback download, used after the async download failed
def re_download(url, destination):
	"""Retry downloading *url* once with ``requests`` and write it to
	*destination*.

	Fixes: headers are rebuilt for THIS url via ``handle_headers`` instead
	of reusing the module-global ``header``, which still carries whatever
	Origin/Referer the last unrelated call left in it; and a failed retry
	is now reported instead of passing silently.
	"""
	response = requests.get(url=url, headers=handle_headers(url))
	if response.status_code == 200:
		with open(destination, 'wb') as file:
			file.write(response.content)
		print(f'正在尝试重新下载: {url}, 下载成功: {destination}')
	else:
		print(f'重新下载失败: {url}, 响应码: {response.status_code}')


# Asynchronously download a single ts segment; fall back to re_download on failure
async def download_file(session, url, destination, semaphore):
	"""Stream *url* to the file *destination* through *session*, limited by
	*semaphore*. Any failure (non-200 status or exception) triggers one
	synchronous retry via ``re_download``."""
	header = handle_headers(url)
	try:
		async with semaphore:
			async with session.get(url, headers=header) as response:
				if response.status != 200:
					print(f"\n下载失败: {url} (HTTP 状态码: {response.status})")
					raise Exception('下载异常')
				with open(destination, 'wb') as out:
					while True:
						# Read up to 20 MiB per iteration
						data = await response.content.read((2<<19)*20)
						# NOTE(review): the 1s pause per chunk looks like a
						# deliberate rate limit (CDNs reject fast clients) — kept as-is
						await asyncio.sleep(1)
						if not data:
							break
						out.write(data)
					# Progress line without a newline
					sys.stdout.write(f'\r正在下载： {destination}')
					sys.stdout.flush()
	except Exception as e:
		print(f"\n下载失败: {url} 错误信息: ({str(e)})")
		re_download(url, destination)


async def main(thread=50):
	"""Drain ``ts_info_queue``: download every segment of every video with
	at most *thread* concurrent requests, then merge each video's segments
	(at most 5 merges at a time)."""
	# Bounds concurrent segment downloads
	download_semaphore = asyncio.Semaphore(thread)
	# Bounds concurrent merge/convert jobs
	concat_semaphore = asyncio.Semaphore(5)

	async with aiohttp.ClientSession() as session:
		download_tasks = []
		while not ts_info_queue.empty():
			ts_info = ts_info_queue.get()
			ts_des_path = ts_info.get('path')
			# Remember this directory for the merge phase
			concat_queue.put(ts_des_path)
			ts_list = ts_info.get('ts_list')
			while not ts_list.empty():
				ts = ts_list.get()
				# Zero-padded index keeps the segments lexicographically sorted
				segment_name = str(ts.get('index')).zfill(4) + '.ts'
				destination = os.path.join(ts_des_path, segment_name)
				download_tasks.append(
					download_file(session, ts.get('ts_url'), destination, download_semaphore))
		await asyncio.gather(*download_tasks)

	# Merge phase: one task per downloaded video directory
	concat_tasks = []
	while not concat_queue.empty():
		ts_path = concat_queue.get()
		parent_dir, video_name = os.path.split(ts_path)
		output_file = os.path.join(parent_dir, 'video') + '/' + video_name
		concat_tasks.append(concat_ts_into_mp4(ts_path, output_file, concat_semaphore))
	await asyncio.gather(*concat_tasks)


if __name__ == "__main__":
	# Both the target directory and the link file are required
	if args.dir and args.file:
		dir_name = args.dir
		file_path = args.file
		thread = args.thread

		# Queue every video; True when any link is an iQiyi link
		is_iqiyi = init_video_info(file_path, dir_name)

		# iQiyi rejects aggressive clients — cap its concurrency at 20
		# (single condition replaces the redundant `== True` double test)
		if is_iqiyi and (thread is None or thread > 20):
			thread = 20
			print('正式下载爱奇艺视频, 线程数已限制为20')
		elif thread is None:
			thread = 50

		# Resolve each queued m3u8 into its list of ts segments
		while not video_info_queue.empty():
			handle_ts_url()
		# Directory the merged videos end up in
		create_dir(os.path.join(dir_name, 'video'))
		asyncio.run(main(thread))
	else:
		parser.print_help()
