import asyncio
import hashlib
import logging
import os
import subprocess
import time

import aiohttp
import m3u8
import requests
from mitmproxy import http

from cover import m3u8_cover

# Backend API base URL; override with the API_URL environment variable.
API_URL = os.getenv("API_URL", "http://localhost:3000/api")


def get_m3u8_info(m3u8_url):
    """Load an m3u8 playlist and derive basic metadata from it.

    Retries up to ``max_retries`` times on any failure; after the last
    failed attempt it returns a sentinel dict instead of raising so the
    calling proxy hook keeps running.

    Args:
        m3u8_url: URL of the m3u8 playlist to inspect.

    Returns:
        dict with keys ``title`` (str) and ``durationSeconds`` (int);
        ``durationSeconds`` is 0 when the playlist could not be loaded.
    """
    max_retries = 3
    retry_delay = 2  # seconds to wait between attempts
    timeout = 30  # per-request timeout, seconds

    # Browser-like headers: some CDNs reject requests without a User-Agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': '*/*',
        'Connection': 'keep-alive',
    }

    for attempt in range(max_retries):
        try:
            logging.info(f"尝试加载m3u8文件 (尝试 {attempt + 1}/{max_retries}): {m3u8_url}")
            # NOTE(review): verify_ssl=False disables certificate checks.
            # Plausible for a sniffing proxy, but confirm it is intentional.
            playlist = m3u8.load(m3u8_url, timeout=timeout, headers=headers, verify_ssl=False)

            logging.info(f"playlist: {len(playlist.segments)}")
            # Total duration across all segments, truncated to whole seconds.
            total_duration = sum(segment.duration for segment in playlist.segments)
            # Synthesize a unique-ish title: media sequence + timestamp + duration.
            title = f"{playlist.media_sequence}_{int(time.time())}_{int(total_duration)}"
            return {
                "title": title,
                "durationSeconds": int(total_duration),
            }
        except Exception as e:
            # Broad catch is deliberate: any network/parse error triggers a retry.
            logging.error(f"加载m3u8文件失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
            if attempt < max_retries - 1:
                logging.info(f"{retry_delay}秒后重试...")
                time.sleep(retry_delay)

    # All retries exhausted: return a sentinel so the caller does not crash.
    logging.error(f"达到最大重试次数，加载m3u8文件失败: {m3u8_url}")
    return {
        "title": f"error_{int(time.time())}",
        "durationSeconds": 0,
    }


def save2db(source_url):
    """Persist metadata for an m3u8 source URL via the backend API.

    Skips playlists shorter than 10 seconds (which also covers load
    failures, reported as duration 0) and URLs already recorded in the
    database. Records are keyed by the MD5 hex digest of the URL.

    Args:
        source_url: The m3u8 URL to record.
    """
    base_info = get_m3u8_info(source_url)
    # Too short to be worth keeping (or the playlist failed to load).
    if base_info["durationSeconds"] < 10:
        logging.info(f"总时长小于10秒，不保存: {base_info['durationSeconds']}秒 {base_info}")
        return
    # MD5 of the source URL acts as the stable primary key.
    # (Renamed from `id` to avoid shadowing the builtin.)
    video_id = hashlib.md5(source_url.encode('utf-8')).hexdigest()

    # Dedup check: skip if the record already exists.
    logging.info(f"检查数据库是否存在该数据，id: {video_id}")
    # timeout prevents a dead API from hanging the proxy indefinitely.
    check_resp = requests.get(f"{API_URL}/videos/{video_id}", timeout=30)
    if check_resp.status_code == 200:
        # Only parse JSON after confirming success: error responses
        # (404 pages etc.) may not be valid JSON and would raise here.
        try:
            result = check_resp.json()
        except ValueError:
            result = {}
        if result.get('success'):
            logging.info(f"数据已存在，跳过保存: id={video_id}, 数据: {result}")
            return

    # Generate the cover image before building the payload.
    cover_jpg = m3u8_cover(source_url, f"video_{video_id}_cover.jpg")
    params = {
        "id": video_id,
        "title": base_info["title"],
        "category": "2025102906",
        "sourceUrl": source_url,
        "coverUrl": cover_jpg,
        "description": "",
        "durationSeconds": base_info["durationSeconds"],
    }
    logging.info(f"准备保存到数据库: {params}")
    resp = requests.post(f"{API_URL}/videos", json=params, timeout=30)
    if resp.status_code == 200:
        logging.info(f"成功保存到数据库: {params}")
    else:
        logging.error(f"保存到数据库失败: {resp.status_code} {resp.text}")


class M3U8Sniffer:
    """mitmproxy addon that detects m3u8 playlist requests and records them."""

    def __init__(self):
        # Directory for downloaded .ts segments and the merged output file.
        self.download_path = "./downloads"
        os.makedirs(self.download_path, exist_ok=True)

    def response(self, flow: http.HTTPFlow):
        """mitmproxy hook, invoked for every completed HTTP response.

        Any request URL that looks like an m3u8 playlist (path ends with
        .m3u8, or .m3u8 followed by a query string) is handed to save2db.
        """
        if flow.request.url.endswith('.m3u8') or '.m3u8?' in flow.request.url:
            logging.info(f"抓到m3u8文件: {flow.request.url}")
            save2db(flow.request.url)

    async def download_playlist(self, playlist):
        """Download all playlist segments concurrently, then merge them."""
        async with aiohttp.ClientSession() as session:
            tasks = [
                self.download_ts(segment.absolute_uri, f"{i:05}.ts", session)
                for i, segment in enumerate(playlist.segments)
            ]
            await asyncio.gather(*tasks)
            self.merge_ts(len(playlist.segments))

    async def download_ts(self, url, filename, session):
        """Fetch a single .ts segment into the download directory.

        Bug fix: the original wrote every segment to the literal path
        "./downloads/(unknown)", ignoring `filename`, so all segments
        overwrote each other and merge_ts never found the numbered files.
        """
        async with session.get(url) as resp:
            with open(f"{self.download_path}/{filename}", 'wb') as f:
                f.write(await resp.read())

    def merge_ts(self, count):
        """Concatenate the numbered .ts segments into output.mp4 with ffmpeg."""
        file_list = '|'.join(f"{self.download_path}/{i:05}.ts" for i in range(count))
        # subprocess.run with an argument list avoids the shell-quoting /
        # injection risks of the previous os.system() string.
        subprocess.run(
            ['ffmpeg', '-i', f'concat:{file_list}', '-c', 'copy',
             f'{self.download_path}/output.mp4'],
            check=False,
        )


# mitmproxy addon registration: mitmproxy discovers this module-level list.
addons = [M3U8Sniffer()]
