import math
import os
from pathlib import Path
import queue
import copy
import threading
from urllib.parse import urlparse
import aiofiles
from httpx import AsyncClient
import httpx
import torch
import glob
import pickle
import shutil
import numpy as np
import subprocess
from tqdm import tqdm
import cv2
import imageio
import time
import datetime
from collections import deque
from typing import Optional
from fastapi import FastAPI, HTTPException, UploadFile, File, Form, BackgroundTasks
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import uuid
from omegaconf import OmegaConf
from transformers import WhisperModel
import sys

from musetalk.utils.blending import get_image
from musetalk.utils.face_parsing import FaceParsing
from musetalk.utils.audio_processor import AudioProcessor
from musetalk.utils.utils import get_file_type, get_video_fps, datagen, load_all_model
from musetalk.utils.preprocessing import get_landmark_and_bbox, read_imgs, coord_placeholder

# Global path constants
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))  # directory containing this file
CHECKPOINTS_DIR = os.path.join(PROJECT_DIR, "models")     # model weights root
UPLOAD_DIR = os.path.join(PROJECT_DIR, "uploads")         # per-task uploaded inputs
RESULT_DIR = os.path.join(PROJECT_DIR, "results")         # finished MP4s, served statically below
TEMP_DIR = os.path.join(RESULT_DIR, "temp")               # per-task scratch space

# Ensure the working directories exist before the app starts serving.
os.makedirs(UPLOAD_DIR, exist_ok=True)
os.makedirs(RESULT_DIR, exist_ok=True)
os.makedirs(TEMP_DIR, exist_ok=True)

app = FastAPI(title="MuseTalk API", description="API for MuseTalk video dubbing")

# Allow every origin (not recommended for production use).
origins = ["*"]

# CORS middleware configuration.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=False,  # must be False when allow_origins is ["*"]
    allow_methods=["*"],
    allow_headers=["*"],
)


# Queue-management globals shared between request handlers and the worker thread.
task_queue = queue.Queue()      # pending inference jobs (FIFO)
task_status = {}                # task_id -> status dict; access only under status_lock
status_lock  = threading.Lock()
processing_flag = False         # NOTE(review): appears unused at module level — verify before removing

# Serve finished results as static files under /results.
app.mount("/results", StaticFiles(directory=RESULT_DIR), name="results")

def fast_check_ffmpeg():
    """Return True if an ``ffmpeg`` executable is available on PATH.

    Runs ``ffmpeg -version`` and treats any failure — missing binary or a
    non-zero exit code — as "not available".
    """
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except (OSError, subprocess.CalledProcessError):
        # FIX: was a bare `except:` that also swallowed KeyboardInterrupt /
        # SystemExit. OSError covers FileNotFoundError (binary not on PATH);
        # CalledProcessError covers a broken install exiting non-zero.
        return False

# Model initialization (runs at import time; uses the GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
weight_dtype = torch.float32

# Load the MuseTalk model weights (VAE, UNet, positional encoder).
vae, unet, pe = load_all_model(
    unet_model_path=os.path.join(CHECKPOINTS_DIR, "musetalkV15/unet.pth"),
    vae_type="sd-vae",
    unet_config=os.path.join(CHECKPOINTS_DIR, "musetalkV15/musetalk.json"),
    device=device
)

pe = pe.to(device)
vae.vae = vae.vae.to(device)
unet.model = unet.model.to(device)

# Single fixed timestep tensor reused for every UNet call in run_inference().
timesteps = torch.tensor([0], device=device)

# Audio pipeline: feature extractor plus a frozen Whisper encoder in eval mode.
audio_processor = AudioProcessor(feature_extractor_path=os.path.join(CHECKPOINTS_DIR, "whisper"))
whisper = WhisperModel.from_pretrained(os.path.join(CHECKPOINTS_DIR, "whisper"))
whisper = whisper.to(device=device, dtype=weight_dtype).eval()
whisper.requires_grad_(False)

# Face parser used when blending generated mouth regions back into frames.
fp = FaceParsing(
    left_cheek_width=90,
    right_cheek_width=90
)

class InferenceRequest(BaseModel):
    """Tunable parameters for one dubbing job (body of POST /process/{task_id})."""
    bbox_shift: int = 0                  # passed to get_landmark_and_bbox() for face detection
    extra_margin: int = 10               # extra pixels added below the face crop (y2)
    parsing_mode: str = "jaw"            # blending mode forwarded to get_image()
    left_cheek_width: int = 90           # NOTE(review): not read by run_inference — verify intent
    right_cheek_width: int = 90          # NOTE(review): not read by run_inference — verify intent
    batch_size: int = 8                  # frames per UNet inference batch
    audio_padding_length_left: int = 2   # whisper chunk padding, forwarded to get_whisper_chunk()
    audio_padding_length_right: int = 2

def queue_processor():
    """Worker loop: pull tasks off ``task_queue`` and run inference serially.

    Runs forever in a daemon thread. All writes to the shared ``task_status``
    dict happen under ``status_lock``; ``run_inference`` itself must not take
    that lock.
    """
    print("后台处理任务启动")
    while True:
        # FIX: pre-bind task_id so the except handler below cannot hit a
        # NameError when the failure happens before the assignment succeeds.
        task_id = None
        try:
            # Blocks until a task is available; Queue.get() is thread-safe
            # and needs no external lock.
            current_task = task_queue.get()
            task_id = current_task["task_id"]

            # Mark the task as running.
            with status_lock:
                task_status[task_id].update({
                    "status": "处理中",
                    "start_time": datetime.datetime.now().isoformat(),
                    "position": 0  # position 0 == currently being processed
                })

            print(f"开始处理任务: {task_id}")
            # Execute the actual inference work.
            run_inference(
                task_id=task_id,
                audio_path=current_task["audio_path"],
                video_path=current_task["video_path"],
                params=current_task["params"]
            )

            # Mark the task as finished.
            with status_lock:
                task_status[task_id]["status"] = "完成"
                task_status[task_id]["message"] = "处理成功"
            print(f"任务完成: {task_id}")

        except Exception as e:
            print(f"任务处理失败 {task_id}: {e}")
            # Record the failure, guarding against a task that never made it
            # into task_status (previously this could raise KeyError/NameError
            # and kill the worker thread).
            if task_id is not None:
                with status_lock:
                    if task_id in task_status:
                        task_status[task_id].update({
                            "status": "失败",
                            "message": f"处理失败: {str(e)}"
                        })
        finally:
            # Always ack the queue item so Queue.join() can make progress.
            task_queue.task_done()
            # Periodic cleanup of finished/failed entries in task_status could go here.

# Start the single background worker thread that drains the task queue.
# daemon=True so the thread does not block interpreter shutdown.
queue_thread = threading.Thread(target=queue_processor, daemon=True)
queue_thread.start()

async def download_file_from_url(url: str, save_path: str) -> bool:
    """Asynchronously download a remote file to a local path.

    Streams the response body in 1 MB chunks so large files are never held
    fully in memory.

    :param url: remote file URL
    :param save_path: local destination path (including file name)
    :return: True on success
    :raises HTTPException: 400 on a non-2xx HTTP status, 503 on a network
        error, 500 on anything else
    """
    try:
        async with httpx.AsyncClient(timeout=30.0) as client:  # 30 s timeout
            async with client.stream("GET", url) as response:
                response.raise_for_status()  # non-2xx raises HTTPStatusError

                # FIX: use aiofiles (already used by upload_files) instead of
                # a synchronous open()/write() loop, which blocked the event
                # loop for the whole download.
                async with aiofiles.open(save_path, "wb") as f:
                    async for chunk in response.aiter_bytes(chunk_size=1024 * 1024):  # 1 MB chunks
                        await f.write(chunk)
        return True
    except httpx.HTTPStatusError as e:
        raise HTTPException(status_code=400, detail=f"远程文件下载失败（HTTP {e.response.status_code}）: {str(e)}")
    except httpx.RequestError as e:
        raise HTTPException(status_code=503, detail=f"网络请求失败: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件下载过程中发生未知错误: {str(e)}")

@app.post("/process-urls/")
async def process_files_from_urls(
    audio_url: str = Form(...),
    video_url: str = Form(...),
    bbox_shift: int = Form(0),
    extra_margin: int = Form(10),
    parsing_mode: str = Form("jaw"),
    left_cheek_width: int = Form(90),
    right_cheek_width: int = Form(90)
):
    """Fetch an audio/video pair from remote URLs into a new task directory.

    NOTE(review): the inference parameters accepted here are not persisted;
    POST /process/{task_id} supplies them again when queuing the job.
    """
    # Fresh task id and a directory of its own under UPLOAD_DIR.
    task_id = str(uuid.uuid4())
    task_dir = os.path.join(UPLOAD_DIR, task_id)
    Path(task_dir).mkdir(parents=True, exist_ok=True)

    def _local_name(url: str, fallback: str) -> str:
        # Derive a file name from the URL path; use the fallback when empty.
        return os.path.basename(urlparse(url).path) or fallback

    # Download the audio file.
    audio_path = os.path.join(task_dir, _local_name(audio_url, "audio_file"))
    await download_file_from_url(audio_url, audio_path)

    # Download the video file.
    video_path = os.path.join(task_dir, _local_name(video_url, "video_file"))
    await download_file_from_url(video_url, video_path)

    return {
        "task_id": task_id,
        "message": "任务提交成功",
        "audio_url": audio_url,
        "video_url": video_url,
        "saved_audio_path": audio_path,
        "saved_video_path": video_path
    }

@app.post("/upload-files/")
async def upload_files(
    audio: UploadFile = File(...),
    video: UploadFile = File(...),
    bbox_shift: int = Form(0),
    extra_margin: int = Form(10),
    parsing_mode: str = Form("jaw"),
    left_cheek_width: int = Form(90),
    right_cheek_width: int = Form(90)
):
    """Save an uploaded audio/video pair under a fresh task directory.

    Returns the generated task_id; call POST /process/{task_id} to queue the
    dubbing job. NOTE(review): the extra form parameters are accepted here
    but unused — /process supplies them again.
    """
    task_id = str(uuid.uuid4())
    task_dir = os.path.join(UPLOAD_DIR, task_id)
    os.makedirs(task_dir, exist_ok=True)

    # SECURITY FIX: client-controlled filenames are untrusted. Strip any
    # directory components so a name like "../../x" cannot escape task_dir,
    # and tolerate a missing filename.
    audio_path = os.path.join(task_dir, os.path.basename(audio.filename or "audio_file"))
    try:
        async with aiofiles.open(audio_path, "wb") as out_file:
            # Stream in 1 MB chunks so large uploads are not held in memory.
            while content := await audio.read(1024 * 1024):
                await out_file.write(content)
    except Exception as e:
        # e.g. disk full
        return {"error": f"保存音频文件失败: {str(e)}"}
    finally:
        await audio.close()  # always release the upload's temp file

    video_path = os.path.join(task_dir, os.path.basename(video.filename or "video_file"))
    try:
        async with aiofiles.open(video_path, "wb") as out_file:
            while content := await video.read(1024 * 1024):
                await out_file.write(content)
    except Exception as e:
        return {"error": f"保存视频文件失败: {str(e)}"}
    finally:
        await video.close()

    return {
        "task_id": task_id,
        "message": "文件上传成功",
        "audio_file": audio.filename,
        "video_file": video.filename
    }

@app.post("/process/{task_id}")
async def process_task(
    task_id: str,
    params: InferenceRequest
):
    """Queue a previously-uploaded task for inference.

    Locates the first audio and video file in the task's upload directory,
    records a "排队中" entry in task_status, and enqueues the job for the
    background worker.
    """
    task_dir = os.path.join(UPLOAD_DIR, task_id)
    if not os.path.exists(task_dir):
        return {"error": "任务ID不存在"}

    def _collect(extensions):
        # Gather matching files, preserving the extension priority order.
        matches = []
        for ext in extensions:
            matches += glob.glob(os.path.join(task_dir, f"*.{ext}"))
        return matches

    audio_files = _collect(["wav", "mp3", "aac", "flac"])
    video_files = _collect(["mp4", "avi", "mov", "mkv", "wmv"])

    if not audio_files or not video_files:
        return {"error": "找不到文件"}

    with status_lock:
        existing = task_status.get(task_id)
        if existing is not None and existing["status"] not in ["完成", "失败"]:
            return {"error": f"任务 {task_id} 已经在处理中或排队中", "status": existing["status"]}

        # qsize() is thread-safe, though the position is only approximate
        # under concurrent submissions.
        position = task_queue.qsize() + 1

        task_status[task_id] = {
            "status": "排队中",
            "position": position,
            "create_time": datetime.datetime.now().isoformat(),
            "start_time": None,
            "message": "",
            "params": params.dict()
        }

    # Queue.put() is itself thread-safe — no extra locking required here.
    task_queue.put({
        "task_id": task_id,
        "audio_path": audio_files[0],
        "video_path": video_files[0],
        "params": params
    })

    return {
        "task_id": task_id,
        "status": "排队中",
        "position": position,
        "message": f"任务已加入队列，当前排位：{position}"
    }

@app.get("/status/{task_id}")
async def get_status(task_id: str):
    """Report a task's status, preferring the finished artifact on disk.

    If RESULT_DIR/{task_id}.mp4 exists the task is reported as done
    regardless of the in-memory bookkeeping.
    """
    result_path = os.path.join(RESULT_DIR, f"{task_id}.mp4")
    if os.path.exists(result_path):
        # TODO: upload result to OSS
        # TODO: delete temp files, uploaded inputs, and the result file
        return {
            "task_id": task_id,
            "status": "完成",
            "result_url": f"/results/{task_id}.mp4"
        }

    with status_lock:
        # FIX: take a shallow copy — the previous code held a reference to
        # the shared dict and then mutated it (estimated_wait) outside the
        # lock, both racing other threads and polluting task_status.
        status_info = dict(task_status.get(task_id, {}))

    if not status_info:
        return {"error": "任务ID不存在"}

    if status_info["status"] == "排队中":
        avg_time = 60  # rough average seconds per task, used for the ETA
        est_wait = status_info["position"] * avg_time
        status_info["estimated_wait"] = f"{est_wait}秒"

    return {
        "task_id": task_id,
        "status": status_info["status"],
        "position": status_info.get("position", -1),
        "created_at": status_info["create_time"],
        "started_at": status_info.get("start_time"),
        "estimated_wait": status_info.get("estimated_wait"),
        "message": status_info.get("message", "")
    }

@app.get("/queue-status")
async def get_queue_status():
    """Summarize queue depth, in-flight work, and the most recent task ids."""
    # Queue.qsize() is thread-safe; no lock needed for this snapshot.
    pending = task_queue.qsize()

    # task_status must be read under the lock.
    with status_lock:
        in_flight = [tid for tid, info in task_status.items() if info["status"] == "处理中"]
        recent = list(task_status.keys())[-10:]  # ids of the 10 most recent tasks

    return {
        "pending_tasks_in_queue": pending,
        "processing_a_task": bool(in_flight),
        "processing_task_ids": in_flight,
        "recent_tasks": recent
    }

@app.get("/download/{task_id}")
async def download_result(task_id: str):
    """Serve the finished MP4 for a task as a downloadable file."""
    result_path = os.path.join(RESULT_DIR, f"{task_id}.mp4")
    if os.path.exists(result_path):
        return FileResponse(
            result_path,
            media_type="video/mp4",
            filename=f"result_{task_id}.mp4"
        )
    return {"error": "结果不存在"}

@torch.no_grad()
def run_inference(task_id: str, audio_path: str, video_path: str, params: InferenceRequest):
    """Run the full MuseTalk dubbing pipeline for one queued task.

    Steps: split the video into frames, compute Whisper audio chunks, detect
    face boxes, encode face crops to VAE latents, run the UNet per audio
    chunk, blend each generated mouth region back into its frame, then encode
    the frames and mux the audio into RESULT_DIR/{task_id}.mp4.

    :param task_id: queue identifier; names the output and temp directories
    :param audio_path: path to the driving audio file
    :param video_path: path to the source video (or a directory of frames)
    :param params: inference parameters submitted with the task
    :return: path of the produced video file
    :raises: re-raises any pipeline exception after logging it (the worker
        thread records the failure in task_status)
    """
    try:
        input_basename = os.path.basename(video_path).split('.')[0]
        audio_basename = os.path.basename(audio_path).split('.')[0]
        output_basename = f"{input_basename}_{audio_basename}"

        # Per-task scratch directory, removed on success.
        task_temp_dir = os.path.join(TEMP_DIR, task_id)
        os.makedirs(task_temp_dir, exist_ok=True)

        result_img_save_path = os.path.join(task_temp_dir, output_basename)
        crop_coord_save_path = os.path.join(task_temp_dir, f"{input_basename}.pkl")
        os.makedirs(result_img_save_path, exist_ok=True)

        output_vid_name = os.path.join(RESULT_DIR, f"{task_id}.mp4")

        if get_file_type(video_path) == "video":
            # Explode the video into numbered PNG frames.
            save_dir_full = os.path.join(task_temp_dir, input_basename)
            os.makedirs(save_dir_full, exist_ok=True)
            # FIX: argument-list subprocess call instead of os.system with an
            # unquoted f-string — uploaded filenames may contain spaces or
            # shell metacharacters.
            subprocess.run([
                "ffmpeg", "-v", "fatal", "-i", video_path,
                "-start_number", "0",
                os.path.join(save_dir_full, "%08d.png")
            ])
            input_img_list = sorted(glob.glob(os.path.join(save_dir_full, '*.[jpJP][pnPN]*[gG]')))
            fps = get_video_fps(video_path)
        else:
            # video_path is already a directory of numerically-named frames.
            input_img_list = glob.glob(os.path.join(video_path, '*.[jpJP][pnPN]*[gG]'))
            input_img_list = sorted(input_img_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))
            fps = 25

        # Whisper audio features, chunked to match the video frame rate.
        whisper_input_features, librosa_length = audio_processor.get_audio_feature(audio_path)
        whisper_chunks = audio_processor.get_whisper_chunk(
            whisper_input_features,
            device,
            weight_dtype,
            whisper,
            librosa_length,
            fps=fps,
            audio_padding_length_left=params.audio_padding_length_left,
            audio_padding_length_right=params.audio_padding_length_right,
        )

        print("提取人脸特征...")
        coord_list, frame_list = get_landmark_and_bbox(input_img_list, params.bbox_shift)
        with open(crop_coord_save_path, 'wb') as f:
            pickle.dump(coord_list, f)

        # Encode each detected face crop into VAE latents.
        input_latent_list = []
        for bbox, frame in zip(coord_list, frame_list):
            if bbox == coord_placeholder:
                # No face detected on this frame — skip it.
                continue
            x1, y1, x2, y2 = bbox
            y2 = y2 + params.extra_margin
            y2 = min(y2, frame.shape[0])  # keep the crop inside the frame
            crop_frame = frame[y1:y2, x1:x2]
            crop_frame = cv2.resize(crop_frame, (256, 256), interpolation=cv2.INTER_LANCZOS4)
            latents = vae.get_latents_for_unet(crop_frame)
            input_latent_list.append(latents)

        # Mirror the sequences so playback loops seamlessly when the audio
        # outlasts the video.
        frame_list_cycle = frame_list + frame_list[::-1]
        coord_list_cycle = coord_list + coord_list[::-1]
        input_latent_list_cycle = input_latent_list + input_latent_list[::-1]

        print("开始推理...")
        video_num = len(whisper_chunks)  # target frame count, driven by the audio
        batch_size = params.batch_size
        gen = datagen(
            whisper_chunks=whisper_chunks,
            vae_encode_latents=input_latent_list_cycle,
            batch_size=batch_size,
            delay_frame=0,
            device=device,
        )

        # Progress-bar total; datagen's actual iteration count may be shorter
        # if input_latent_list_cycle is the limiting length.
        total_batches = math.ceil(video_num / batch_size)

        res_frame_list = []
        for i, (whisper_batch, latent_batch) in tqdm(enumerate(gen), total=total_batches, desc="Processing Frames推理进度"):
            audio_feature_batch = pe(whisper_batch)
            latent_batch = latent_batch.to(dtype=weight_dtype)

            pred_latents = unet.model(latent_batch, timesteps, encoder_hidden_states=audio_feature_batch).sample
            recon = vae.decode_latents(pred_latents)

            # recon yields one decoded frame per batch element (the final
            # batch may be smaller than batch_size).
            for res_frame in recon:
                res_frame_list.append(res_frame)

        print("推理完成，开始合成视频...")

        print("合成视频...")
        for i, res_frame in enumerate(res_frame_list):
            bbox = coord_list_cycle[i % (len(coord_list_cycle))]
            ori_frame = copy.deepcopy(frame_list_cycle[i % (len(frame_list_cycle))])
            x1, y1, x2, y2 = bbox
            y2 = y2 + params.extra_margin
            # BUG FIX: clamp against the frame being composited (ori_frame);
            # the old code referenced the stale `frame` variable left over
            # from the latent-encoding loop above.
            y2 = min(y2, ori_frame.shape[0])
            try:
                res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
            except Exception:
                # Degenerate bbox (zero/negative size) — skip this frame.
                continue

            combine_frame = get_image(
                ori_frame,
                res_frame,
                [x1, y1, x2, y2],
                mode=params.parsing_mode,
                fp=fp
            )
            cv2.imwrite(f"{result_img_save_path}/{str(i).zfill(8)}.png", combine_frame)

        # Encode the composited frames to video, then mux in the audio track.
        temp_vid_path = os.path.join(task_temp_dir, f"temp_{output_basename}.mp4")
        subprocess.run([
            "ffmpeg", "-y", "-v", "warning", "-r", str(fps), "-f", "image2",
            "-i", os.path.join(result_img_save_path, "%08d.png"),
            "-vcodec", "libx264", "-vf", "format=yuv420p", "-crf", "18",
            temp_vid_path
        ])

        subprocess.run([
            "ffmpeg", "-y", "-v", "warning",
            "-i", audio_path, "-i", temp_vid_path,
            output_vid_name
        ])

        shutil.rmtree(task_temp_dir)
        print(f"处理完成: {output_vid_name}")
        return output_vid_name

    except Exception as e:
        print(f"处理失败: {str(e)}")
        raise

# FFmpeg availability check: if the binary is not on PATH, prepend the
# bundled build's directory (platform-specific layout) to PATH so later
# ffmpeg invocations in run_inference can find it.
if not fast_check_ffmpeg():
    print("警告: 找不到ffmpeg，自动配置路径...")
    path_separator = ';' if sys.platform == 'win32' else ':'
    ffmpeg_path = "./ffmpeg-master-latest-win64-gpl-shared/bin" if sys.platform == 'win32' else "./ffmpeg-4.4-amd64-static/"
    os.environ["PATH"] = f"{ffmpeg_path}{path_separator}{os.environ['PATH']}"

# On Windows, switch to the selector event loop — presumably for
# compatibility with libraries that don't support the proactor loop
# (NOTE(review): confirm which dependency requires this).
if sys.platform == 'win32':
    import asyncio
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# Script entry point: run the API with uvicorn on all interfaces, port 8000.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)