import os  
import copy
import threading  
import torch  
import glob  
import pickle  
import shutil  
import numpy as np  
import subprocess  
from tqdm import tqdm  
import cv2  
import imageio  
from typing import Optional  
from fastapi import FastAPI, UploadFile, File, Form, BackgroundTasks  
from fastapi.responses import FileResponse  
from fastapi.staticfiles import StaticFiles  
from pydantic import BaseModel  
import uuid  
from omegaconf import OmegaConf  
from transformers import WhisperModel  
import sys  
  
from musetalk.utils.blending import get_image  
from musetalk.utils.face_parsing import FaceParsing  
from musetalk.utils.audio_processor import AudioProcessor  
from musetalk.utils.utils import get_file_type, get_video_fps, datagen, load_all_model  
from musetalk.utils.preprocessing import get_landmark_and_bbox, read_imgs, coord_placeholder

# Global constants: all paths are resolved relative to this file's directory.
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))  
CHECKPOINTS_DIR = os.path.join(PROJECT_DIR, "models")  
UPLOAD_DIR = os.path.join(PROJECT_DIR, "uploads")  
RESULT_DIR = os.path.join(PROJECT_DIR, "results")  
TEMP_DIR = os.path.join(RESULT_DIR, "temp")  
  
# Make sure the working directories exist before the app starts serving.
os.makedirs(UPLOAD_DIR, exist_ok=True)  
os.makedirs(RESULT_DIR, exist_ok=True)  
os.makedirs(TEMP_DIR, exist_ok=True)  
  
app = FastAPI(title="MuseTalk API", description="API for MuseTalk video dubbing")  
# Global lock: serializes inference jobs so only one uses the shared models/GPU at a time.
lock = threading.Lock()
  
# Serve finished result videos as static files under /results.
app.mount("/results", StaticFiles(directory=RESULT_DIR), name="results")  
  
# Check whether the ffmpeg executable is available on PATH.
def fast_check_ffmpeg():
    """Return True if ``ffmpeg -version`` runs successfully, False otherwise.

    Used at startup to decide whether a bundled ffmpeg directory must be
    prepended to PATH.
    """
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except (subprocess.CalledProcessError, OSError):
        # Narrow handling: a missing/unlaunchable binary (OSError, which
        # includes FileNotFoundError) or a non-zero exit both mean "ffmpeg
        # unusable".  Anything else (e.g. KeyboardInterrupt) propagates,
        # unlike the previous bare `except:`.
        return False
    
    # 模型加载  
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  
weight_dtype = torch.float32  # 默认使用float32，可通过配置改为float16  
  
# 加载模型权重  
vae, unet, pe = load_all_model(  
    unet_model_path=os.path.join(CHECKPOINTS_DIR, "musetalkV15/unet.pth"),   
    vae_type="sd-vae",  
    unet_config=os.path.join(CHECKPOINTS_DIR, "musetalkV15/musetalk.json"),  
    device=device  
)  
  
# 移动模型到指定设备  
pe = pe.to(device)  
vae.vae = vae.vae.to(device)  
unet.model = unet.model.to(device)  
  
timesteps = torch.tensor([0], device=device)  
  
# 初始化音频处理器和Whisper模型  
audio_processor = AudioProcessor(feature_extractor_path=os.path.join(CHECKPOINTS_DIR, "whisper"))  
whisper = WhisperModel.from_pretrained(os.path.join(CHECKPOINTS_DIR, "whisper"))  
whisper = whisper.to(device=device, dtype=weight_dtype).eval()  
whisper.requires_grad_(False)  
  
# 初始化面部解析器  
fp = FaceParsing(  
    left_cheek_width=90,  
    right_cheek_width=90  
)

class InferenceRequest(BaseModel):  
    """Tunable parameters for one dubbing job (body of POST /process/{task_id})."""
    bbox_shift: int = 0  # vertical shift applied to detected face bounding boxes
    extra_margin: int = 10  # extra pixels added below the chin before cropping
    parsing_mode: str = "jaw"  # blending mode passed through to get_image()
    left_cheek_width: int = 90  # NOTE(review): unused — the global FaceParsing is built at import time
    right_cheek_width: int = 90  # NOTE(review): unused — see left_cheek_width
    batch_size: int = 8  # frames per UNet inference batch
    audio_padding_length_left: int = 2  # whisper-chunk padding (frames) on the left
    audio_padding_length_right: int = 2  # whisper-chunk padding (frames) on the right
      
@app.post("/upload-files/")  
async def upload_files(  
    audio: UploadFile = File(...),  
    video: UploadFile = File(...),  
    bbox_shift: int = Form(0),  
    extra_margin: int = Form(10),  
    parsing_mode: str = Form("jaw"),  
    left_cheek_width: int = Form(90),  
    right_cheek_width: int = Form(90)  
):  
    """Accept an audio + video pair and store them under a fresh task id.

    Returns the generated task id; the extra form fields are accepted for
    API compatibility, but the effective parameters are supplied later via
    POST /process/{task_id}.
    """
    # The unique id doubles as the per-task upload directory name.
    task_id = str(uuid.uuid4())
    task_dir = os.path.join(UPLOAD_DIR, task_id)
    os.makedirs(task_dir, exist_ok=True)

    # Security fix: strip any directory components from the client-supplied
    # filenames so a crafted name (e.g. "../../etc/passwd") cannot escape
    # task_dir.  The previous code joined the raw filename directly.
    audio_name = os.path.basename(audio.filename or "audio")
    video_name = os.path.basename(video.filename or "video")

    audio_path = os.path.join(task_dir, audio_name)
    with open(audio_path, "wb") as f:
        f.write(await audio.read())

    video_path = os.path.join(task_dir, video_name)
    with open(video_path, "wb") as f:
        f.write(await video.read())

    return {
        "task_id": task_id,
        "message": "文件上传成功，请使用任务ID获取处理结果",
        "audio_file": audio_name,
        "video_file": video_name,
    }
  
@app.post("/process/{task_id}")  
async def process_task(  
    task_id: str,  
    background_tasks: BackgroundTasks,  
    params: InferenceRequest  
):  
    """Start dubbing for previously uploaded files; heavy work runs in the background."""
    task_dir = os.path.join(UPLOAD_DIR, task_id)
    if not os.path.exists(task_dir):
        return {"error": "任务ID不存在"}

    # Locate the uploaded media; pattern order matters (wav before mp3,
    # mp4 before avi) so the first match mirrors the original preference.
    audio_candidates = [
        p for pattern in ("*.wav", "*.mp3")
        for p in glob.glob(os.path.join(task_dir, pattern))
    ]
    video_candidates = [
        p for pattern in ("*.mp4", "*.avi")
        for p in glob.glob(os.path.join(task_dir, pattern))
    ]

    if not (audio_candidates and video_candidates):
        return {"error": "找不到音频或视频文件"}

    # Defer the GPU-bound inference so this request returns immediately.
    background_tasks.add_task(
        run_inference,
        task_id=task_id,
        audio_path=audio_candidates[0],
        video_path=video_candidates[0],
        params=params,
    )

    return {
        "task_id": task_id,
        "status": "处理中",
        "message": "视频处理任务已开始，请稍后使用任务ID查询结果",
    }
  
@app.get("/status/{task_id}")  
async def get_status(task_id: str):  
    """Report whether a task has finished, is still running, or is unknown."""
    finished_video = os.path.join(RESULT_DIR, f"{task_id}.mp4")
    if os.path.exists(finished_video):
        # The result file exists, so processing is complete.
        return {
            "task_id": task_id,
            "status": "完成",
            "result_url": f"/results/{task_id}.mp4",
        }

    # No result yet: distinguish "still working" from "never uploaded".
    if not os.path.exists(os.path.join(UPLOAD_DIR, task_id)):
        return {"error": "任务ID不存在"}

    return {"task_id": task_id, "status": "处理中"}
  
@app.get("/download/{task_id}")  
async def download_result(task_id: str):  
    """Stream the finished MP4 for a task, or report that it does not exist."""
    result_path = os.path.join(RESULT_DIR, f"{task_id}.mp4")
    if os.path.exists(result_path):
        return FileResponse(
            result_path,
            media_type="video/mp4",
            filename=f"musetalk_result_{task_id}.mp4",
        )
    # Either the task id is unknown or processing has not finished yet.
    return {"error": "结果文件不存在"}

@torch.no_grad()  
def run_inference(task_id: str, audio_path: str, video_path: str, params: InferenceRequest):
    """Run the full MuseTalk dubbing pipeline for one task.

    Extracts frames from the source video, computes Whisper audio features,
    generates lip-synced face crops with the UNet/VAE, blends them back into
    the original frames and muxes the result with the input audio.

    The module-level ``lock`` serializes jobs so only one inference uses the
    shared models/GPU at a time.  On success the output video is written to
    RESULT_DIR/<task_id>.mp4 and its path is returned; on failure the error
    is logged and None is returned (this runs as a FastAPI background task,
    so exceptions must not propagate).
    """
    with lock:  # one inference job at a time (shared models / GPU)
        try:
            # Derive working names from the input files.  NOTE: split('.')[0]
            # intentionally truncates at the first dot, matching the original
            # naming scheme for temp artifacts.
            input_basename = os.path.basename(video_path).split('.')[0]
            audio_basename = os.path.basename(audio_path).split('.')[0]
            output_basename = f"{input_basename}_{audio_basename}"

            # Per-task scratch directory; removed only on success.
            task_temp_dir = os.path.join(TEMP_DIR, task_id)
            os.makedirs(task_temp_dir, exist_ok=True)

            result_img_save_path = os.path.join(task_temp_dir, output_basename)
            crop_coord_save_path = os.path.join(task_temp_dir, f"{input_basename}.pkl")
            os.makedirs(result_img_save_path, exist_ok=True)

            # Final output location served by /results and /download.
            output_vid_name = os.path.join(RESULT_DIR, f"{task_id}.mp4")

            # --- Extract source frames -------------------------------------
            if get_file_type(video_path) == "video":
                save_dir_full = os.path.join(task_temp_dir, input_basename)
                os.makedirs(save_dir_full, exist_ok=True)
                # Argument-list form is safe for paths containing spaces or
                # shell metacharacters (the previous os.system f-string was
                # not), and check=True surfaces ffmpeg failures immediately.
                subprocess.run(
                    ["ffmpeg", "-v", "fatal", "-i", video_path,
                     "-start_number", "0",
                     os.path.join(save_dir_full, "%08d.png")],
                    check=True,
                )
                input_img_list = sorted(glob.glob(os.path.join(save_dir_full, '*.[jpJP][pnPN]*[gG]')))
                fps = get_video_fps(video_path)
            else:  # input is a directory of numbered image frames
                input_img_list = glob.glob(os.path.join(video_path, '*.[jpJP][pnPN]*[gG]'))
                input_img_list = sorted(input_img_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))
                fps = 25  # default frame rate for image-sequence input

            # --- Audio features --------------------------------------------
            whisper_input_features, librosa_length = audio_processor.get_audio_feature(audio_path)
            whisper_chunks = audio_processor.get_whisper_chunk(
                whisper_input_features,
                device,
                weight_dtype,
                whisper,
                librosa_length,
                fps=fps,
                audio_padding_length_left=params.audio_padding_length_left,
                audio_padding_length_right=params.audio_padding_length_right,
            )

            # --- Face detection and latent encoding ------------------------
            print("提取人脸特征...")
            coord_list, frame_list = get_landmark_and_bbox(input_img_list, params.bbox_shift)
            with open(crop_coord_save_path, 'wb') as f:
                pickle.dump(coord_list, f)

            input_latent_list = []
            for bbox, frame in zip(coord_list, frame_list):
                if bbox == coord_placeholder:  # no face found in this frame
                    continue
                x1, y1, x2, y2 = bbox
                # Extend the crop below the chin, clamped to the frame height.
                y2 = min(y2 + params.extra_margin, frame.shape[0])
                crop_frame = frame[y1:y2, x1:x2]
                crop_frame = cv2.resize(crop_frame, (256, 256), interpolation=cv2.INTER_LANCZOS4)
                input_latent_list.append(vae.get_latents_for_unet(crop_frame))

            # Mirror the sequences so playback loops smoothly when the audio
            # is longer than the source clip.
            frame_list_cycle = frame_list + frame_list[::-1]
            coord_list_cycle = coord_list + coord_list[::-1]
            input_latent_list_cycle = input_latent_list + input_latent_list[::-1]

            # --- Batched UNet inference ------------------------------------
            print("开始推理...")
            gen = datagen(
                whisper_chunks=whisper_chunks,
                vae_encode_latents=input_latent_list_cycle,
                batch_size=params.batch_size,
                delay_frame=0,
                device=device,
            )

            res_frame_list = []
            for whisper_batch, latent_batch in gen:
                audio_feature_batch = pe(whisper_batch)
                # Match latent dtype to the model weights (float16/float32).
                latent_batch = latent_batch.to(dtype=weight_dtype)
                pred_latents = unet.model(latent_batch, timesteps, encoder_hidden_states=audio_feature_batch).sample
                recon = vae.decode_latents(pred_latents)
                res_frame_list.extend(recon)

            # --- Blend generated crops back into the full frames -----------
            print("将生成的图像合成到原始视频...")
            for i, res_frame in enumerate(res_frame_list):
                bbox = coord_list_cycle[i % len(coord_list_cycle)]
                ori_frame = copy.deepcopy(frame_list_cycle[i % len(frame_list_cycle)])
                x1, y1, x2, y2 = bbox
                # Bugfix: clamp against the frame actually being blended
                # (ori_frame), not the stale `frame` variable left over from
                # the encoding loop above.
                y2 = min(y2 + params.extra_margin, ori_frame.shape[0])
                try:
                    res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
                except cv2.error:
                    # Degenerate bbox (zero/negative size): skip this frame,
                    # as the original bare `except:` did, but without hiding
                    # unrelated errors.
                    continue

                # v15 blending: paste the generated mouth region using the
                # face-parsing mask.
                combine_frame = get_image(
                    ori_frame,
                    res_frame,
                    [x1, y1, x2, y2],
                    mode=params.parsing_mode,
                    fp=fp,
                )
                cv2.imwrite(f"{result_img_save_path}/{str(i).zfill(8)}.png", combine_frame)

            # --- Encode the video, then mux in the audio track -------------
            temp_vid_path = os.path.join(task_temp_dir, f"temp_{output_basename}.mp4")
            subprocess.run(
                ["ffmpeg", "-y", "-v", "warning", "-r", str(fps), "-f", "image2",
                 "-i", f"{result_img_save_path}/%08d.png",
                 "-vcodec", "libx264", "-vf", "format=yuv420p", "-crf", "18",
                 temp_vid_path],
                check=True,
            )
            subprocess.run(
                ["ffmpeg", "-y", "-v", "warning",
                 "-i", audio_path, "-i", temp_vid_path, output_vid_name],
                check=True,
            )

            # Scratch files are removed only on success, which leaves them
            # around for debugging when something fails.
            shutil.rmtree(task_temp_dir)

            print(f"结果已保存到 {output_vid_name}")
            return output_vid_name
        except Exception as e:
            # Background-task boundary: log with traceback and swallow so the
            # failure is diagnosable and the worker does not die silently.
            import traceback
            traceback.print_exc()
            print(f"处理过程中出错: {e}")
            return None
    
    # 检查ffmpeg是否已安装  
if not fast_check_ffmpeg():  
    print("警告: 找不到ffmpeg，请确保ffmpeg已正确安装")  
    # 根据操作系统选择路径分隔符  
    path_separator = ';' if sys.platform == 'win32' else ':'  
    ffmpeg_path = "./ffmpeg-master-latest-win64-gpl-shared/bin" if sys.platform == 'win32' else "./ffmpeg-4.4-amd64-static/"  
    os.environ["PATH"] = f"{ffmpeg_path}{path_separator}{os.environ['PATH']}"  
  
# Work around Windows async-IO issues by forcing the selector-based
# event loop policy instead of the default Proactor loop.
if sys.platform == 'win32':  
    import asyncio  
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())  
  
if __name__ == "__main__":
    # Launch the API server only when executed as a script, not on import.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)