# Standard library
import argparse
import copy
import glob
import os
import pickle
import shutil
import subprocess
import sys
import uuid  # used to generate unique per-request IDs
from typing import Optional

# Third-party
import cv2
import numpy as np
import torch
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import FileResponse, JSONResponse
from omegaconf import OmegaConf
from tqdm import tqdm
from transformers import WhisperModel

# Local
from musetalk.utils.blending import get_image
from musetalk.utils.face_parsing import FaceParsing
from musetalk.utils.audio_processor import AudioProcessor
from musetalk.utils.utils import get_file_type, get_video_fps, datagen, load_all_model
from musetalk.utils.preprocessing import get_landmark_and_bbox, read_imgs, coord_placeholder

def fast_check_ffmpeg():
    """Return True if an ``ffmpeg`` binary is reachable on PATH, else False."""
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except (OSError, subprocess.CalledProcessError):
        # OSError: binary not found / not executable; CalledProcessError:
        # non-zero exit. The previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate.
        return False

app = FastAPI()

# Load models and related components once at import time (make sure the paths below are correct).
try:
    # Mirrors the argparse options of the original CLI script; edit defaults here if needed.
    class Args:
        """Plain settings container; every attribute is copied verbatim from the ctor args."""
        def __init__(self, ffmpeg_path="./ffmpeg-4.4-amd64-static/", gpu_id=0, vae_type="sd-vae",
                    #  unet_config="./models/musetalk/config.json",
                     unet_model_path="./models/musetalkV15/unet.pth",
                     whisper_dir="./models/whisper",
                     inference_config="configs/inference/test_img.yaml",  # default config; a per-request value can be passed via the API later
                     bbox_shift=0, result_dir='./results_api', extra_margin=10, fps=25,
                     audio_padding_length_left=2, audio_padding_length_right=2, batch_size=8,
                     output_vid_name=None, use_saved_coord=False, saved_coord=False,
                     use_float16=False, parsing_mode='jaw', left_cheek_width=90,
                     right_cheek_width=90, version="v15"):
            self.ffmpeg_path = ffmpeg_path
            self.gpu_id = gpu_id
            self.vae_type = vae_type
            # self.unet_config = unet_config
            self.unet_model_path = unet_model_path
            self.whisper_dir = whisper_dir
            self.inference_config = inference_config
            self.bbox_shift = bbox_shift
            self.result_dir = result_dir
            self.extra_margin = extra_margin
            self.fps = fps
            self.audio_padding_length_left = audio_padding_length_left
            self.audio_padding_length_right = audio_padding_length_right
            self.batch_size = batch_size
            self.output_vid_name = output_vid_name
            self.use_saved_coord = use_saved_coord
            self.saved_coord = saved_coord
            self.use_float16 = use_float16
            self.parsing_mode = parsing_mode
            self.left_cheek_width = left_cheek_width
            self.right_cheek_width = right_cheek_width
            self.version = version

    global loaded_models, global_args  # NOTE(review): `global` at module level is a no-op
    global_args = Args() # initialise the module-wide defaults

    # Configure the ffmpeg path once at startup.
    if not fast_check_ffmpeg():
        print("Adding ffmpeg to PATH")
        # Windows uses ';' as the PATH separator, POSIX uses ':'
        path_separator = ';' if sys.platform == 'win32' else ':'
        os.environ["PATH"] = f"{global_args.ffmpeg_path}{path_separator}{os.environ['PATH']}"
        if not fast_check_ffmpeg():
            print("Warning: Unable to find ffmpeg, please ensure ffmpeg is properly installed")

    device = torch.device(f"cuda:{global_args.gpu_id}" if torch.cuda.is_available() else "cpu")
    vae, unet, pe = load_all_model(
        unet_model_path=global_args.unet_model_path,
        vae_type=global_args.vae_type,
        # unet_config=global_args.unet_config,
        device=device
    )
    # Constant timestep 0: the UNet is run as a single-step predictor, not a full diffusion loop.
    timesteps = torch.tensor([0], device=device)

    # Optional half precision to reduce GPU memory usage.
    if global_args.use_float16:
        pe = pe.half()
        vae.vae = vae.vae.half()
        unet.model = unet.model.half()

    pe = pe.to(device)
    vae.vae = vae.vae.to(device)
    unet.model = unet.model.to(device)

    # Whisper audio feature extractor + encoder, cast to the UNet's dtype and frozen for inference.
    audio_processor = AudioProcessor(feature_extractor_path=global_args.whisper_dir)
    weight_dtype = unet.model.dtype
    whisper = WhisperModel.from_pretrained(global_args.whisper_dir)
    whisper = whisper.to(device=device, dtype=weight_dtype).eval()
    whisper.requires_grad_(False)

    # v1.5 exposes tunable cheek widths for face parsing; v1 uses library defaults.
    if global_args.version == "v15":
        fp = FaceParsing(
            left_cheek_width=global_args.left_cheek_width,
            right_cheek_width=global_args.right_cheek_width
        )
    else:  # v1
        fp = FaceParsing()

    # Everything the request handler needs, bundled in one module-level dict.
    loaded_models = {
        "vae": vae,
        "unet": unet,
        "pe": pe,
        "timesteps": timesteps,
        "audio_processor": audio_processor,
        "whisper": whisper,
        "fp": fp,
        "device": device,
        "weight_dtype": weight_dtype,
    }

    # Make sure the output directory exists.
    os.makedirs(global_args.result_dir, exist_ok=True)

except Exception as e:
    print(f"Error loading models: {e}")
    loaded_models = None  # the endpoint returns HTTP 500 when this is None

@app.post("/generate_talking_face/")
async def generate_talking_face(
    video_file: UploadFile = File(...),
    audio_file: UploadFile = File(...),
    output_filename: Optional[str] = Form(None)
):
    """Generate a lip-synced talking-face video from an uploaded video and audio clip.

    Pipeline: save uploads to a unique temp workspace, extract frames with
    ffmpeg, compute Whisper audio features, detect face landmarks, run the
    MuseTalk UNet batch-by-batch, blend the generated crops back into the
    original frames, and re-encode + mux with the input audio.

    Returns:
        FileResponse with the finished MP4 on success, or a JSONResponse with
        status 500 on failure. All intermediate files are removed afterwards.
    """
    if loaded_models is None:
        return JSONResponse(status_code=500, content={"message": "Model loading failed."})

    vae = loaded_models["vae"]
    unet = loaded_models["unet"]
    pe = loaded_models["pe"]
    timesteps = loaded_models["timesteps"]
    audio_processor = loaded_models["audio_processor"]
    whisper = loaded_models["whisper"]
    fp = loaded_models["fp"]
    device = loaded_models["device"]
    weight_dtype = loaded_models["weight_dtype"]

    # Unique per-request workspace so concurrent requests cannot collide.
    temp_dir = os.path.join(global_args.result_dir, f"temp_{uuid.uuid4()}")
    os.makedirs(temp_dir, exist_ok=True)

    # basename() strips any client-supplied directory components (path-traversal guard).
    video_name = os.path.basename(video_file.filename)
    audio_name = os.path.basename(audio_file.filename)
    video_path_temp = os.path.join(temp_dir, video_name)
    audio_path_temp = os.path.join(temp_dir, audio_name)
    result_img_save_path = os.path.join(temp_dir, "result_images")
    os.makedirs(result_img_save_path, exist_ok=True)

    try:
        # Persist the uploaded files into the request workspace.
        with open(video_path_temp, "wb") as buffer:
            shutil.copyfileobj(video_file.file, buffer)
        with open(audio_path_temp, "wb") as buffer:
            shutil.copyfileobj(audio_file.file, buffer)

        input_basename = os.path.splitext(video_name)[0]
        audio_basename = os.path.splitext(audio_name)[0]
        output_basename = os.path.basename(output_filename) if output_filename else f"{input_basename}_{audio_basename}.mp4"
        output_vid_path = os.path.join(global_args.result_dir, output_basename)
        save_dir_full = os.path.join(temp_dir, input_basename)
        crop_coord_save_path = os.path.join(temp_dir, input_basename + ".pkl")

        # Extract frames. Argument lists with shell=False prevent shell
        # injection via crafted upload filenames (the old f-string + shell=True
        # commands were injectable and broke on paths with spaces).
        os.makedirs(save_dir_full, exist_ok=True)
        subprocess.run(
            ["ffmpeg", "-y", "-v", "fatal", "-i", video_path_temp,
             "-start_number", "0", os.path.join(save_dir_full, "%08d.png")],
            check=True,
        )
        input_img_list = sorted(glob.glob(os.path.join(save_dir_full, '*.[jpJP][pnPN]*[gG]')))
        fps = get_video_fps(video_path_temp) if get_file_type(video_path_temp) == "video" else global_args.fps

        # Compute Whisper features and chunk them to one slice per output frame.
        whisper_input_features, librosa_length = audio_processor.get_audio_feature(audio_path_temp)
        whisper_chunks = audio_processor.get_whisper_chunk(
            whisper_input_features,
            device,
            weight_dtype,
            whisper,
            librosa_length,
            fps=fps,
            audio_padding_length_left=global_args.audio_padding_length_left,
            audio_padding_length_right=global_args.audio_padding_length_right,
        )

        # Face landmark/bbox extraction, optionally reusing cached coordinates.
        if os.path.exists(crop_coord_save_path) and global_args.use_saved_coord:
            print("Using saved coordinates")
            with open(crop_coord_save_path, 'rb') as f:
                coord_list = pickle.load(f)
            frame_list = read_imgs(input_img_list)
        else:
            print("Extracting landmarks... time-consuming operation")
            coord_list, frame_list = get_landmark_and_bbox(input_img_list, global_args.bbox_shift)
            with open(crop_coord_save_path, 'wb') as f:
                pickle.dump(coord_list, f)

        print(f"Number of frames: {len(frame_list)}")

        # Encode each detected face crop into VAE latents.
        input_latent_list = []
        for bbox, frame in zip(coord_list, frame_list):
            if bbox == coord_placeholder:  # no face detected in this frame
                continue
            x1, y1, x2, y2 = bbox
            if global_args.version == "v15":
                y2 = y2 + global_args.extra_margin
                y2 = min(y2, frame.shape[0])  # keep the margin inside the image
            crop_frame = frame[y1:y2, x1:x2]
            crop_frame = cv2.resize(crop_frame, (256, 256), interpolation=cv2.INTER_LANCZOS4)
            input_latent_list.append(vae.get_latents_for_unet(crop_frame))

        # Forward + reversed cycle so audio longer than the video loops smoothly.
        frame_list_cycle = frame_list + frame_list[::-1]
        coord_list_cycle = coord_list + coord_list[::-1]
        input_latent_list_cycle = input_latent_list + input_latent_list[::-1]

        # Batched single-step UNet inference driven by the audio chunks.
        print("Starting inference")
        video_num = len(whisper_chunks)
        batch_size = global_args.batch_size
        gen = datagen(
            whisper_chunks=whisper_chunks,
            vae_encode_latents=input_latent_list_cycle,
            batch_size=batch_size,
            delay_frame=0,
            device=device,
        )

        res_frame_list = []
        total = int(np.ceil(float(video_num) / batch_size))
        # FIX: the previous debug code called next(gen) before this loop, which
        # silently discarded the first batch; every batch is processed now.
        for whisper_batch, latent_batch in tqdm(gen, total=total, desc="Inference"):
            audio_feature_batch = pe(whisper_batch)
            latent_batch = latent_batch.to(dtype=unet.model.dtype)
            pred_latents = unet.model(latent_batch, timesteps, encoder_hidden_states=audio_feature_batch).sample
            recon = vae.decode_latents(pred_latents)
            res_frame_list.extend(recon)

        # Blend each generated face crop back into its original full frame.
        print("Padding generated images to original video size")
        for i, res_frame in enumerate(tqdm(res_frame_list, desc="Padding")):
            bbox = coord_list_cycle[i % len(coord_list_cycle)]
            ori_frame = copy.deepcopy(frame_list_cycle[i % len(frame_list_cycle)])
            x1, y1, x2, y2 = bbox
            if global_args.version == "v15":
                y2 = y2 + global_args.extra_margin
                # FIX: clamp against the frame being processed; the old code read
                # `frame`, a variable leaked from the latent-encoding loop above.
                y2 = min(y2, ori_frame.shape[0])
            try:
                res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
            except cv2.error:
                # Degenerate/placeholder bbox (e.g. zero-size target): skip frame.
                continue

            if global_args.version == "v15":
                combine_frame = get_image(ori_frame, res_frame, [x1, y1, x2, y2], mode=global_args.parsing_mode, fp=fp)
            else:
                combine_frame = get_image(ori_frame, res_frame, [x1, y1, x2, y2], fp=fp)
            cv2.imwrite(f"{result_img_save_path}/{str(i).zfill(8)}.png", combine_frame)

        # Encode the frame sequence to video, then mux in the original audio.
        temp_vid_path = os.path.join(temp_dir, "temp_video.mp4")
        subprocess.run(
            ["ffmpeg", "-y", "-v", "warning", "-r", str(fps), "-f", "image2",
             "-i", os.path.join(result_img_save_path, "%08d.png"),
             "-vcodec", "libx264", "-vf", "format=yuv420p", "-crf", "18", temp_vid_path],
            check=True,
        )
        subprocess.run(
            ["ffmpeg", "-y", "-v", "warning", "-i", audio_path_temp, "-i", temp_vid_path,
             "-c", "copy", "-map", "0:a", "-map", "1:v", output_vid_path],
            check=True,
        )

        return FileResponse(output_vid_path, media_type="video/mp4", filename=output_basename)

    except subprocess.CalledProcessError as e:
        return JSONResponse(status_code=500, content={"message": f"FFmpeg error: {e}"})
    except Exception as e:
        return JSONResponse(status_code=500, content={"message": f"Processing error: {e}"})
    finally:
        # Always remove the per-request workspace, even on failure.
        shutil.rmtree(temp_dir, ignore_errors=True)