import logging
import torch
import os
import random
import sys
import tqdm
import soundfile as sf
import numpy as np
import uvicorn
from fastapi import FastAPI, BackgroundTasks
from pydantic import BaseModel

from data_util.face3d_helper import Face3DHelper

from utils.commons.multiprocess_utils import multiprocess_run_tqdm
from utils.commons.os_utils import multiprocess_glob

from deep_3drecon.deep_3drecon_models.bfm import ParametricFaceModel
from deep_3drecon.secc_renderer import SECC_Renderer

from data_gen.utils.process_audio.extract_hubert import get_hubert_from_16k_speech
from data_gen.utils.process_audio.extract_mel_f0 import extract_mel_f0_from_video_name
from data_gen.utils.process_video.extract_segment_imgs import load_file, save_file, extract_segment_job, get_todo_vid_names as get_imgs_todo_names
from data_gen.utils.process_video.extract_lm2d import get_todo_vid_names, extract_landmark_job
from data_gen.utils.process_video.fit_3dmm_landmark import fit_3dmm_for_a_video
from data_gen.runs.binarizer_nerf import Binarizer

from inference.genefacepp_infer import GeneFace2Infer

from utils.commons.hparams import set_web_train_param

from tasks.run import run_task

# Configure root logging to append to log.txt.
# BUG FIX: the original datefmt was "%d-%M-%Y" — %M is the *minute* directive;
# the month is %m, so every timestamp carried the minute in the month slot.
logging.basicConfig(
    filename='log.txt',
    filemode='a+',
    format='%(asctime)s %(name)s:%(levelname)s:%(message)s',
    datefmt="%d-%m-%Y %H:%M:%S",  # day-month-year hour:minute:second
    level=logging.ERROR,
)

app = FastAPI()

class SimpleInference(BaseModel):
    """
    Request body for the /simple_inference endpoint.

    Args:
        model_select: name of an already-trained model; resolved to
            checkpoints/motion2video_nerf/<model_select>_{head,torso}
        audio_path: driving-audio path, including the file extension;
            an absolute path is recommended
        out_name: output path, including the file extension;
            an absolute path is recommended
    """
    model_select: str
    audio_path: str
    out_name: str

class InferenceParam(BaseModel):
    """
    Full parameter set for the /inference_local and /inference_stream
    endpoints; forwarded as-is to GeneFace2Infer.example_run.
    """
    a2m_ckpt: str="checkpoints/audio2motion_vae"  # audio-to-motion VAE checkpoint dir
    postnet_ckpt: str=""  # optional postnet checkpoint
    head_ckpt: str=""  # head NeRF checkpoint dir
    torso_ckpt: str  # torso NeRF checkpoint dir; the endpoints reject an empty value
    drv_audio_name: str  # driving audio file path
    drv_pose: str="nearest"  # driving pose source
    blink_mode: str="period"  # eye-blink control mode
    temperature: float=0.2  # sampling temperature
    mouth_amp: float=0.4  # mouth amplitude scale
    lle_percent: float=0.2  # LLE projection blend ratio
    debug: bool=False
    out_name: str  # output video path
    raymarching_end_threshold: float=0.01  # ray-marching termination threshold
    low_memory_usage: bool=False

class PreprocesseExtractImg(BaseModel):
    """
    Request body for /preprocesse_extract_img (frame/segment extraction).

    vid_dir is either a single video path (ds_name == 'nerf') or a dataset
    root directory that is globbed according to the dataset layout.
    """
    vid_dir:str  # video path or dataset root directory
    ds_name:str = "nerf"  # 'nerf' for a single video, otherwise a dataset name
    num_workers:int = 48  # worker processes for the dataset path
    seed:int = 0  # shuffle seed so shards are consistent across processes
    process_id: int = 0  # index of this shard
    total_process: int = 1  # total number of shards
    reset: bool = False  # when False, skip videos already processed
    load_names: bool = False  # load cached vid_names.pkl instead of globbing
    background_method: str = "knn"  # background extraction method
    total_gpus: int = 2  # 0 forces CPU processing
    no_mix_bg: bool = False  # disable background mixing
    store_in_memory: bool = False
    force_single_process: bool = True

class PreprocesseExtractLm2d(BaseModel):
    """
    Request body for /preprocesse_extract_lm2d (mediapipe 2D landmarks).
    """
    vid_dir: str  # video path or dataset root directory
    ds_name: str = 'nerf'  # 'nerf' for a single video, otherwise a dataset name
    num_workers: int = 2  # landmark-extraction worker processes
    process_id: int = 0  # index of this shard
    total_process: int = 1  # total number of shards
    reset: bool = False  # when False, skip videos already processed
    load_names: bool = False  # load cached vid_names.pkl instead of globbing

class PreprocesseFit3dmm(BaseModel):
    """
    Request body for /preprocesse_fit_3dmm (3DMM coefficient fitting).
    """
    vid_dir: str  # video path or dataset root directory
    ds_name: str = "nerf"  # 'nerf' for a single video, otherwise a dataset name
    seed: int = 0  # shuffle seed so shards are consistent across processes
    process_id: int = 0  # index of this shard
    total_process: int = 1  # total number of shards
    id_mode: str = "global"  # identity-coefficient mode
    keypoint_mode: str = "mediapipe"  # keypoint source for the face model
    large_yaw_threshold: float = 9999999.9  # yaw filter; default disables it
    debug: bool = True
    reset: bool = True  # when False, skip videos already processed
    load_names: bool = False  # load cached vid_names.pkl instead of globbing


class TrainParam(BaseModel):
    """
    Request body for /train_head and /train_torso; handed to
    set_web_train_param before run_task is invoked.
    """
    config: str  # path to the experiment yaml config (required)
    exp_name: str  # experiment name, used as the checkpoint directory (required)
    hparams: str=""  # extra hparams overrides; required for the torso stage
    infer: bool=False
    is_validate: bool=False
    reset: bool=False
    remove: bool=False
    debug: bool=False
    start_rank: int=0  # starting rank for distributed training
    world_size: int=-1  # -1 lets the task decide the world size
    init_method: str="tcp"  # distributed init method



@app.get("/")
def read_root():
    """Health-check endpoint; confirms the server is up."""
    return {"Hello": "World"}

@app.post("/inference_local")
def inference_local(param: InferenceParam):
    """Run a full GeneFace++ inference synchronously; returns a status dict."""
    # A torso checkpoint is mandatory — reject empty/missing values up front.
    if not param.torso_ckpt:
        logging.error("torso_ckpt can not be empty!")
        return {"status": "error"}

    GeneFace2Infer.example_run(param)
    return {"status": "ok"}

@app.post("/inference_stream")
def inference_stream(param: InferenceParam, background_tasks: BackgroundTasks):
    """Queue a GeneFace++ inference as a FastAPI background task and return at once."""
    # A torso checkpoint is mandatory — reject empty/missing values up front.
    if not param.torso_ckpt:
        logging.error("torso_ckpt can not be empty!")
        return {"status": "error"}

    # Schedule the inference to run after the response is sent; the caller
    # must poll for the out_name file itself.
    background_tasks.add_task(GeneFace2Infer.example_run, param)
    return {"status": "ok"}

@app.post("/simple_inference")
def simple_inference(param: SimpleInference):
    """
    Simplified inference endpoint: resolves the head/torso checkpoint
    directories from the selected model name and delegates to inference_local.

    Returns:
        {"status": "ok"} on success, {"status": "error"} on failure.
    """
    try:
        inferenceParam = {
            'head_ckpt': f'checkpoints/motion2video_nerf/{param.model_select}_head',
            'torso_ckpt': f'checkpoints/motion2video_nerf/{param.model_select}_torso',
            'drv_audio_name': param.audio_path,
            'out_name': param.out_name,
        }
        # BUG FIX: inference_local reports failures via its return value, not
        # by raising — the original discarded it and always answered "ok".
        result = inference_local(InferenceParam(**inferenceParam))
    except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        logging.exception("simple inference error")
        return {"status": "error"}

    return result

@app.post("/preprocesse_crop")
def preprocesse_crop(VIDEO_ID: str):
    """
    Crop/re-encode the raw video to 512x512 at 25 FPS, keeping the target
    face in every frame. The original file is kept as <VIDEO_ID>_to_rm.mp4
    and the cropped result takes its place at data/raw/videos/<VIDEO_ID>.mp4.
    """
    # NOTE(review): VIDEO_ID is interpolated into shell commands unescaped —
    # callers must not pass untrusted input here.
    os.system(f'ffmpeg -i data/raw/videos/{VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 data/raw/videos/{VIDEO_ID}_512.mp4')
    # Keep the original aside, then promote the 512px version to the canonical name.
    os.system(f'mv data/raw/videos/{VIDEO_ID}.mp4 data/raw/videos/{VIDEO_ID}_to_rm.mp4')
    os.system(f'mv data/raw/videos/{VIDEO_ID}_512.mp4 data/raw/videos/{VIDEO_ID}.mp4')

    return {"status": "ok"}

@app.post("/preprocesse_extract_audio")
def preprocesse_extract_audio(VIDEO_ID: str):
    """
    Extract audio features for a video: a 16 kHz wav, HuBERT hidden states
    (aud_hubert.npy) and mel/F0 features (aud_mel_f0.npy), all written under
    data/processed/videos/<VIDEO_ID>/.
    """
    # os.makedirs replaces the shelled-out `mkdir -p` (same semantics, no subprocess).
    os.makedirs(f'data/processed/videos/{VIDEO_ID}', exist_ok=True)
    os.system(f'ffmpeg -i data/raw/videos/{VIDEO_ID}.mp4 -f wav -ar 16000 data/processed/videos/{VIDEO_ID}/aud.wav ')

    # The same 16 kHz wav feeds both feature extractors (the original
    # re-assigned this path a second time with an identical value).
    wav_16k_name = f"data/processed/videos/{VIDEO_ID}/aud.wav"

    # HuBERT hidden states.
    hubert_npy_name = f"data/processed/videos/{VIDEO_ID}/aud_hubert.npy"
    speech_16k, _ = sf.read(wav_16k_name)
    hubert_hidden = get_hubert_from_16k_speech(speech_16k)
    np.save(hubert_npy_name, hubert_hidden.detach().numpy())
    print(f"Saved at {hubert_npy_name}")

    # Mel spectrogram + F0.
    out_name = f"data/processed/videos/{VIDEO_ID}/aud_mel_f0.npy"
    extract_mel_f0_from_video_name(wav_16k_name, out_name)
    print(f"Saved at {out_name}")

    return {"status": "ok"}

@app.post("/preprocesse_extract_img")
def preprocesse_extract_img(args: PreprocesseExtractImg):
    """
    Extract ground-truth frames and run segmentation for the video(s).

    For ds_name == 'nerf' a single video is processed in-process; otherwise
    the dataset directory is globbed according to its known layout and the
    jobs are fanned out over num_workers processes, optionally sharded by
    (process_id, total_process).
    """
    VIDEO_ID = os.path.basename(args.vid_dir).split('.')[0]
    os.system(f'mkdir -p data/processed/videos/{VIDEO_ID}/gt_imgs')
    os.system(f'ffmpeg -i data/raw/videos/{VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 -start_number 0 data/processed/videos/{VIDEO_ID}/gt_imgs/%08d.jpg')

    vid_dir = args.vid_dir
    ds_name = args.ds_name
    load_names = args.load_names
    background_method = args.background_method
    total_gpus = args.total_gpus
    mix_bg = not args.no_mix_bg
    store_in_memory = args.store_in_memory
    force_single_process = args.force_single_process

    if ds_name.lower() == 'nerf':  # process a single video
        vid_names = [vid_dir]
    else:  # process a whole dataset; each dataset has a known directory depth
        if ds_name in ['lrs3_trainval']:
            vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
        elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
            vid_name_pattern = os.path.join(vid_dir, "*.mp4")
        elif ds_name in ['lrs2', 'lrs3', 'voxceleb2']:
            vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
        elif ds_name in ["RAVDESS", 'VFHQ']:
            vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
        else:
            logging.error("not implements error")
            # BUG FIX: the response key was misspelled "ststus".
            return {"status": "error"}

        vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
        if os.path.exists(vid_names_path) and load_names:
            print(f"loading vid names from {vid_names_path}")
            vid_names = load_file(vid_names_path)
        else:
            vid_names = multiprocess_glob(vid_name_pattern)
        vid_names = sorted(vid_names)
        print(f"saving vid names to {vid_names_path}")
        save_file(vid_names_path, vid_names)

    # Deterministic shuffle so every shard sees the same global ordering.
    vid_names = sorted(vid_names)
    random.seed(args.seed)
    random.shuffle(vid_names)

    process_id = args.process_id
    total_process = args.total_process
    if total_process > 1:
        assert process_id <= total_process - 1
        num_samples_per_process = len(vid_names) // total_process
        # BUG FIX: the last shard must take the remainder. The original compared
        # `process_id == total_process`, which is unreachable given the assert
        # above, so trailing videos were silently dropped.
        if process_id == total_process - 1:
            vid_names = vid_names[process_id * num_samples_per_process:]
        else:
            vid_names = vid_names[process_id * num_samples_per_process: (process_id + 1) * num_samples_per_process]

    if not args.reset:
        # Skip videos whose outputs already exist.
        vid_names = get_imgs_todo_names(vid_names, background_method)
    print(f"todo videos number: {len(vid_names)}")

    device = "cuda" if total_gpus > 0 else "cpu"
    extract_job = extract_segment_job
    fn_args = [(vid_name, ds_name == 'nerf', background_method, device, total_gpus, mix_bg, store_in_memory, force_single_process)
               for vid_name in vid_names]

    try:
        if ds_name == 'nerf':  # single video: run in-process
            extract_job(*fn_args[0])
        else:
            for vid_name in multiprocess_run_tqdm(extract_job, fn_args, desc=f"Root process {args.process_id}:  segment images", num_workers=args.num_workers):
                pass
    except Exception:
        logging.exception("preprocesse extract img error")
        return {"status": "error"}

    return {"status": "ok"}

@app.post("/preprocesse_extract_lm2d")
def preprocesse_extract_lm2d(args: PreprocesseExtractLm2d):
    """
    Extract mediapipe-based 2D landmarks for the video(s).

    For ds_name == 'nerf' a single video is processed; otherwise the dataset
    directory is globbed according to its known layout, optionally sharded by
    (process_id, total_process), and the jobs run over num_workers processes.
    """
    vid_dir = args.vid_dir
    ds_name = args.ds_name
    load_names = args.load_names

    if ds_name.lower() == 'nerf':  # process a single video
        vid_names = [vid_dir]
        out_names = [video_name.replace("/raw/", "/processed/").replace(".mp4", "/lms_2d.npy") for video_name in vid_names]
    else:  # process a whole dataset; each dataset has a known directory depth
        if ds_name in ['lrs3_trainval']:
            vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
        elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
            vid_name_pattern = os.path.join(vid_dir, "*.mp4")
        elif ds_name in ['lrs2', 'lrs3', 'voxceleb2', 'CMLR']:
            vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
        elif ds_name in ["RAVDESS", 'VFHQ']:
            vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
        else:
            logging.error("not implemented error")
            return {"status": "error"}

        try:
            vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
            if os.path.exists(vid_names_path) and load_names:
                print(f"loading vid names from {vid_names_path}")
                vid_names = load_file(vid_names_path)
            else:
                vid_names = multiprocess_glob(vid_name_pattern)
            vid_names = sorted(vid_names)
            if not load_names:
                print(f"saving vid names to {vid_names_path}")
                save_file(vid_names_path, vid_names)
            out_names = [video_name.replace("/video/", "/lms_2d/").replace(".mp4", "_lms.npy") for video_name in vid_names]
        except Exception as e:
            print(e)
            return {"status": "error"}

    process_id = args.process_id
    total_process = args.total_process
    if total_process > 1:
        assert process_id <= total_process - 1
        num_samples_per_process = len(vid_names) // total_process
        # BUG FIX: the last shard must take the remainder. The original compared
        # `process_id == total_process`, which is unreachable given the assert
        # above, so trailing videos were silently dropped.
        if process_id == total_process - 1:
            vid_names = vid_names[process_id * num_samples_per_process:]
        else:
            vid_names = vid_names[process_id * num_samples_per_process: (process_id + 1) * num_samples_per_process]

    if not args.reset:
        # Skip videos whose landmark files already exist.
        vid_names = get_todo_vid_names(vid_names)
    print(f"todo videos number: {len(vid_names)}")

    try:
        fail_cnt = 0
        job_args = [(vid_name, ds_name == 'nerf') for vid_name in vid_names]
        for (i, res) in multiprocess_run_tqdm(extract_landmark_job, job_args, num_workers=args.num_workers, desc=f"Root {args.process_id}: extracing MP-based landmark2d"):
            if res is False:
                fail_cnt += 1
            print(f"finished {i + 1} / {len(vid_names)} = {(i + 1) / len(vid_names):.4f}, failed {fail_cnt} / {i + 1} = {fail_cnt / (i + 1):.4f}")
            sys.stdout.flush()
    except Exception:
        logging.exception("preprocesse extract lm2d error")
        return {"status": "error"}

    return {"status": "ok"}

@app.post("/preprocesse_fit_3dmm")
def preprocesse_fit_3dmm(args: PreprocesseFit3dmm):
    """
    Fit 3DMM coefficients for the video(s).

    For ds_name == 'nerf' a single video is processed; otherwise the dataset
    directory is globbed according to its known layout and sharded by
    (process_id, total_process). Fitting runs sequentially per video.
    """
    vid_dir = args.vid_dir
    ds_name = args.ds_name
    load_names = args.load_names

    print(f"args {args}")

    if ds_name.lower() == 'nerf':  # process a single video
        vid_names = [vid_dir]
        out_names = [video_name.replace("/raw/", "/processed/").replace(".mp4", "_coeff_fit_mp.npy") for video_name in vid_names]
    else:  # process a whole dataset; each dataset has a known directory depth
        try:
            if ds_name in ['lrs3_trainval']:
                vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
            elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
                vid_name_pattern = os.path.join(vid_dir, "*.mp4")
            elif ds_name in ['lrs2', 'lrs3', 'voxceleb2', 'CMLR']:
                vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
            elif ds_name in ["RAVDESS", 'VFHQ']:
                vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
            else:
                logging.error("not implemented error")
                # BUG FIX: an unsupported dataset previously answered "ok".
                return {"status": "error"}

            vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
            if os.path.exists(vid_names_path) and load_names:
                print(f"loading vid names from {vid_names_path}")
                vid_names = load_file(vid_names_path)
            else:
                vid_names = multiprocess_glob(vid_name_pattern)
            vid_names = sorted(vid_names)
            print(f"saving vid names to {vid_names_path}")
            save_file(vid_names_path, vid_names)
            out_names = [video_name.replace("/video/", "/coeff_fit_mp/").replace(".mp4", "_coeff_fit_mp.npy") for video_name in vid_names]
        except Exception:
            logging.exception("preprocesse fit 3dmm error")
            return {"status": "error"}

    print(vid_names[:10])
    # Deterministic shuffle so every shard sees the same global ordering.
    random.seed(args.seed)
    random.shuffle(vid_names)

    # NOTE(review): the model instance is not passed to fit_3dmm_for_a_video;
    # it is kept because constructing it may initialize shared state — confirm.
    face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
                camera_distance=10, focal=1015, keypoint_mode=args.keypoint_mode)
    face_model.to(torch.device("cuda:0"))

    process_id = args.process_id
    total_process = args.total_process
    if total_process > 1:
        assert process_id <= total_process - 1
        num_samples_per_process = len(vid_names) // total_process
        # BUG FIX: the last shard must take the remainder. The original compared
        # `process_id == total_process`, which is unreachable given the assert
        # above, so trailing videos were silently dropped.
        if process_id == total_process - 1:
            vid_names = vid_names[process_id * num_samples_per_process:]
        else:
            vid_names = vid_names[process_id * num_samples_per_process: (process_id + 1) * num_samples_per_process]

    if not args.reset:
        # Skip videos whose coefficient files already exist.
        vid_names = get_todo_vid_names(vid_names)

    try:
        failed_img_names = []
        for i in tqdm.trange(len(vid_names), desc=f"process {process_id}: fitting 3dmm ..."):
            img_name = vid_names[i]
            try:
                is_person_specific_data = ds_name == 'nerf'
                success = fit_3dmm_for_a_video(img_name, is_person_specific_data, args.id_mode, args.debug, large_yaw_threshold=args.large_yaw_threshold)
                if not success:
                    failed_img_names.append(img_name)
            except Exception:
                logging.exception('preprocesse fit 3dmm error')
                failed_img_names.append(img_name)
            print(f"finished {i + 1} / {len(vid_names)} = {(i + 1) / len(vid_names):.4f}, failed {len(failed_img_names)} / {i + 1} = {len(failed_img_names) / (i + 1):.4f}")
            sys.stdout.flush()
        print(f"all failed image names: {failed_img_names}")
        print(f"All finished!")
    except Exception:
        logging.exception("preprocesse fit 3dmm error")
        return {"status": "error"}

    return {"status": "ok"}

@app.post("/preprocesse_binarize")
def preprocesse_binarize(VIDEO_ID: str):
    """
    Binarize: pack the processed per-video data into the NeRF training format.
    """
    # Build the BFM face model (CPU) and the 3D landmark helper the
    # binarizer consumes when parsing a single long-audio NeRF dataset.
    bfm_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
                                    camera_distance=10, focal=1015)
    bfm_model.to("cpu")
    helper = Face3DHelper()

    binarizer = Binarizer(bfm_model, helper)
    try:
        binarizer.parse(VIDEO_ID)
    except Exception:
        logging.exception("preprocesse binarize error")
        return {"status": "error"}

    print(f"Binarization for {VIDEO_ID} Done!")
    return {"status": "ok"}

@app.post("/preprocesse")
def preprocesse(VIDEO_PATH: str):
    """
    Full data-preprocessing pipeline for one raw video.

    Moves the uploaded video into data/raw/videos/ and then runs, in order:
    crop -> audio feature extraction -> frame extraction -> 2D landmark
    extraction -> 3DMM fitting -> binarization, stopping at the first step
    that reports an error.
    """
    # Derive the video id from the file name (without extension).
    VIDEO_ID = os.path.basename(VIDEO_PATH).split('.')[0]
    # Move the file to where the pipeline expects it.
    os.system(f'mv {VIDEO_PATH} data/raw/videos/{VIDEO_ID}.mp4')

    vid_path = f'data/raw/videos/{VIDEO_ID}.mp4'
    # Each step is deferred so failures short-circuit the remaining work.
    steps = [
        lambda: preprocesse_crop(VIDEO_ID),
        lambda: preprocesse_extract_audio(VIDEO_ID),
        lambda: preprocesse_extract_img(PreprocesseExtractImg(vid_dir=vid_path)),
        lambda: preprocesse_extract_lm2d(PreprocesseExtractLm2d(vid_dir=vid_path)),
        lambda: preprocesse_fit_3dmm(PreprocesseFit3dmm(vid_dir=vid_path)),
        lambda: preprocesse_binarize(VIDEO_ID),
    ]
    try:
        for step in steps:
            # BUG FIX: the sub-steps signal failure via their return value
            # rather than by raising; the original ignored those values and
            # always reported "ok".
            result = step()
            if result.get("status") != "ok":
                return {"status": "error"}
    except Exception:
        logging.exception("Preprocess Error!!!")
        return {"status": "error"}

    return {"status": "ok"}

def create_lm3d_radnerf_sr(VIDEO_ID: str):
    """
    Write egs/datasets/<VIDEO_ID>/lm3d_radnerf_sr.yaml — the head-NeRF
    (super-resolution) training config used by /train.

    The yaml content is emitted verbatim; its inline comments (including the
    Chinese notes about ambient-loss tuning) are part of the generated file.
    """
    os.makedirs(f"egs/datasets/{VIDEO_ID}", exist_ok=True)
    
    # Each entry is one literal line of the target yaml file.
    seq = [
        "base_config:\n",
        "  - ./lm3d_radnerf.yaml\n",
        "\n",
        "task_cls: tasks.radnerfs.radnerf_sr.RADNeRFTask\n",
        "not_save_modules: ['criterion_lpips', 'dual_disc']\n",
        "with_sr: true\n",
        "smo_win_size: 3 # larger leads to oversmoothed lip\n",
        "\n",
        "lpips_mode: vgg19_v2 # alex | vgg19_v2 \n",
        "lambda_lpips_loss: 0.001 # alex 0.01 | vgg19_v2 0.001\n",
        "# lambda_dual_fm: 0.0001 # 0.0 will disable dual Disc and speed up the training\n",
        "lambda_dual_fm: 0.0 # 0.0 will disable dual Disc and speed up the training\n",
        "\n",
        "# Non-face reg loss for alleviate temporal jittering\n",
        "# 科学记数法不能1e-6会被yaml识别成字符串\n",
        "# May：1E-8完全不抖，1E-7女士刘海会抖不自然，1E-6抖动更加明显。但是1E-8画质相比后两者更糊一些。\n",
        "# target_ambient_loss: 3.0e-8  \n",
        "# Obama & Macron & Lieu：1E-6完全不抖，1E-8可能画质或口型会变差。长头发或者面部以外区域多变的人需要调小这个loss target为1E-8，对大多数男人1E-6就好。\n",
        "target_ambient_loss: 1.0e-8\n",
        "lr_lambda_ambient: 0.01\n",
        "lambda_ambient: # 使用target_ambient_loss后，被弃用\n",
        "ambient_loss_mode: mae # mae | mse \n",
        "\n",
        "sr_start_iters: 0_0000\n",
        "lpips_start_iters: 20_0000\n",
        "max_updates: 25_0000\n",
        "num_ckpt_keep: 1\n",
        "add_eye_blink_cond: true # to better control eye blink\n",
        "eye_blink_dim: 2\n",
    ]

    with open(f"egs/datasets/{VIDEO_ID}/lm3d_radnerf_sr.yaml", mode='w+') as file:
        file.writelines(seq)
    
    return {"status": "ok"}

def create_lm3d_radnerf_torso_sr(VIDEO_ID:str):
    """
    Write egs/datasets/<VIDEO_ID>/lm3d_radnerf_torso_sr.yaml — the torso-NeRF
    (super-resolution) training config used by /train.

    NOTE(review): head_model_dir here is hard-coded to
    checkpoints/1007_radnerf_sr/lm3d_radnerf_sr; /train overrides it via
    hparams at training time — confirm the default is intentional.
    """
    os.makedirs(f"egs/datasets/{VIDEO_ID}", exist_ok=True)

    # Each entry is one literal line of the target yaml file.
    seq = [
        "base_config:\n",
        "  - ./lm3d_radnerf_torso.yaml\n",
        "\n",
        "task_cls: tasks.radnerfs.radnerf_torso_sr.RADNeRFTorsoTask\n",
        "head_model_dir: checkpoints/1007_radnerf_sr/lm3d_radnerf_sr\n",
        "not_save_modules: ['criterion_lpips', 'dual_disc']\n",
        "with_sr: true\n",
        "torso_head_aware: true\n",
        "individual_embedding_dim: 4\n",
        "torso_individual_embedding_dim: 8 # 0 leads to worse image fidelity\n",
        "smo_win_size: 3 # larger leads to oversmoothed lip\n",
        "\n",
        "num_ckpt_keep: 1\n",
        "lpips_mode: vgg19_v2 # alex | vgg19_v2 \n",
        "lambda_lpips_loss: 0.001 # alex 0.001 | vgg19_v2 0.0001\n",
        "lambda_ambient: 1. \n",
        "lambda_torso_deform: 0. # 1.0e-3 # L1 reg deformation torso, slight scale can improve stability\n",
        "lambda_weights_entropy: 1.0e-4 # enlarge from 1e-4 leads to quick converged to 0\n",
        "\n",
        "# the cause of NaN is the changed camera convention, the trans_z out of input scale of freqencoder.\n",
        "# we change it into lm2ds and worked it out\n",
        "sr_start_iters: 0_0000\n",
        "lpips_start_iters: 20_0000\n",
        "max_updates: 25_0000\n",
        "clip_grad_norm: 0. # 1. in fp16 leads to nan\n",
        "add_eye_blink_cond: true # to better control eye blink\n",
        "eye_blink_dim: 2\n",
    ]

    with open(f"egs/datasets/{VIDEO_ID}/lm3d_radnerf_torso_sr.yaml", mode="w+") as file:
        file.writelines(seq)
    
    return {"status": "ok"}

def create_lm3d_radnerf_torso(VIDEO_ID: str):
    """
    Write egs/datasets/<VIDEO_ID>/lm3d_radnerf_torso.yaml — the base torso-NeRF
    config; head_model_dir points at checkpoints/<VIDEO_ID>/lm3d_radnerf.
    """
    os.makedirs(f"egs/datasets/{VIDEO_ID}", exist_ok=True)

    # Each entry is one literal line of the target yaml file; VIDEO_ID is
    # interpolated into video_id and head_model_dir.
    seq = [
        "base_config:\n",
        "  - egs/egs_bases/radnerf/lm3d_radnerf.yaml\n",
        "\n",
        f"video_id: {VIDEO_ID} # the video file should be located at `data/raw/videos/<video_id>.mp4`\n",
        "task_cls: tasks.radnerfs.radnerf_torso.RADNeRFTorsoTask\n",
        f"head_model_dir: checkpoints/{VIDEO_ID}/lm3d_radnerf\n",
        "\n",
        "torso_train_mode: 1\n",
        "\n",
        "amp: true\n",
        "nerf_keypoint_mode: lm68 # lm68 | lm131 | lm468\n",
        "cond_out_dim: 64\n",
        "cond_dropout_rate: 0.\n",
        "individual_embedding_dim: 4 # 32\n",
        "hidden_dim_sigma: 128 # 64 by radnerf is too small\n",
        "geo_feat_dim: 128 # 64 by radnerf is too small\n",
        "num_layers_color: 2 # 2\n",
        "hidden_dim_color: 128 # 64 by radnerf is too small\n",
        "num_layers_ambient: 3 # 3\n",
        "hidden_dim_ambient: 128 # 64 by radnerf is too small\n",
        "lambda_ambient: 1. \n",
        "polygon_face_mask: true\n",
        "n_rays: 65536 # 262144 # 65536 # num rays sampled per image for each training step, default 256*256\n",
        "clip_grad_norm: 0. # 1. in fp16 leads to nan\n",
    ]

    with open(f"egs/datasets/{VIDEO_ID}/lm3d_radnerf_torso.yaml", mode="w+") as file:
        file.writelines(seq)
    
    return {"status": "ok"}

def create_lm3d_radnerf(VIDEO_ID: str):
    """
    Write egs/datasets/<VIDEO_ID>/lm3d_radnerf.yaml — the base head-NeRF
    config; the trailing comment block (scale-tuning notes) is emitted into
    the generated yaml verbatim.
    """
    os.makedirs(f"egs/datasets/{VIDEO_ID}", exist_ok=True)

    # Each entry is one literal line of the target yaml file; VIDEO_ID is
    # interpolated into video_id.
    seq = [
        "base_config:\n",
        "  - egs/egs_bases/radnerf/lm3d_radnerf.yaml\n",
        "\n",
        f"video_id: {VIDEO_ID} # the video file should be located at `data/raw/videos/<video_id>.mp4`\n",
        "amp: true\n",
        "\n",
        "nerf_keypoint_mode: lm68 # lm68 | lm131 | lm468\n",
        "\n",
        "cond_out_dim: 64\n",
        "cond_dropout_rate: 0.\n",
        "\n",
        "individual_embedding_dim: 4 # 32\n",
        "hidden_dim_sigma: 128 # 64 by radnerf is too small\n",
        "geo_feat_dim: 128 # 64 by radnerf is too small\n",
        "num_layers_color: 2 # 2\n",
        "hidden_dim_color: 128 # 64 by radnerf is too small\n",
        "num_layers_ambient: 3 # 3\n",
        "hidden_dim_ambient: 128 # 64 by radnerf is too small\n",
        "lambda_ambient: 1. \n",
        "polygon_face_mask: true\n",
        "\n",
        "n_rays: 65536 # 262144 # 65536 # num rays sampled per image for each training step, default 256*256\n",
        "\n",
        "clip_grad_norm: 0. # 1. in fp16 leads to nan\n",
        "\n",
        "# to tune scale\n",
        "# https://github.com/NVlabs/instant-ngp/blob/master/docs/nerf_dataset_tips.md\n",
        "# https://github.com/ashawkey/torch-ngp/issues/112\n",
        "# The occupancy grid works fine in LEGO dataset. (~10x accelerated)\n",
        "# In my experiment (and on my dataset), I found that occupancy grid sampling is vulnerable to scale.\n",
        "# In specific scale range, the occ grid sampling works and accelerates rendering.\n",
        "# But outside of that range, the acceleration gain disappears, or it fails to converge at all.\n",
        "# (Without the occ grid sampling, the model has learned the scene in that scales.)\n",
        "# I think this is reasonable because covering the the camera-viewed region with a predefined grid \n",
        "# is easier to fail than sampling without grids.\n",
        "# With an manual scale tuning, I can get the expected acceleration gain.\n",
    ]

    with open(f"egs/datasets/{VIDEO_ID}/lm3d_radnerf.yaml", mode="w+") as file:
        file.writelines(seq)
    
    return {"status": "ok"}

@app.post("/train_head")
def train_head(param: TrainParam):
    """
    Train the Head NeRF model.

    Requires a non-empty config path and experiment name; hands the
    parameters to the training task runner.
    """
    # Guard clauses: both fields are mandatory.
    if not param.config:
        logging.error("config can not be empty")
        return {"status": "error"}
    if not param.exp_name:
        logging.error("exp_name can not be empty")
        return {"status": "error"}

    set_web_train_param(param)
    run_task()
    return {"status": "ok"}

@app.post("/train_torso")
def train_torso(param: TrainParam):
    """
    Train the Torso NeRF model.

    Requires a non-empty config path, experiment name AND hparams (the torso
    stage needs head_model_dir passed through hparams); hands the parameters
    to the training task runner.
    """
    # Guard clauses: all three fields are mandatory for the torso stage.
    if not param.config:
        logging.error("config can not be empty")
        return {"status": "error"}
    if not param.exp_name:
        logging.error("exp_name can not be empty")
        return {"status": "error"}
    if not param.hparams:
        logging.error("hparam can not be empty in torso stage.")
        return {"status": "error"}

    set_web_train_param(param)
    run_task()
    return {"status": "ok"}

@app.post("/train")
def train(VIDEO_ID: str):
    """
    Train head + torso NeRF models for a preprocessed video.

    Generates the four yaml configs, trains the Head NeRF, then trains the
    Torso NeRF on top of the head checkpoint. Returns {"status": "error"}
    if any stage reports failure.
    """
    try:
        # Create the per-video config files.
        create_lm3d_radnerf_sr(VIDEO_ID)
        create_lm3d_radnerf_torso_sr(VIDEO_ID)
        create_lm3d_radnerf_torso(VIDEO_ID)
        create_lm3d_radnerf(VIDEO_ID)

        # Train the Head NeRF model.
        # BUG FIX: the head experiment dir used 'motion2Video_nerf' (capital V)
        # while simple_inference and the torso stage use lowercase
        # 'motion2video_nerf', so the head checkpoint was written where no
        # consumer could find it; unify on lowercase.
        trainHead_Param = {
            'config': f'egs/datasets/{VIDEO_ID}/lm3d_radnerf_sr.yaml',
            'exp_name': f'motion2video_nerf/{VIDEO_ID}_head',
            'reset': True,
        }
        # BUG FIX: train_head/train_torso report failure via their return
        # value; the original discarded it and always answered "ok".
        result = train_head(TrainParam(**trainHead_Param))
        if result.get("status") != "ok":
            return {"status": "error"}

        # Train the Torso NeRF model on top of the head checkpoint.
        trainTroso_Param = {
            'config': f'egs/datasets/{VIDEO_ID}/lm3d_radnerf_torso_sr.yaml',
            'exp_name': f'motion2video_nerf/{VIDEO_ID}_torso',
            'hparams': f'head_model_dir=checkpoints/motion2video_nerf/{VIDEO_ID}_head',
            'reset': True,
        }
        result = train_torso(TrainParam(**trainTroso_Param))
        if result.get("status") != "ok":
            return {"status": "error"}
    except Exception:
        logging.exception("Train Error!!!")
        return {"status": "error"}

    return {"status": "ok"}

# Entry point: serve the API on localhost only (port 9002).
if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=9002)