from DEAD.AutoDecoder.Evaluation.Decoding import setup_dead_model
from DDPM.Evaluation.InternalBallistic import convert_av1_to_value
from OPT.Config import *
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.optim import Adam
from Utility.PlotCurve import plot_curves_flow
from tqdm import tqdm
from OPT.Objective import objective_function
from torch.amp import autocast, GradScaler
from OPT.Evaluation.Decoding import start_decode
from OPT.Evaluation.InternalBallistic import start_internal_ballistic_cal
from OPT.Evaluation.Analysis import start_analysis
import math

def start_optimization(control_params, dead_dic):
    """Gradient-based optimization of design variables against a target
    pressure curve.

    Loads an initial sample (lv / gv / av1) from an .npz archive, then runs
    ``iteration_num`` Adam steps with mixed precision (autocast + GradScaler),
    minimizing ``objective_function`` against the target (t, p) curve.
    Intermediate states are plotted every 10 iterations and accumulated into
    ``{result_path}/Optimization.npz``.

    Parameters
    ----------
    control_params : dict
        Must contain "result_path", "samples_path", "sample_npz_name",
        "opt_index", "variables" (string containing any of 'L', 'G', 'A'
        selecting which tensors to optimize), "learning_rate" and
        "iteration_num".
    dead_dic : dict
        DEAD auto-decoder configuration: "dead_hidden_size", "dead_depth",
        "dead_data_num", "dead_latent_size", "dead_load_path".

    Returns
    -------
    None. Results are written to disk as a side effect.
    """
    result_path = control_params["result_path"]
    samples_path = control_params["samples_path"]
    sample_npz_name = control_params["sample_npz_name"]
    opt_index = control_params["opt_index"]
    variables = control_params["variables"]
    print(f"Start Optimization at {result_path}")

    # NpzFile keeps the archive's file handle open; read everything we need
    # inside the context manager so the handle is closed deterministically.
    with np.load(f"{samples_path}/{sample_npz_name}.npz") as npz_file:
        # Initial optimization variables, promoted to a batch of one.
        # (Shapes come from the sample archive — assumed 1-D per sample;
        # TODO confirm against the sampling code.)
        lv = torch.from_numpy(npz_file["lv"][opt_index]).cuda().unsqueeze(0)
        gv = torch.from_numpy(npz_file["gv"][opt_index]).cuda().unsqueeze(0)
        av1 = torch.from_numpy(npz_file["av1"][opt_index]).cuda().unsqueeze(0)

        # Fixed problem data (target curve and propellant/nozzle constants).
        t_array_target = npz_file["t_array_target"]
        p_array_target = npz_file["p_array_target"]
        cstar = npz_file["cstar"]
        rho = npz_file["rho"]
        n = npz_file["n"]
        At = npz_file["At"]

    # All three candidate tensors track gradients; which ones the optimizer
    # actually updates is decided by `variables` below.
    lv.requires_grad = True
    av1.requires_grad = True
    gv.requires_grad = True

    t_array_target = torch.tensor(
        t_array_target, device="cuda", dtype=torch.float32)
    p_array_target = torch.tensor(
        p_array_target, device="cuda", dtype=torch.float32)
    cstar = torch.tensor(cstar, device="cuda", dtype=torch.float32)
    rho = torch.tensor(rho, device="cuda", dtype=torch.float32)
    n = torch.tensor(n, device="cuda", dtype=torch.float32)
    At = torch.tensor(At, device="cuda", dtype=torch.float32)

    lr_init = control_params["learning_rate"]
    lr_final = 0

    # Select which tensors the optimizer updates; av1 gets a 10x smaller LR.
    params_list = []
    if 'L' in variables:
        params_list.append({'params': lv, 'lr': lr_init})
    if 'G' in variables:
        params_list.append({'params': gv, 'lr': lr_init})
    if 'A' in variables:
        params_list.append({'params': av1, 'lr': lr_init*0.1})

    optimizer = Adam(params_list, weight_decay=0.0)
    optimizer.zero_grad()

    dead = setup_dead_model(dead_dic["dead_hidden_size"], dead_dic["dead_depth"],
                            dead_dic["dead_data_num"], dead_dic["dead_latent_size"],
                            dead_dic["dead_load_path"])

    plt.figure(figsize=(10, 6))
    # GradScaler handles loss scaling for mixed-precision training.
    scaler = GradScaler()
    lv_list = []
    gv_list = []
    av1_list = []
    r_ref_list = []
    R_list = []
    r2_list = []
    loading_fraction_list = []
    iteration_num = control_params["iteration_num"]
    # Main optimization loop.
    tqdm_opt = tqdm(range(iteration_num), desc='epoches   ',
                    colour='red', dynamic_ncols=True)
    for i in tqdm_opt:
        # Linear-decay alternative kept for reference:
        # current_lr = lr_init - (lr_init - lr_final) * (i / iteration_num)
        # Cosine-annealing learning-rate schedule.
        current_lr = lr_final + 0.5 * (lr_init - lr_final) * (1 + math.cos(math.pi * i / iteration_num))

        # Push the scheduled LR into every param group, preserving the
        # 10x reduction for av1.
        for param_group in optimizer.param_groups:
            if param_group['params'][0] is av1:
                param_group['lr'] = current_lr * 0.1
            else:
                param_group['lr'] = current_lr

        optimizer.zero_grad()
        # Forward pass under autocast (fp16/bf16 where safe).
        with autocast(device_type='cuda'):
            loss_value, t_vector, p_vector, r2, loading_fraction = objective_function(
                dead, control_params, t_array_target, p_array_target, cstar, rho, n, At, lv, gv, av1)
        # Every 10 iterations: plot the current curve and checkpoint the
        # optimization trajectory to Optimization.npz.
        if i % 10 == 0:
            plot_curves_flow(title='Chamber Pressure vs Burned time',
                             xlabel='t /s',
                             ylabel='p /MPa',
                             x=np.array(t_vector.detach().cpu()).T,
                             y=np.array(p_vector.detach().cpu()).T*1e-6,
                             ref_x=t_array_target.cpu().numpy(),
                             ref_y=p_array_target.cpu().numpy()*1e-6,
                             filename=None)
            lv_list.append(lv[0].detach().cpu().numpy())
            gv_list.append(gv[0].detach().cpu().numpy())
            av1_list.append(av1[0].detach().cpu().numpy())
            r_ref, R = convert_av1_to_value(av1[0].detach().cpu().numpy())
            r_ref_list.append(r_ref)
            R_list.append(R)
            r2_list.append(r2.detach().cpu().numpy())
            loading_fraction_list.append(loading_fraction.detach().cpu().numpy())

            np.savez_compressed(f"{result_path}/Optimization.npz",
                                lv=lv_list,
                                gv=gv_list,
                                av1=av1_list,
                                r_ref=r_ref_list,
                                R=R_list,
                                cstar=cstar.cpu().numpy(),
                                rho=rho.cpu().numpy(),
                                n=n.cpu().numpy(),
                                At=At.cpu().numpy(),
                                t_array_target=t_array_target.cpu().numpy(),
                                p_array_target=p_array_target.cpu().numpy(),
                                r2=r2_list,
                                loading_fraction=loading_fraction_list)

        # Backward pass on the scaled loss.
        scaler.scale(loss_value).backward()
        if 'G' in variables:
            # Zero the gradients of gv[0][1] and gv[0][2] so that n_slot and m
            # are frozen (held fixed) during the optimization.
            # gv.grad[0][0] = 0
            gv.grad[0][1] = 0
            gv.grad[0][2] = 0
        # Optimizer step through the scaler (skips the step on inf/NaN grads).
        scaler.step(optimizer)
        # Update the scaler's loss-scale state.
        scaler.update()
        # Show the current loss on the progress bar.
        tqdm_opt.set_postfix(loss='{:.5f}'.format(loss_value.item()))
    plt.close()

if __name__ == '__main__':
    # Previous experiment configurations, kept for reference:
    #define =make_define("dicG_plus",-1,0.001,"LGA","Optimization")
    #define =make_define("dicT3_plus",-1,0.0005,"LGA","Optimization")
    #define =make_define("dicA_22",-1,0.0002,"LGA","Optimization")
    # Build the active experiment definition. make_define/get_control_params
    # come from the star import of OPT.Config; presumably the arguments are
    # (dic name, sample index, learning rate, optimized variables, samples
    # directory) — verify against OPT.Config.
    define =make_define("dicA",1,0.001,"LGA","Samples_clustered")
    control_params, dead_dic = get_control_params(define)

    # Pipeline: optimize, then decode the result, then run the internal
    # ballistic calculation. (start_analysis is imported but not invoked here.)
    start_optimization(control_params,dead_dic)
    start_decode(control_params)
    start_internal_ballistic_cal(control_params)