import numpy as np
import torch
from torch.optim import Adam
from tqdm import tqdm
import pickle


def train(
    model,
    config,
    train_loader,
    valid_loader=None,
    valid_epoch_interval=300,
    foldername="",
):
    """Train ``model`` with Adam and a four-milestone step LR schedule.

    Args:
        model: callable module; ``model(batch)`` must return a scalar loss
            tensor (backward-able).
        config: dict providing at least ``"lr"`` and ``"epochs"``.
        train_loader: iterable of training batches.
        valid_loader: optional iterable of validation batches.
        valid_epoch_interval: validate every this many epochs.
        foldername: if non-empty, checkpoints are written to
            ``<foldername>/model.pth``.
    """
    torch.manual_seed(0)
    np.random.seed(0)

    optimizer = Adam(model.parameters(), lr=config["lr"], weight_decay=1e-6)
    output_path = foldername + "/model.pth" if foldername != "" else None

    # Decay the learning rate by 10x at 25%, 50%, 75% and 90% of training.
    milestones = [int(frac * config["epochs"]) for frac in (0.25, 0.5, 0.75, 0.9)]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=0.1
    )

    best_valid_loss = 1e10
    for epoch_no in range(config["epochs"]):
        model.train()
        running_loss = 0.0
        with tqdm(train_loader, mininterval=5.0, maxinterval=50.0) as progress:
            for batch_no, train_batch in enumerate(progress, start=1):
                optimizer.zero_grad()
                loss = model(train_batch)
                loss.backward()
                running_loss += loss.item()
                optimizer.step()
                progress.set_postfix(
                    ordered_dict={
                        "avg_epoch_loss": running_loss / batch_no,
                        "epoch": epoch_no,
                    },
                    refresh=False,
                )
            lr_scheduler.step()

        run_validation = (
            valid_loader is not None
            and (epoch_no + 1) % valid_epoch_interval == 0
        )
        if run_validation:
            print("Start validation")
            model.eval()
            valid_loss_sum = 0.0
            with torch.no_grad():
                with tqdm(valid_loader, mininterval=5.0, maxinterval=50.0) as progress:
                    for batch_no, valid_batch in enumerate(progress, start=1):
                        valid_loss_sum += model(valid_batch).item()
                        progress.set_postfix(
                            ordered_dict={
                                "avg_valid_loss": valid_loss_sum / batch_no,
                                "epoch": epoch_no,
                            },
                            refresh=True,
                        )
            if len(valid_loader) > 0:
                mean_valid_loss = valid_loss_sum / len(valid_loader)
                # Keep a checkpoint of the best model seen on validation.
                if mean_valid_loss < best_valid_loss:
                    best_valid_loss = mean_valid_loss
                    if output_path is not None:
                        torch.save(model.state_dict(), output_path)
            else:
                print("Warning: Validation loader is empty. Skipping validation.")

    # Final checkpoint (note: overwrites the best-validation checkpoint above).
    if output_path is not None:
        torch.save(model.state_dict(), output_path)

    # Use the following code for saving the training history:
    # with open(foldername + '/saved_history.pkl', 'wb') as f:
    #     pickle.dump(history, f)


def evaluate(model, test_loader, nsample=100, scaler=1, foldername=""):
    """Evaluate per-feature RMSE of ``model`` over ``test_loader``.

    For each batch, draws ``nsample`` imputations via ``model.evaluate``,
    averages them, and accumulates masked squared error per feature. If the
    model carries ``max_values`` / ``min_values`` attributes, predictions and
    targets are denormalized before the error is computed.

    Args:
        model: model exposing ``evaluate(batch, nsample)`` and ``device``;
            may optionally carry ``max_values`` / ``min_values`` tensors.
        test_loader: iterable of batches (dicts of tensors).
        nsample: number of generated samples per series.
        scaler: unused here — presumably a leftover denormalization factor;
            kept for interface compatibility (TODO confirm with callers).
        foldername: if non-empty, the final RMSE is pickled to
            ``<foldername>/result_nsample<nsample>.pk``.

    Returns:
        float: mean per-feature RMSE over all evaluation points, or NaN if
        there were no valid evaluation points.
    """
    torch.manual_seed(0)
    np.random.seed(0)

    with torch.no_grad():
        model.eval()
        mse_total = None
        evalpoints_total = None
        final_rmse = float("nan")

        with tqdm(test_loader, mininterval=5.0, maxinterval=50.0) as it:
            for batch_no, test_batch in enumerate(it, start=1):
                test_batch = {k: v.to(model.device) for k, v in test_batch.items()}
                output = model.evaluate(test_batch, nsample)
                samples, c_target, eval_points, observed_points, observed_time = output

                # Collapse singleton length dims so targets/masks are
                # [batch, features] and samples are [batch, nsample, features].
                if c_target.dim() == 3 and c_target.shape[1] == 1:
                    c_target = c_target.squeeze(1)
                if eval_points.dim() == 3 and eval_points.shape[1] == 1:
                    eval_points = eval_points.squeeze(1)
                if samples.dim() == 4:
                    samples = samples.squeeze(2)

                # Lazily size the per-feature accumulators from the first batch.
                if mse_total is None:
                    K = c_target.shape[-1]
                    mse_total = torch.zeros(K, device=model.device)
                    evalpoints_total = torch.zeros(K, device=model.device)

                if eval_points.sum().item() == 0:
                    print(f"Skipping batch {batch_no}: No evaluation points")
                    continue

                # Point prediction: mean over the nsample dimension.
                samples_mean = samples.mean(dim=1)

                # Denormalize only when the model provides the ranges; all
                # range-dependent work stays inside this guard (the original
                # code also printed model.max_values unconditionally, which
                # crashed for models without those attributes).
                if hasattr(model, "max_values") and hasattr(model, "min_values"):
                    range_values = (model.max_values - model.min_values).to(model.device)
                    samples_mean = samples_mean * range_values + model.min_values
                    c_target = c_target * range_values + model.min_values

                # Masked squared error, accumulated per feature.
                squared_error = (samples_mean - c_target) ** 2
                mse_total += (squared_error * eval_points).sum(dim=0)
                evalpoints_total += eval_points.sum(dim=0)

                # Running RMSE over features that have evaluation points so far.
                valid_features = evalpoints_total > 0
                if valid_features.any():
                    current_rmse = torch.sqrt(
                        (mse_total[valid_features] / evalpoints_total[valid_features]).mean()
                    ).item()
                else:
                    current_rmse = float("nan")

                it.set_postfix(
                    ordered_dict={
                        "rmse_total": current_rmse,
                        "batch_no": batch_no,
                    },
                    refresh=True,
                )

        if evalpoints_total is not None and evalpoints_total.sum() > 0:
            valid_features = evalpoints_total > 0
            feature_rmse = torch.sqrt(mse_total[valid_features] / evalpoints_total[valid_features])
            final_rmse = feature_rmse.mean().item()
            print(f"Final RMSE: {final_rmse}")

            if foldername:
                with open(foldername + f"/result_nsample{nsample}.pk", "wb") as f:
                    pickle.dump([final_rmse], f)
        else:
            print("Warning: No valid evaluation points")

        return final_rmse

def revise_evaluate(model, test_loader, nsample=100, scaler=1, foldername=""):
    """Revised/experimental RMSE evaluation with per-feature denormalization.

    NOTE(review): this function reads like work-in-progress debugging code.
    Several spots look wrong (flagged inline below); the code is left
    byte-identical and only comments were added:
      * ``eval_points`` is permuted twice and expanded with a hard-coded 15,
        so it only works when the length/feature dims are both 15.
      * ``.item()`` is called on a multi-element tensor in the progress-bar
        update, which raises a RuntimeError once ``mse_total`` is a tensor.
      * ``model.max_values`` is printed unconditionally even though the
        denormalization branch guards on ``hasattr``.
      * ``final_rmse`` is only assigned inside ``if evalpoints_total > 0``;
        the print below raises NameError when there were no eval points.

    Args:
        model: model exposing ``evaluate(batch, nsample)`` and ``device``;
            may optionally carry ``max_values`` / ``min_values`` tensors.
        test_loader: iterable of batches (dicts of tensors).
        nsample: number of generated samples per series.
        scaler: unused in this function — TODO confirm intent.
        foldername: if non-empty, the final RMSE is pickled under it.
    """
    torch.manual_seed(0)
    np.random.seed(0)

    with torch.no_grad():
        model.eval()
        mse_total = 0
        mae_total = 0
        evalpoints_total = 0

        with tqdm(test_loader, mininterval=5.0, maxinterval=50.0) as it:
            for batch_no, test_batch in enumerate(it, start=1):
                # Move every tensor in the batch onto the model's device.
                test_batch = {k: v.to(model.device) for k, v in test_batch.items()}

                output = model.evaluate(test_batch, nsample)
                samples, c_target, eval_points, observed_points, observed_time = output
                samples = samples.permute(0, 1, 3, 2)  # (B,nsample,L,K)

                c_target = c_target.permute(0, 2, 1)  # (B,L,K)
                eval_points = eval_points.permute(0, 2, 1)
                observed_points = observed_points.permute(0, 2, 1)

                # Skip batches that contain no points to evaluate.
                if eval_points.sum().item() == 0:
                    print(f"跳过batch {batch_no}: 无有效评估点")
                    continue

                # Point prediction (original comment said "median", but this
                # computes the mean over the nsample dimension).
                samples_mean = samples.mean(dim=1)

                # NOTE(review): eval_points was already permuted to (B,L,K)
                # above; permuting again and expanding with a hard-coded 15
                # assumes L == K == 15 — confirm/parameterize before reuse.
                eval_points = eval_points.permute(0, 2, 1)  # from [14,15,1] to [14,1,15]
                eval_points = eval_points.expand(-1, 15, -1)  # expand to [14,15,15]

                # Denormalize per feature when the model provides the ranges.
                if hasattr(model, 'max_values') and hasattr(model, 'min_values'):
                    # Denormalize along the feature dimension.
                    range_values = (model.max_values - model.min_values)
                    samples_mean_values = samples_mean * range_values + model.min_values
                    c_target = c_target * range_values + model.min_values

                    # Compute the error per feature so that wide-range
                    # features do not dominate the aggregate.
                    mse_current = torch.zeros_like(samples_mean_values)
                    for k in range(samples_mean_values.shape[-1]):  # iterate over features
                        feature_diff = (samples_mean_values[..., k] - c_target[..., k]) * eval_points[..., k]
                        mse_current[..., k] = feature_diff ** 2
                else:
                    # NOTE(review): mae_current is only defined on this branch
                    # (and its accumulation below is commented out anyway).
                    samples_mean_values = samples_mean
                    mse_current = ((samples_mean_values - c_target) * eval_points) ** 2
                    mae_current = torch.abs((samples_mean_values - c_target) * eval_points)

                # Skip batches containing NaNs in predictions or targets.
                if torch.isnan(samples_mean_values).any() or torch.isnan(c_target).any():
                    print(f"跳过batch {batch_no}: 包含nan值")
                    continue

                # NOTE(review): eval_points was already expanded above; this
                # expand_as only succeeds when the shapes already match.
                eval_points = eval_points.expand_as(c_target)  # expand eval_points from [14,15,1] to [14,15,15]
                print(mse_current)

                # Accumulate statistics across batches.
                mse_total += mse_current.sum(dim=0)
                print(mse_total)
                # mae_total += mae_current.sum().item()
                evalpoints_total += eval_points.sum().item()

                # Debug prints. NOTE(review): the next line accesses
                # model.max_values unconditionally — crashes for models
                # without that attribute, unlike the guarded branch above.
                print("反归一化前 samples_mean 范围:", samples_mean.min().item(), samples_mean.max().item())
                print("max_values - min_values:", (model.max_values - model.min_values))
                print("反归一化后 samples_mean_values 范围:", samples_mean_values.min().item(),
                      samples_mean_values.max().item())

                print("samples_mean_values shape:", samples_mean_values.shape)
                print("c_target shape:", c_target.shape)
                print("eval_points shape:", eval_points.shape)

                # Update the progress bar.
                # NOTE(review): mse_total is a (L,K) tensor here, so .item()
                # raises RuntimeError (multi-element tensor).
                if evalpoints_total > 0:
                    current_rmse = torch.sqrt(torch.tensor(mse_total / evalpoints_total)).item()
                else:
                    current_rmse = float('nan')

                it.set_postfix(
                    ordered_dict={
                        "rmse_total": current_rmse,
                        "batch_no": batch_no,
                    },
                    refresh=True,
                )

        if evalpoints_total > 0:
            # Calculate feature-wise eval points
            # NOTE(review): uses eval_points from the *last* batch only, and
            # indexes the (L,K) mse_total with a (K,)-shaped feature mask —
            # likely not the intended aggregation; verify before trusting.
            feature_evalpoints = eval_points.sum(dim=[0, 1])  # Sum across batch and time dimensions
            valid_features = feature_evalpoints > 0

            # Calculate feature-wise RMSE only for features with eval points
            feature_rmse = torch.sqrt(mse_total[valid_features] / feature_evalpoints[valid_features])
            final_rmse = feature_rmse.mean().item()

        # NOTE(review): NameError if the branch above did not run.
        print(f"最终RMSE: {final_rmse}")

        if foldername:
            with open(foldername + f"/result_nsample{nsample}.pk", "wb") as f:
                pickle.dump([final_rmse], f)


def evaluate_analog(
    exe_name, model, test_loader, nsample=100, scaler=1, mean_scaler=0, foldername=""
):
    """Evaluate on analog-encoded tabular data.

    Continuous columns are scored with masked RMSE; categorical columns
    (encoded as +/-1) are scored with an error rate after thresholding the
    per-column sample median at 0.

    Args:
        exe_name: dataset identifier; only ``"census"`` is supported.
        model: model exposing ``evaluate(batch, nsample)``.
        test_loader: iterable of test batches.
        nsample: number of generated samples per series.
        scaler: multiplicative denormalization factor for continuous columns.
        mean_scaler: unused here; kept for interface compatibility.
        foldername: directory where the result pickle is written.

    Raises:
        ValueError: if ``exe_name`` is unsupported (previously this fell
            through and failed later with a NameError on ``cont_cols``).
    """
    if exe_name == "census":
        with open("./data_census_analog/transform.pk", "rb") as f:
            _, cont_cols, saved_cat_dict = pickle.load(f)
    else:
        # Fail fast: cont_cols / saved_cat_dict would otherwise be undefined.
        raise ValueError(f"Unsupported exe_name: {exe_name!r}; expected 'census'")

    torch.manual_seed(0)
    np.random.seed(0)
    with torch.no_grad():
        model.eval()
        mse_total = 0
        mae_total = 0
        # Per-category error counts and evaluated counts.
        err_total = np.zeros([len(saved_cat_dict)])
        err_total_eval_nums = np.zeros([len(saved_cat_dict)])
        evalpoints_total = 0

        all_target = []
        all_observed_point = []
        all_observed_time = []
        all_evalpoint = []
        all_generated_samples = []

        with tqdm(test_loader, mininterval=5.0, maxinterval=50.0) as it:
            for batch_no, test_batch in enumerate(it, start=1):
                output = model.evaluate(test_batch, nsample)
                samples, c_target, eval_points, observed_points, observed_time = output
                samples = samples.permute(0, 1, 3, 2)  # (B,nsample,L,K)
                c_target = c_target.permute(0, 2, 1)  # (B,L,K)
                eval_points = eval_points.permute(0, 2, 1)
                observed_points = observed_points.permute(0, 2, 1)
                samples_median = samples.median(dim=1)  # (B, L, K)

                # Threshold: snap categorical (non-continuous) rows of the
                # median prediction to {-1, +1} in place.
                for i in range(samples_median.values.size(dim=1)):
                    if i in cont_cols:
                        continue
                    index1 = samples_median.values[:, i, :] >= 0
                    samples_median.values[:, i, :][index1] = 1
                    index2 = samples_median.values[:, i, :] < 0
                    samples_median.values[:, i, :][index2] = -1

                all_target.append(c_target)
                all_evalpoint.append(eval_points)
                all_observed_point.append(observed_points)
                all_observed_time.append(observed_time)
                all_generated_samples.append(samples)

                # For continuous variables: masked squared / absolute error.
                mse_current = (
                    (
                        (
                            samples_median.values[:, cont_cols, :]
                            - c_target[:, cont_cols, :]
                        )
                        * eval_points[:, cont_cols, :]
                    )
                    ** 2
                ) * (scaler**2)
                # NOTE: mae_current is computed but its accumulation below is
                # commented out; kept to preserve existing behavior.
                mae_current = (
                    torch.abs(
                        (
                            samples_median.values[:, cont_cols, :]
                            - c_target[:, cont_cols, :]
                        )
                        * eval_points[:, cont_cols, :]
                    )
                ) * scaler

                # For categorical variables: a row counts as matched only when
                # all of the category's columns match at evaluated positions.
                for i in range(len(saved_cat_dict)):
                    cate_cols = saved_cat_dict[str(i)]
                    matched_nums = (
                        (
                            (
                                samples_median.values[:, cate_cols, :]
                                == c_target[:, cate_cols, :]
                            )
                            * eval_points[:, cate_cols, :]
                        )
                        .all(1)
                        .sum()
                    )
                    # Each evaluated row contributes len(cate_cols) mask
                    # entries, hence the division.
                    eval_nums = eval_points[:, cate_cols, :].sum().sum() / len(
                        cate_cols
                    )
                    err_total[i] += eval_nums - matched_nums
                    err_total_eval_nums[i] += eval_nums

                mse_total += torch.sum(mse_current, dim=0)
                # mae_total += torch.sum(mae_current, dim=0)
                evalpoints_total += torch.sum(eval_points[:, cont_cols, 0], dim=0)

                it.set_postfix(
                    ordered_dict={
                        "rmse_total": torch.mean(
                            torch.sqrt(torch.div(mse_total, evalpoints_total))
                        ).item(),
                        "batch_no": batch_no,
                    },
                    refresh=True,
                )

            # Persist [continuous RMSE, per-category error rates].
            with open(foldername + "/result_nsample" + str(nsample) + ".pk", "wb") as f:
                pickle.dump(
                    [
                        torch.mean(torch.sqrt(mse_total / evalpoints_total)).item(),
                        # mae_total / evalpoints_total,
                        err_total / err_total_eval_nums,
                    ],
                    f,
                )
                print(
                    "RMSE:",
                    torch.mean(
                        torch.sqrt(torch.div(mse_total, evalpoints_total))
                    ).item(),
                )
                print("ERR_CATE:", err_total / err_total_eval_nums)


def evaluate_onehot(
    exe_name, model, test_loader, nsample=100, scaler=1, mean_scaler=0, foldername=""
):
    """Evaluate on one-hot-encoded tabular data.

    Continuous columns are scored with masked RMSE; each categorical group is
    scored by comparing the argmax over its one-hot columns between the
    prediction median and the (binarized) target.

    Args:
        exe_name: dataset identifier; only ``"census"`` is supported.
        model: model exposing ``evaluate(batch, nsample)``.
        test_loader: iterable of test batches.
        nsample: number of generated samples per series.
        scaler: multiplicative denormalization factor for continuous columns.
        mean_scaler: unused here; kept for interface compatibility.
        foldername: directory where the result pickle is written.

    Raises:
        ValueError: if ``exe_name`` is unsupported (previously this fell
            through and failed later with a NameError on ``cont_cols``).
    """
    if exe_name == "census":
        with open("./data_census_onehot/transformed_columns.pk", "rb") as f:
            cont_cols, saved_cat_dict = pickle.load(f)
        print(cont_cols, saved_cat_dict)
        with open("./data_census_onehot/encoder.pk", "rb") as f:
            encoder = pickle.load(f)
    else:
        # Fail fast: cont_cols / saved_cat_dict would otherwise be undefined.
        raise ValueError(f"Unsupported exe_name: {exe_name!r}; expected 'census'")
    print(cont_cols, saved_cat_dict)
    torch.manual_seed(0)
    np.random.seed(0)
    with torch.no_grad():
        model.eval()
        mse_total = 0
        mae_total = 0
        # Per-category-group error counts and evaluated counts.
        err_total = np.zeros([len(saved_cat_dict)])
        err_total_eval_nums = np.zeros([len(saved_cat_dict)])
        evalpoints_total = 0

        all_target = []
        all_observed_point = []
        all_observed_time = []
        all_evalpoint = []
        all_generated_samples = []

        with tqdm(test_loader, mininterval=5.0, maxinterval=50.0) as it:
            for batch_no, test_batch in enumerate(it, start=1):

                output = model.evaluate(test_batch, nsample)

                samples, c_target, eval_points, observed_points, observed_time = output
                samples = samples.permute(0, 1, 3, 2)  # (B,nsample,L,K)
                c_target = c_target.permute(0, 2, 1)  # (B,L,K)
                eval_points = eval_points.permute(0, 2, 1)
                observed_points = observed_points.permute(0, 2, 1)
                samples_median = samples.median(dim=1)  # (B, L, K)

                # Binarize the target's one-hot (non-continuous) rows in place.
                for i in range(c_target.shape[1]):
                    if i in cont_cols:
                        continue
                    index1 = c_target[:, i, :] >= 0
                    c_target[:, i, :][index1] = 1
                    index2 = c_target[:, i, :] < 0
                    c_target[:, i, :][index2] = 0

                all_target.append(c_target)
                all_evalpoint.append(eval_points)
                all_observed_point.append(observed_points)
                all_observed_time.append(observed_time)
                all_generated_samples.append(samples)

                # For continuous variables: masked squared / absolute error.
                mse_current = (
                    (
                        (samples_median.values[:, cont_cols] - c_target[:, cont_cols])
                        * eval_points[:, cont_cols, :]
                    )
                    ** 2
                ) * (scaler**2)
                mae_current = (
                    torch.abs(
                        (samples_median.values[:, cont_cols] - c_target[:, cont_cols])
                        * eval_points[:, cont_cols, :]
                    )
                ) * scaler

                # For categorical variables: compare argmax over each group's
                # one-hot columns at evaluated positions.
                for index, i in enumerate(list(saved_cat_dict.keys())):
                    cate_cols = saved_cat_dict[str(i)]
                    matched_nums = (
                        (
                            torch.argmax(samples_median.values[:, cate_cols], dim=1)
                            == torch.argmax(c_target[:, cate_cols], dim=1)
                        )
                        * eval_points[:, cate_cols[0], :]
                    ).sum()
                    eval_nums = eval_points[:, cate_cols, :].sum().sum() / len(
                        cate_cols
                    )
                    # Use .to() instead of .cuda(): .cuda(tensor.device)
                    # crashed whenever the tensors lived on the CPU.
                    eval_nums = eval_nums.to(matched_nums.device)
                    err_total[index] += eval_nums - matched_nums
                    err_total_eval_nums[index] += eval_nums
                mse_total += torch.sum(mse_current, dim=0)
                mae_total += torch.sum(mae_current, dim=0)
                evalpoints_total += torch.sum(eval_points[:, cont_cols, 0], dim=0)
                it.set_postfix(
                    ordered_dict={
                        # .item() for consistency with evaluate_analog (a
                        # plain float instead of a 0-d tensor).
                        "rmse_total": torch.mean(
                            torch.sqrt(torch.div(mse_total, evalpoints_total))
                        ).item(),
                        "batch_no": batch_no,
                    },
                    refresh=True,
                )
                print(
                    np.mean(err_total / err_total_eval_nums),
                    err_total / err_total_eval_nums,
                )

            # Persist [continuous RMSE, per-group error rates] as plain
            # Python/NumPy values (consistent with evaluate_analog).
            with open(foldername + "/result_nsample" + str(nsample) + ".pk", "wb") as f:
                pickle.dump(
                    [
                        torch.mean(
                            torch.sqrt(torch.div(mse_total, evalpoints_total))
                        ).item(),
                        err_total / err_total_eval_nums,
                    ],
                    f,
                )
                print(
                    "RMSE:",
                    torch.mean(
                        torch.sqrt(torch.div(mse_total, evalpoints_total))
                    ).item(),
                )
                print("ERR_CATE:", err_total / err_total_eval_nums)


def evaluate_ft(
    exe_name, model, test_loader, nsample=100, scaler=1, mean_scaler=0, foldername=""
):
    """Evaluate on feature-tokenized data, recovering values through the
    model's tokenizer before scoring.

    Continuous columns are scored with masked RMSE; each categorical column is
    scored by exact-match error rate at evaluated positions.

    Args:
        exe_name: dataset identifier; only ``"census"`` is supported.
        model: model exposing ``evaluate(batch, nsample)`` and a
            ``tokenizer.recover(values, n_cont)`` method.
        test_loader: iterable of test batches.
        nsample: number of generated samples per series.
        scaler: multiplicative denormalization factor for continuous columns.
        mean_scaler: unused here; kept for interface compatibility.
        foldername: directory where the result pickle is written.

    Raises:
        ValueError: if ``exe_name`` is unsupported (previously this fell
            through and failed later with a NameError on ``cont_list``).
    """
    if exe_name == "census":
        with open("./data_census_ft/transformed_columns.pk", "rb") as f:
            cont_list, num_cate_list = pickle.load(f)
        with open("./data_census_ft/encoder.pk", "rb") as f:
            encoder = pickle.load(f)
    else:
        # Fail fast: cont_list / num_cate_list would otherwise be undefined.
        raise ValueError(f"Unsupported exe_name: {exe_name!r}; expected 'census'")

    print(cont_list, num_cate_list)
    torch.manual_seed(0)
    np.random.seed(0)
    with torch.no_grad():
        model.eval()
        mse_total = 0
        mae_total = 0
        # Per-categorical-column error counts and evaluated counts.
        err_total = np.zeros([len(num_cate_list)])
        err_total_eval_nums = np.zeros([len(num_cate_list)])
        evalpoints_total = 0

        all_target = []
        all_observed_point = []
        all_observed_time = []
        all_evalpoint = []
        all_generated_samples = []

        with tqdm(test_loader, mininterval=5.0, maxinterval=50.0) as it:
            for batch_no, test_batch in enumerate(it, start=1):

                output = model.evaluate(test_batch, nsample)
                samples, c_target, eval_points, observed_points, observed_time = output
                samples = samples.permute(0, 1, 3, 2)  # (B,nsample,L,K)
                c_target = c_target.permute(0, 2, 1)  # (B,L,K)
                eval_points = eval_points.permute(0, 2, 1)
                observed_points = observed_points.permute(0, 2, 1)

                # Take the median over the nsample dimension.
                samples_median = samples.median(dim=1)  # (B, L, K)

                # Map token representations back to original values.
                samples_median = model.tokenizer.recover(
                    samples_median.values, len(cont_list)
                )
                c_target = model.tokenizer.recover(c_target, len(cont_list))

                all_target.append(c_target)
                all_evalpoint.append(eval_points)
                all_observed_point.append(observed_points)
                all_observed_time.append(observed_time)
                all_generated_samples.append(samples)

                # For continuous variables: masked squared / absolute error.
                mse_current = (
                    (
                        (samples_median[:, cont_list] - c_target[:, cont_list])
                        * eval_points[:, cont_list, 0]
                    )
                    ** 2
                ) * (scaler**2)
                mae_current = (
                    torch.abs(
                        (samples_median[:, cont_list] - c_target[:, cont_list])
                        * eval_points[:, cont_list, 0]
                    )
                ) * scaler

                # For categorical variables: count exact matches at evaluated
                # positions. Fix of an operator-precedence bug: the original
                # computed `pred == (target * mask)` because `*` binds tighter
                # than `==`; the mask must apply to the match result instead
                # (as evaluate_analog does).
                for i in range(len(num_cate_list)):
                    matched_nums = (
                        (
                            samples_median[:, len(cont_list) + i]
                            == c_target[:, len(cont_list) + i]
                        )
                        * eval_points[:, len(cont_list) + i, 0]
                    ).sum()
                    eval_nums = eval_points[:, len(cont_list) + i, 0].sum()
                    err_total[i] += eval_nums - matched_nums
                    err_total_eval_nums[i] += eval_nums

                mse_total += torch.sum(mse_current, dim=0)
                mae_total += torch.sum(mae_current, dim=0)
                evalpoints_total += torch.sum(eval_points[:, cont_list, 0], dim=0)

                it.set_postfix(
                    ordered_dict={
                        "rmse_total": torch.mean(
                            torch.sqrt(torch.div(mse_total, evalpoints_total))
                        ).item(),
                        "batch_no": batch_no,
                    },
                    refresh=True,
                )

            # Persist [continuous RMSE, per-column error rates].
            with open(foldername + "/result_nsample" + str(nsample) + ".pk", "wb") as f:
                pickle.dump(
                    [
                        torch.mean(
                            torch.sqrt(torch.div(mse_total, evalpoints_total))
                        ).item(),
                        err_total / err_total_eval_nums,
                    ],
                    f,
                )
                print(
                    "RMSE:",
                    torch.mean(
                        torch.sqrt(torch.div(mse_total, evalpoints_total))
                    ).item(),
                )
                print("ERR_CATE:", err_total / err_total_eval_nums)