import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import mean_squared_error, mean_absolute_error
import yaml
from types import SimpleNamespace
from sklearn.preprocessing import StandardScaler
import importlib
import matplotlib.pyplot as plt
import sys


def evaluate(y_true, y_pred):
    """Compute regression error metrics between targets and predictions.

    Parameters:
        y_true: ground-truth values, shape (n_samples, n_outputs).
        y_pred: predicted values, same shape as ``y_true``.

    Returns:
        dict with keys "mse" (mean squared error) and "mae" (mean absolute error).
    """
    return {
        "mse": mean_squared_error(y_true, y_pred),
        "mae": mean_absolute_error(y_true, y_pred),
    }


def train(
    model,
    inputs,
    labels,
    epochs=10,
    batch_size=32,
    lr=1e-3,
    device="cuda" if torch.cuda.is_available() else "cpu",
):
    """Train ``model`` with MSE loss and the Adam optimizer.

    Parameters:
        model (nn.Module): model to train; moved to ``device`` and trained in place.
        inputs (torch.Tensor): input samples, first dimension is the batch axis.
        labels (torch.Tensor): targets aligned with ``inputs``.
        epochs (int): number of full passes over the data.
        batch_size (int): minibatch size for the DataLoader.
        lr (float): Adam learning rate.
        device (str): compute device; defaults to CUDA when available.

    Returns:
        nn.Module: the trained model (same object, now on ``device``).
        Previously nothing was returned, leaving callers without a handle;
        returning the module is backward-compatible.
    """
    model = model.to(device)
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(inputs, labels),
        batch_size=batch_size,
        shuffle=True,  # reshuffle each epoch for SGD-style training
    )

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    model.train()
    for _ in range(epochs):
        for x_batch, y_batch in loader:
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device)

            optimizer.zero_grad()
            loss = criterion(model(x_batch), y_batch)
            loss.backward()
            optimizer.step()

    return model


def prepare_data_blc(price_file_path, tech_file_path, input_window=30, pred_window=1):
    """Load price/technical-indicator CSVs and build sliding-window tensors.

    Parameters:
        price_file_path (str): CSV whose columns are the price features;
            these columns are also the prediction targets.
        tech_file_path (str): CSV whose columns are extra technical indicators.
        input_window (int): number of past time steps fed to the model.
        pred_window (int): number of future time steps to predict.

    Returns:
        (inputs, labels): float32 tensors laid out as (B, L, C) —
        inputs shaped (num_samples, input_window, n_price + n_tech) and
        labels shaped (num_samples, pred_window, n_price).
    """
    price_df = pd.read_csv(price_file_path)
    tech_df = pd.read_csv(tech_file_path)
    # Align the two feature groups side by side and drop rows with any NaN
    # (e.g. the warm-up rows of rolling indicators).
    df = pd.concat([price_df, tech_df], axis=1).dropna()

    # NOTE(review): the scaler is fit on the FULL series before any
    # train/test split, which leaks test-set statistics into training.
    # Kept as-is to preserve existing behavior; consider fitting on the
    # training portion only.
    scaler = StandardScaler()
    data = scaler.fit_transform(df)  # ndarray shape: [total_steps, total_features]

    total_steps = data.shape[0]

    # Price columns come first in the concatenated frame; they are the targets.
    price_idx = slice(0, len(price_df.columns))

    inputs = []
    labels = []

    for i in range(total_steps - input_window - pred_window + 1):
        # Input windows contain every feature; labels only the price columns.
        inputs.append(data[i : i + input_window, :])
        labels.append(data[i + input_window : i + input_window + pred_window, price_idx])

    # Stack into (num_samples, window_length, feature_dim) float32 tensors.
    inputs = torch.tensor(np.array(inputs), dtype=torch.float32)
    labels = torch.tensor(np.array(labels), dtype=torch.float32)

    return inputs, labels


def load_config(path):
    """Read a YAML config file and expose its top-level keys as attributes.

    Parameters:
        path (str): path to a YAML file with a mapping at the top level.

    Returns:
        SimpleNamespace: attribute-style access to the parsed config.
    """
    with open(path, "r", encoding="utf-8") as fh:
        raw = yaml.safe_load(fh)
    return SimpleNamespace(**raw)


def train_test_split_by_ratio(inputs, labels, train_ratio=0.8):
    """Chronologically split paired samples into train and test sets.

    The first ``train_ratio`` fraction of samples becomes the training set
    and the remainder the test set; ordering is preserved (no shuffling),
    which matters for time-series data.

    Parameters:
        inputs: array/tensor indexed along the first (sample) axis.
        labels: array/tensor aligned with ``inputs``.
        train_ratio (float): fraction of samples assigned to training.

    Returns:
        (train_inputs, train_labels, test_inputs, test_labels)
    """
    split = int(inputs.shape[0] * train_ratio)
    return inputs[:split], labels[:split], inputs[split:], labels[split:]


def plot_model_metrics_comparison(model_names, metrics_list, save_path=None):
    """Draw a grouped bar chart comparing MAE and MSE across models.

    Parameters:
        model_names (list[str]): model name list.
        metrics_list (list[dict]): evaluation result per model, each
            containing 'mae' and 'mse' keys.
        save_path (str or None): if given, save the figure to this path;
            otherwise display it interactively.
    """
    maes = [m["mae"] for m in metrics_list]
    mses = [m["mse"] for m in metrics_list]

    x = np.arange(len(model_names))
    width = 0.35  # half of each pair's slot, so the two bars sit side by side

    fig, ax = plt.subplots(figsize=(10, 5))
    ax.bar(x - width / 2, maes, width, label="MAE")
    ax.bar(x + width / 2, mses, width, label="MSE")

    ax.set_ylabel("Error")
    ax.set_title("Model Comparison on Test Set")
    ax.set_xticks(x)
    ax.set_xticklabels(model_names)
    ax.legend()
    ax.grid(True, linestyle="--", alpha=0.6)

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches="tight")
        # Close the figure after saving; otherwise repeated calls leak
        # open figures (matplotlib keeps them alive until closed).
        plt.close(fig)
    else:
        plt.show()


# Resolve data paths relative to this file's location so the script works
# regardless of the current working directory.
current_file_path = os.path.abspath(__file__)
current_dir = os.path.dirname(current_file_path)
# Project root is assumed to be one level above this script's directory.
root_dir = os.path.dirname(current_dir)

# Input CSVs: price features (prediction targets) and technical indicators.
price_file_path = os.path.join(root_dir, "data/output/6_m_tech_price.csv")
tech_file_path = os.path.join(root_dir, "data/output/6_m_tech_tech.csv")

if __name__ == "__main__":
    current_file_path = os.path.abspath(__file__)
    current_dir = os.path.dirname(current_file_path)
    if current_dir not in sys.path:
        sys.path.append(current_dir)
    config_path = os.path.join(current_dir, "configs/baseline_config.yaml")
    baseline_config = load_config(config_path)
    inputs, labels = prepare_data_blc(
        price_file_path,
        tech_file_path,
        baseline_config.input_window,
        baseline_config.pred_window,
    )
    train_inputs, train_labels, test_inputs, test_labels = train_test_split_by_ratio(
        inputs, labels, train_ratio=0.8
    )
    model_list = ["Transformer", "TCN", "LSTM", "PatchTST"]
    # model_list = ["PatchTST"]
    all_metrics = []
    model_names = []
    models_dir = "models"
    for model_name in model_list:
        config_path = os.path.join(current_dir, f"configs/{model_name}_config.yaml")
        config = load_config(config_path)
        config.enc_in = inputs.shape[-1]
        config.c_out = labels.shape[-1]
        config.seq_len = baseline_config.input_window

        module = importlib.import_module(f"models.{model_name}")
        model_class = getattr(module, model_name)
        model = model_class(config)
        if model_name == "PatchTST":
            model.set_pretrain()
            model.set_baseline()
        train(model, train_inputs, train_labels)

        model.eval()
        with torch.no_grad():
            test_inputs = test_inputs.to(
                torch.device("cuda" if torch.cuda.is_available() else "cpu")
            )
            test_labels = test_labels.to(test_inputs.device)
            preds = model(test_inputs)

        y_true = test_labels.cpu().numpy().reshape(-1, test_labels.shape[-1])
        y_pred = preds.cpu().numpy().reshape(-1, preds.shape[-1])

        metrics = evaluate(y_true, y_pred)
        print(f"📊 {model_name} Test Evaluation:")
        print(f"MAE_norm: {metrics['mae']:.4f}")
        print(f"MSE_norm: {metrics['mse']:.4f}")
        all_metrics.append(metrics)
        model_names.append(model_name)
    plot_model_metrics_comparison(model_names, all_metrics)
