import json

import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import torch
from hyperparameter_env import HyperparameterEnv
import xgboost as xgb
from sklearn.metrics import accuracy_score

def get_model(best_params, X_train, y_train, X_val, y_val, early_stopping_rounds=None):
    """
    Build and fit an XGBoost classifier from the given hyperparameters.

    Parameters:
    - best_params (dict): must contain 'learning_rate', 'max_depth' and
      'n_estimators'; values are coerced to float/int/int respectively.
    - X_train, y_train: training data.
    - X_val, y_val: validation data (used as the eval_set during fitting).
    - early_stopping_rounds (int | None): if given, training stops once the
      validation mlogloss has not improved for this many rounds. Defaults to
      None, which preserves the original behavior — the previous
      "enable early stopping" comment was misleading, since no rounds were
      ever configured.

    Returns:
    - model: the fitted XGBClassifier.

    Raises:
    - ValueError: if best_params is missing a required key or contains a
      value that cannot be coerced to the expected numeric type.
    """
    # Coerce hyperparameters to the types XGBoost expects; surface a clear
    # error here instead of letting a bad cache entry fail deep inside xgboost.
    try:
        learning_rate = float(best_params['learning_rate'])
        max_depth = int(best_params['max_depth'])
        n_estimators = int(best_params['n_estimators'])
    except (KeyError, ValueError, TypeError) as e:
        raise ValueError(f"Invalid best_params: {best_params}. Error: {e}")

    model = xgb.XGBClassifier(
        tree_method='hist',
        device="cuda",
        learning_rate=learning_rate,
        max_depth=max_depth,
        n_estimators=n_estimators,
        objective='multi:softprob',  # consistent with the environment's setting
        num_class=10,
        eval_metric='mlogloss',
        early_stopping_rounds=early_stopping_rounds,
    )

    # eval_set supplies the validation metric; it only triggers early
    # stopping when early_stopping_rounds is set above.
    model.fit(
        X_train, y_train,
        eval_set=[(X_val, y_val)],
        verbose=False
    )
    return model

def evaluate_best_params(agent, X_train, y_train, X_val, y_val, X_test, y_test):
    """
    Pick the best hyperparameters from the agent's evaluation cache, then
    train a final model with them and report its test accuracy.

    Parameters:
    - agent: trained DQN agent (exposes env.evaluation_cache, a mapping of
      JSON-encoded params -> validation accuracy).
    - X_train, y_train: training data.
    - X_val, y_val: validation data.
    - X_test, y_test: test data.

    Returns:
    - best_params (dict): the best hyperparameters found in the cache.
    - accuracy (float): accuracy of the final model on the test set.
    """
    # Scan the cache for the entry with the highest validation accuracy.
    # (A strictly-greater comparison keeps the earliest entry on ties, and
    # leaves best_params empty when no cached accuracy exceeds 0.0.)
    best_params, best_accuracy = {}, 0.0
    for params_json, cached_acc in agent.env.evaluation_cache.items():
        if cached_acc > best_accuracy:
            best_params, best_accuracy = json.loads(params_json), cached_acc

    print(f"Best Accuracy in Cache: {best_accuracy:.4f} with params: {best_params}")

    # Fail fast if the winning entry lacks any hyperparameter get_model needs.
    missing = [p for p in ('learning_rate', 'max_depth', 'n_estimators') if p not in best_params]
    if missing:
        raise ValueError(f"Best params missing required parameter: {missing[0]}")

    # Fit the final model with the cached winner.
    final_model = get_model(best_params, X_train, y_train, X_val, y_val)

    # Score it on held-out test data.
    predictions = final_model.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    print(f"Final Model Accuracy with RL Tuned Params: {accuracy:.4f}")
    return best_params, accuracy


def evaluate_manual_params(manual_params, X_train, y_train, X_val, y_val, X_test, y_test):
    """
    Train one model per manually chosen hyperparameter set, select the one
    with the best validation accuracy, and report its test accuracy.

    Parameters:
    - manual_params (list[dict]): candidate hyperparameter sets.
    - X_train, y_train: training data.
    - X_val, y_val: validation data (used for model selection).
    - X_test, y_test: test data (used only for the final score).

    Returns:
    - best_manual_params (dict): the winning hyperparameter set.
    - manual_test_accuracy (float): its accuracy on the test set.
    """
    # Train each candidate once and keep the fitted models, so the winner is
    # reused below instead of being retrained (the previous version trained
    # the best model twice, wasting work and risking a nondeterministic refit
    # differing from the model that actually won the selection).
    models = [get_model(params, X_train, y_train, X_val, y_val) for params in manual_params]
    val_accuracies = [accuracy_score(y_val, model.predict(X_val)) for model in models]

    best_manual_idx = int(np.argmax(val_accuracies))
    best_manual_params = manual_params[best_manual_idx]
    print(f"\nBest Hyperparameters (Manual): {best_manual_params}")

    # Evaluate the already-fitted winning model on the test set.
    manual_model = models[best_manual_idx]
    manual_y_pred = manual_model.predict(X_test)
    manual_test_accuracy = accuracy_score(y_test, manual_y_pred)
    print(f"Test Accuracy with Best Hyperparameters (Manual): {manual_test_accuracy * 100:.2f}%")

    return best_manual_params, manual_test_accuracy


def plot_accuracies(rl_best_acc, manual_accuracies):
    """
    Compare RL-tuned and manually tuned validation accuracies on one axis.

    Parameters:
    - rl_best_acc (list): best validation accuracy per RL episode.
    - manual_accuracies (list): validation accuracies from manual tuning.

    Returns:
    - None
    """
    plt.figure(figsize=(14, 7))

    n_episodes = len(rl_best_acc)
    n_manual = len(manual_accuracies)

    # RL curve occupies x positions 1..n_episodes.
    episode_xs = list(range(1, n_episodes + 1))
    plt.plot(episode_xs, rl_best_acc, label='RL Tuned Parameters', marker='o', color='blue', linestyle='-')

    # Manual attempts are placed immediately after the RL episodes.
    manual_xs = list(range(n_episodes + 1, n_episodes + n_manual + 1))
    plt.scatter(manual_xs, manual_accuracies, color='red', label='Manual Tuned Parameters', s=100, marker='X')

    # Axis labels and title.
    plt.xlabel('Episode / Manual Attempt')
    plt.ylabel('Validation Accuracy')
    plt.title('RL Tuned vs Manual Tuned Hyperparameters on MNIST')

    # Tick every position, labeling episodes and manual attempts differently.
    tick_labels = [f'Ep {i}' for i in episode_xs]
    tick_labels += [f'Man {i}' for i in range(1, n_manual + 1)]
    plt.xticks(ticks=episode_xs + manual_xs, labels=tick_labels, rotation=45)

    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()


def plot_rl_rewards(rl_episode_rewards):
    """
    Visualize the RL agent's total reward per episode, with a moving average
    overlay and a marker on the best-scoring episode.

    Parameters:
    - rl_episode_rewards (list): total reward per episode.

    Returns:
    - None
    """
    plt.figure(figsize=(14, 7))

    n_episodes = len(rl_episode_rewards)
    episode_xs = range(1, n_episodes + 1)

    # Raw per-episode rewards.
    plt.plot(episode_xs, rl_episode_rewards, label='Total Reward per Episode', marker='o', color='green', linestyle='-')

    # Overlay a moving average (window of 10) once enough episodes exist.
    window_size = 10
    if n_episodes >= window_size:
        kernel = np.ones(window_size) / window_size
        smoothed = np.convolve(rl_episode_rewards, kernel, mode='valid')
        plt.plot(range(window_size, n_episodes + 1), smoothed, label=f'Moving Average (window={window_size})',
                 color='orange')

    # Annotate the episode that achieved the highest reward.
    best_reward = max(rl_episode_rewards)
    best_episode = rl_episode_rewards.index(best_reward) + 1
    plt.scatter(best_episode, best_reward, color='red', s=200, label='Max Reward', marker='D')
    plt.annotate(f'Max Reward: {best_reward:.2f}', xy=(best_episode, best_reward),
                 xytext=(best_episode, best_reward + 0.5),
                 arrowprops=dict(facecolor='red', shrink=0.05), fontsize=12, color='red')

    # Axis labels and title.
    plt.xlabel('Episode')
    plt.ylabel('Total Reward (Sum of Accuracies)')
    plt.title('RL Agent Training Progress')

    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()


def plot_manual_accuracies(manual_accuracies, manual_params):
    """
    Bar-plot the manual tuning validation accuracies, then print each
    attempt's hyperparameters alongside its accuracy.

    Parameters:
    - manual_accuracies (list): validation accuracy per manual attempt.
    - manual_params (list): hyperparameter sets, parallel to manual_accuracies.

    Returns:
    - None
    """
    n_attempts = len(manual_accuracies)

    plt.figure(figsize=(14, 7))
    sns.barplot(x=list(range(1, n_attempts + 1)), y=manual_accuracies, palette='viridis')
    plt.xlabel('Manual Tuning Attempt')
    plt.ylabel('Validation Accuracy')
    plt.title('Manual Hyperparameter Tuning Results')

    # The bars sit at categorical positions 0..n-1; relabel them as attempts.
    attempt_labels = [f'Try {i}' for i in range(1, n_attempts + 1)]
    plt.xticks(ticks=range(n_attempts), labels=attempt_labels, rotation=45)

    # Leave a little headroom above the tallest bar.
    plt.ylim(0, max(manual_accuracies) + 0.05)

    plt.grid(axis='y')
    plt.tight_layout()
    plt.show()

    # Echo each attempt's parameters and accuracy to stdout.
    print("\nManual Hyperparameter Tuning Results:")
    for idx, params in enumerate(manual_params):
        print(f"Attempt {idx + 1}: Params={params}, Accuracy={manual_accuracies[idx]:.4f}")
