# train_dqn.py

import time
import os

import torch

from hyperparameter_env import HyperparameterEnv
from dqn_agent import DQNAgent
import json

def train_dqn_agent(X_train, y_train, X_val, y_val, episodes=400, log_file='episode_hyperparams.csv',
                   checkpoint_path='dqn_checkpoint.pth', cache_file='evaluation_cache.json',
                   path_file='agent_paths.json', early_stop_patience=20):
    """Train a DQN agent to search a 3-D hyperparameter grid.

    The environment exposes a 3-component discrete state (indices into the
    ``learning_rate`` / ``max_depth`` / ``n_estimators`` axes of
    ``env.param_space``) and six movement actions (+/-1 along each axis).

    Args:
        X_train, y_train: training split, forwarded to the environment.
        X_val, y_val: validation split the environment scores states on.
        episodes: maximum number of training episodes.
        log_file: CSV file appended with one row per episode.
        checkpoint_path: path used to load/save the agent's weights.
        cache_file: JSON evaluation cache, forwarded to the environment.
        path_file: JSON per-episode path file, forwarded to the environment.
        early_stop_patience: stop after this many consecutive episodes
            without a new best accuracy.

    Returns:
        Tuple ``(agent, episode_rewards, best_acc_per_episode)`` where the
        two lists hold, per episode, the summed reward and the largest
        single-step reward observed.

    Side effects: writes/updates ``trained_states.json``,
    ``trained_states_final.json``, the CSV log, and the checkpoint file.
    """
    # Initialize the environment with the baseline accuracy to beat.
    env = HyperparameterEnv(X_train, y_train, X_val, y_val, base_accuracy=0.9000,
                            cache_file=cache_file, path_file=path_file)
    state_size = 3   # (x, y, z) grid indices
    action_size = 6  # six movement directions
    agent = DQNAgent(state_size, action_size)
    agent.env = env  # give the agent access to the environment (e.g. for plotting)

    # Resume from an existing checkpoint when one is present.
    if os.path.exists(checkpoint_path):
        agent.load(checkpoint_path)
        print("已加载现有的DQN代理模型。")
    else:
        print("未找到现有的DQN代理模型，开始新训练。")

    episode_rewards = []       # total reward per episode
    best_acc_per_episode = []  # largest single-step reward per episode
    highest_accuracy = 0.0     # best accuracy seen so far (checkpoint trigger)
    patience_counter = 0       # episodes since the last improvement

    # Persisted record of (hyperparams, accuracy, V(s)) per episode.
    trained_states = []
    trained_states_file = 'trained_states.json'

    # Reload previously recorded trained states when the file exists.
    if os.path.exists(trained_states_file):
        with open(trained_states_file, 'r') as f:
            trained_states = json.load(f)
        print(f"Loaded {len(trained_states)} trained states from {trained_states_file}.")
    else:
        print(f"No existing {trained_states_file} found. Starting fresh.")

    start_time = time.time()
    print("DQN训练开始，计时器启动")

    # Append-mode log; write the header only when the file is empty.
    with open(log_file, 'a') as f:
        if os.stat(log_file).st_size == 0:
            f.write("Episode,Learning_Rate,Max_Depth,N_Estimators,Best_Accuracy,Total_Reward,Epsilon,Value\n")

        for episode in range(episodes):
            state = env.reset()
            done = False
            total_reward = 0
            best_acc = 0
            episode_steps = 0  # number of steps taken this episode
            while not done:
                # Choose and execute an action, then store the transition.
                action = agent.act(state)
                next_state, reward, done = env.step(action)
                agent.remember(state, action, reward, next_state, done)

                # Experience replay / one training update.
                agent.replay()

                state = next_state
                total_reward += reward
                episode_steps += 1
                # NOTE: despite its name, best_acc tracks the largest
                # single-step reward, matching the original logging semantics.
                if reward > best_acc:
                    best_acc = reward

            # State value V(s) = max_a Q(s, a) for the FINAL state of the
            # episode.  BUG FIX: this previously evaluated the state from
            # *before* the last transition while the hyperparameters logged
            # below were taken from the final state, so the value and the
            # parameters referred to two different grid points.
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(agent.device)
            with torch.no_grad():
                q_values = agent.policy_net(state_tensor)
                value = torch.max(q_values).item()

            # Record the episode's final state and its value estimate.
            trained_state = {
                "learning_rate": env.param_space['learning_rate'][state[0]],
                "max_depth": env.param_space['max_depth'][state[1]],
                "n_estimators": env.param_space['n_estimators'][state[2]],
                "accuracy": env.best_accuracy,
                "value": value
            }
            trained_states.append(trained_state)

            # Persist trained_states every 100 episodes and at the end.
            if (episode + 1) % 100 == 0 or (episode + 1) == episodes:
                with open(trained_states_file, 'w') as ts_f:
                    json.dump(trained_states, ts_f, indent=4)
                print(f"Saved {len(trained_states)} trained states to {trained_states_file}.")

            # Record this episode's results.
            episode_rewards.append(total_reward)
            best_acc_per_episode.append(best_acc)

            # Hyperparameters selected at the end of the episode.
            selected_params = {
                'learning_rate': env.param_space['learning_rate'][state[0]],
                'max_depth': env.param_space['max_depth'][state[1]],
                'n_estimators': env.param_space['n_estimators'][state[2]]
            }

            current_epsilon = agent.epsilon

            # One CSV row per episode, including the V(s) estimate.
            f.write(f"{episode + 1},{selected_params.get('learning_rate', 'N/A')},"
                    f"{selected_params.get('max_depth', 'N/A')},"
                    f"{selected_params.get('n_estimators', 'N/A')},"
                    f"{env.best_accuracy:.4f},{total_reward:.4f},{current_epsilon:.4f},{value:.4f}\n")

            print(
                f"Episode {episode + 1}/{episodes}, Total Reward: {total_reward:.4f}, "
                f"Best Accuracy: {env.best_accuracy:.4f}, Epsilon: {current_epsilon:.4f}, Value: {value:.4f}"
            )

            # Checkpoint on any new best accuracy; otherwise grow patience.
            if env.best_accuracy > highest_accuracy:
                highest_accuracy = env.best_accuracy
                agent.save(checkpoint_path)
                print(f"新最高准确率 {highest_accuracy:.4f}，模型已保存。")
                patience_counter = 0  # reset the patience counter
            else:
                patience_counter += 1
                print(f"No improvement in Best Accuracy for {patience_counter} episodes.")

            # Early stopping once patience is exhausted.
            if patience_counter >= early_stop_patience:
                print(f"Early stopping triggered after {episode + 1} episodes without improvement.")
                break

            # Decay the exploration rate.
            agent.decay_epsilon()

            # Persist this episode's path.
            env.finalize_episode()

    # Final snapshot of all trained states.
    with open('trained_states_final.json', 'w') as ts_f:
        json.dump(trained_states, ts_f, indent=4)
    # (was an f-string with no placeholders — output is identical)
    print("Final trained states saved to 'trained_states_final.json'.")

    end_time = time.time()
    elapsed_time = end_time - start_time

    print(f"\nDQN训练完成，总运行时间: {elapsed_time / 60:.2f} 分钟")
    return agent, episode_rewards, best_acc_per_episode
