# hyperparameter_env.py

import xgboost as xgb
from sklearn.metrics import accuracy_score
import torch
from concurrent.futures import ThreadPoolExecutor
import json
import os

class HyperparameterEnv:
    """Grid-world environment for RL-driven XGBoost hyperparameter search.

    The agent walks on a 3-D integer grid; each axis indexes the candidate
    list of one hyperparameter (learning_rate, max_depth, n_estimators).
    Every step trains and evaluates an XGBoost classifier at the current
    grid position (results are cached to disk) and the reward is the signed
    squared deviation of validation accuracy from ``base_accuracy``.
    """

    def __init__(self, X_train, y_train, X_val, y_val, base_accuracy=0.9000,
                 cache_file='evaluation_cache.json', path_file='agent_paths.json'):
        """Set up the search grid, load persisted caches, and reset the agent.

        Args:
            X_train, y_train: training split fed to XGBoost.
            X_val, y_val: validation split used for accuracy and reward.
            base_accuracy: baseline accuracy the reward is measured against.
            cache_file: JSON file persisting {params_key -> accuracy} results.
            path_file: JSON file persisting per-episode agent trajectories.
        """
        # Hyperparameters and their candidate values (one grid axis each).
        self.param_space = {
            'learning_rate': [0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.25],
            'max_depth': [3, 4, 5, 6, 7, 8, 9],
            'n_estimators': [25, 50, 75, 100, 125, 150, 175]
        }
        # Axis order and per-axis maximum index are derived from the space,
        # so candidate lists can be resized without touching step()/is_done()
        # (previously the grid size 6 was hard-coded in both).
        self._axes = list(self.param_space)
        self._max_index = [len(self.param_space[a]) - 1 for a in self._axes]

        self.X_train = X_train
        self.y_train = y_train
        self.X_val = X_val
        self.y_val = y_val
        self.base_accuracy = base_accuracy  # baseline accuracy for the reward

        # Persistence locations.
        self.cache_file = cache_file
        self.path_file = path_file

        # Warm-start the evaluation cache and episode paths from disk.
        self.evaluation_cache = self.load_cache()
        self.agent_paths = self.load_paths()

        # Kept for backward compatibility with callers that use it directly.
        # NOTE(review): the old submit(...).result() pattern blocked anyway
        # and gave no parallelism, so step() now trains synchronously; call
        # close() when done with the environment to release the pool.
        self.executor = ThreadPoolExecutor(max_workers=4)

        self.reset()

    def reset(self):
        """Move the agent to the grid origin and begin a fresh episode.

        Returns:
            list[int]: the initial state (a copy of the position).
        """
        self.position = [0, 0, 0]  # grid indices into the candidate lists
        self.done = False
        self.best_accuracy = 0.0
        self.current_episode_path = [self.position.copy()]  # trajectory log
        return self.get_state()

    def get_state(self):
        """Return a copy of the current grid position (the RL state)."""
        return self.position.copy()

    def step(self, action):
        """Apply one move, evaluate the resulting hyperparameters, and
        return ``(next_state, reward, done)``.

        Actions map pairwise onto axes: ``2k`` moves axis ``k`` up and
        ``2k + 1`` moves it down (0/1: learning_rate, 2/3: max_depth,
        4/5: n_estimators). Moves are clamped to the grid, so stepping
        off an edge is a legal no-op.

        Raises:
            ValueError: if ``action`` is outside the valid range.
        """
        if not 0 <= action < 2 * len(self._axes):
            raise ValueError(f"Invalid action: {action}")
        axis, parity = divmod(action, 2)
        delta = 1 if parity == 0 else -1
        # Clamp to [0, max_index] rather than a hard-coded grid size.
        self.position[axis] = min(max(self.position[axis] + delta, 0),
                                  self._max_index[axis])

        # Resolve the grid position to concrete hyperparameter values.
        current_params = {
            name: self.param_space[name][idx]
            for name, idx in zip(self._axes, self.position)
        }

        # Canonical string key so equivalent dicts hit the same cache entry.
        params_key = json.dumps(current_params, sort_keys=True)

        if params_key in self.evaluation_cache:
            accuracy = self.evaluation_cache[params_key]
            print(f"Retrieved cached accuracy: {accuracy:.4f} for params: {current_params}")
        else:
            # Train synchronously: the previous submit(...).result() pair
            # blocked identically but added a pointless thread round-trip.
            accuracy = self.train_evaluate(current_params)
            self.evaluation_cache[params_key] = accuracy
            self.save_cache()  # persist immediately so a crash loses nothing

        # Reward is the signed squared deviation from the baseline.
        reward = self.calculate_reward(accuracy)

        # Track the best accuracy seen this episode.
        self.best_accuracy = max(self.best_accuracy, accuracy)

        # Record the trajectory for later persistence.
        self.current_episode_path.append(self.position.copy())

        # Episode ends when the agent reaches the far corner of the grid.
        self.done = self.is_done()

        return self.get_state(), reward, self.done

    def is_done(self):
        """True once every axis sits at its maximum index (grid far corner)."""
        return self.position == self._max_index

    def train_evaluate(self, params):
        """Train an XGBoost classifier with ``params``; return validation accuracy.

        NOTE(review): assumes a CUDA device and a 10-class problem — confirm
        both before reusing this environment on a new dataset.
        """
        model = xgb.XGBClassifier(
            tree_method="hist",
            device="cuda",
            learning_rate=params['learning_rate'],
            max_depth=params['max_depth'],
            n_estimators=params['n_estimators'],
            objective='multi:softprob',  # softprob recommended for multi-class
            num_class=10,
            eval_metric='mlogloss'
        )
        model.fit(
            self.X_train, self.y_train,
            eval_set=[(self.X_val, self.y_val)],
            verbose=False
        )
        y_pred = model.predict(self.X_val)
        accuracy = accuracy_score(self.y_val, y_pred)
        print(f"Model Accuracy: {accuracy:.4f} with params: {params}")
        return accuracy

    def calculate_reward(self, accuracy):
        """Signed squared deviation of ``accuracy`` from the baseline."""
        diff = accuracy - self.base_accuracy
        # diff == 0 yields 0 either way, so only the sign needs a branch.
        return diff ** 2 if diff > 0 else -(diff ** 2)

    def load_cache(self):
        """Load the persisted evaluation cache, or an empty dict if absent."""
        if os.path.exists(self.cache_file):
            with open(self.cache_file, 'r', encoding='utf-8') as f:
                cache = json.load(f)
            print(f"Loaded evaluation cache from {self.cache_file}, {len(cache)} entries.")
            return cache
        else:
            print(f"No existing evaluation cache found at {self.cache_file}. Starting fresh.")
            return {}

    def save_cache(self):
        """Write the evaluation cache to ``cache_file`` as pretty JSON."""
        with open(self.cache_file, 'w', encoding='utf-8') as f:
            json.dump(self.evaluation_cache, f, indent=4)
        print(f"Saved evaluation cache to {self.cache_file}.")

    def load_paths(self):
        """Load persisted episode trajectories, or an empty list if absent."""
        if os.path.exists(self.path_file):
            with open(self.path_file, 'r', encoding='utf-8') as f:
                paths = json.load(f)
            print(f"Loaded agent paths from {self.path_file}, {len(paths)} episodes.")
            return paths
        else:
            print(f"No existing agent paths found at {self.path_file}. Starting fresh.")
            return []

    def save_paths(self):
        """Write all recorded episode trajectories to ``path_file``."""
        with open(self.path_file, 'w', encoding='utf-8') as f:
            json.dump(self.agent_paths, f, indent=4)
        print(f"Saved agent paths to {self.path_file}.")

    def finalize_episode(self):
        """Append the current episode's trajectory and persist all paths."""
        self.agent_paths.append(self.current_episode_path.copy())
        self.save_paths()

    def close(self):
        """Release the thread pool. Safe to call more than once."""
        self.executor.shutdown(wait=True)