# a_star_solver.py (附带详细注释的评估函数)
import heapq
import copy
import numpy as np
from deadlock_env import DeadlockEnv, KAPPA_ROLLBACK, KAPPA_PREEMPT, STATE_BLOCKED
import time


class AStarSolver:
    """A* search over deadlock-recovery actions (preempt / rollback).

    Nodes are environment states identified by a hashable snapshot of
    (available, allocation, process status); edge costs come from the
    environment's `info['action_cost']`, and the heuristic estimates the
    remaining cost from the number of still-blocked processes.
    """

    def __init__(self, env: "DeadlockEnv"):
        # Annotation is a string so the class can be defined without
        # eagerly resolving the DeadlockEnv name.
        self.env = env
        self.action_types = ['preempt', 'rollback']

    def get_state_hash(self, obs):
        """Return a hashable key (available, allocation, status) for *obs*."""
        allocation_tuple = tuple(map(tuple, obs['allocation']))
        available_tuple = tuple(obs['available'])
        status_tuple = tuple(obs['processes_status'])
        return (available_tuple, allocation_tuple, status_tuple)

    def heuristic(self, obs):
        """Estimate remaining cost: blocked-process count times the preempt cost.

        NOTE(review): admissibility depends on KAPPA_PREEMPT being a lower
        bound on the per-process resolution cost — confirm in deadlock_env.
        """
        num_blocked = np.sum(obs['processes_status'] == STATE_BLOCKED)
        return num_blocked * KAPPA_PREEMPT

    def get_valid_actions(self, obs):
        """Enumerate candidate recovery actions for every active process.

        Rollback is offered for any non-terminated process; preempt only for
        real-time processes.
        """
        actions = []
        for pid in range(self.env.num_processes):
            # NOTE(review): the literal 2 appears to be a "terminated"
            # status code (distinct from the imported STATE_BLOCKED) —
            # confirm against deadlock_env and prefer a named constant.
            if obs['processes_status'][pid] != 2:
                actions.append({'type': 'rollback', 'process_id': pid})
            if obs['process_properties'][pid]['is_realtime'] and obs['processes_status'][pid] != 2:
                actions.append({'type': 'preempt', 'process_id': pid})
        return actions

    def solve(self):
        """Run A*; return the cheapest [(obs, action), ...] path to a safe
        state, or None when no safe state is reachable."""
        initial_obs = self.env._get_observation()
        start_state_hash = self.get_state_hash(initial_obs)
        # Heap entries are (f, g, tie, state_hash, path). The monotonically
        # increasing tie-breaker guarantees heapq never falls through to
        # comparing the path lists: their elements contain dicts of numpy
        # arrays, which are not orderable and would raise TypeError on an
        # exact (f, g, hash) tie.
        tie = 0
        open_set = [(self.heuristic(initial_obs), 0, tie, start_state_hash, [])]
        g_scores = {start_state_hash: 0}

        while open_set:
            f_score, g_score, _, current_hash, path = heapq.heappop(open_set)

            # Reconstruct the node's environment by replaying the path on a
            # fresh copy; only hashable snapshots are stored on the heap.
            temp_env = copy.deepcopy(self.env)
            for _, action_in_path in path:
                temp_env.step(action_in_path)

            if temp_env.is_safe():
                return path

            current_obs = temp_env._get_observation()

            for action in self.get_valid_actions(current_obs):
                neighbor_env = copy.deepcopy(temp_env)
                _, _, _, info, _ = neighbor_env.step(action)

                # Skip actions the environment rejected.
                if not info['valid']:
                    continue

                neighbor_obs = neighbor_env._get_observation()
                neighbor_hash = self.get_state_hash(neighbor_obs)
                tentative_g_score = g_score + info['action_cost']

                # Standard A* relaxation: keep only the cheapest known path
                # to each state.
                if tentative_g_score < g_scores.get(neighbor_hash, float('inf')):
                    g_scores[neighbor_hash] = tentative_g_score
                    h_score = self.heuristic(neighbor_obs)
                    new_f_score = tentative_g_score + h_score
                    new_path = path + [(current_obs, action)]
                    tie += 1
                    heapq.heappush(open_set, (new_f_score, tentative_g_score, tie, neighbor_hash, new_path))
        return None


def evaluate_a_star(num_episodes=100, num_processes=4, num_resources=4):
    """
    Evaluate the A* baseline and report the same metrics as the RL agent.

    Args:
        num_episodes: number of freshly-reset environments to solve.
        num_processes: process count for each DeadlockEnv instance.
        num_resources: resource count for each DeadlockEnv instance.

    Returns:
        Tuple (success_rate, avg_steps, avg_reward) aggregated over all
        episodes; averages are 0 when no episode was solved.
    """
    print(f"\n--- 开始评估 A* 算法性能 (共 {num_episodes} 个回合) ---")

    total_success = 0
    steps_to_solve = []
    rewards_per_episode = []

    start_time = time.time()
    for i in range(num_episodes):
        env = DeadlockEnv(num_processes=num_processes, num_resources=num_resources)
        env.reset()
        # Snapshot the freshly-reset episode state: reward replay must start
        # from the same state the solver saw.
        initial_env_state = copy.deepcopy(env)

        # A* searches for the action_cost-optimal recovery path.
        solver = AStarSolver(env)
        optimal_path = solver.solve()

        if optimal_path is not None:
            # A solution was found: record the RL-world metrics for it.
            total_success += 1
            path_length = len(optimal_path)
            steps_to_solve.append(path_length)

            # Replay the optimal path on the saved snapshot to accumulate
            # the episode reward the RL agent would have received.
            reward_env = initial_env_state
            episode_reward = 0

            for _, action in optimal_path:
                # env.step returns a 5-tuple (the solver's step call unpacks
                # five values); the previous 4-value unpack here raised
                # ValueError on every solved episode. Reward is the second
                # element.
                _, reward, _, _, _ = reward_env.step(action)

                episode_reward += reward

            rewards_per_episode.append(episode_reward)
        else:
            print(f"警告: A* 在回合 {i + 1} 未找到解。")

        if (i + 1) % 10 == 0:
            print(f"  已完成 {i + 1}/{num_episodes} 个回合...")

    end_time = time.time()

    # Aggregate and print the final averages.
    success_rate = (total_success / num_episodes) * 100
    avg_steps = np.mean(steps_to_solve) if steps_to_solve else 0
    avg_reward = np.mean(rewards_per_episode) if rewards_per_episode else 0

    print("\n--- A* 评估结果 (最优基准) ---")
    print(f"评估耗时: {end_time - start_time:.2f} 秒")
    print(f"评估回合数: {num_episodes}")
    print(f"成功率: {success_rate:.2f}%")
    print(f"平均解决步数: {avg_steps:.2f}")
    print(f"平均每回合奖励: {avg_reward:.3f}")

    return success_rate, avg_steps, avg_reward


# --- Script entry point ---
if __name__ == '__main__':
    # Benchmark configuration for the optimal-baseline evaluation.
    # NOTE(review): with 100 processes, the deepcopy-per-node A* search is
    # extremely expensive — confirm this configuration is intentional.
    benchmark_kwargs = {'num_episodes': 50, 'num_processes': 100, 'num_resources': 50}
    evaluate_a_star(**benchmark_kwargs)
