import logging
import random

import numpy as np
import time

import torch

from deadlock_env import DeadlockEnv
from a_star_solver import AStarSolver
from util.logger_util import createLogger
from util.state_processor import process_observation_to_sequence
from util.symbolic_manager import SymbolicManager

logger = createLogger('Train')
def action_id_to_dict(action_id, num_processes):
    """Map a flat action id onto an action dict for the environment.

    Ids in [0, num_processes) are 'preempt' actions, ids in
    [num_processes, 2*num_processes) are 'rollback' actions; within each
    band the id selects the target process index.
    """
    kind_index, process_index = divmod(action_id, num_processes)
    kind = ('preempt', 'rollback')[kind_index]
    return {'type': kind, 'process_id': process_index}


def evaluate_symbolic_agent(model, config, num_episodes, max_steps_per_episode, device, symbol_manager: SymbolicManager):
    """Evaluate a symbolic Seq2Seq agent over freshly sampled deadlock episodes.

    For each episode the model generates a symbolic trace from the encoded
    observation; the trace is parsed into an environment action via the
    provided ``symbol_manager``. Episodes that reach ``done`` count as solved.

    Returns:
        (success_rate_percent, avg_steps_to_solve, avg_episode_reward)
    """
    model.eval()

    solved_count = 0
    solve_lengths = []
    episode_rewards = []

    for _ in range(num_episodes):
        env = DeadlockEnv(num_processes=config['NUM_PROCESSES'], num_resources=config['NUM_RESOURCES'])
        obs, mask = env.reset()

        done = False
        reward_sum = 0
        step_count = 0

        while not done and step_count < max_steps_per_episode:
            # Encode the observation as a (1, seq_len) tensor for the model.
            src = process_observation_to_sequence(obs).unsqueeze(0).to(device)

            # Autoregressively generate a symbolic trace (no gradients needed
            # during evaluation), then drop the batch dim and move to numpy.
            with torch.no_grad():
                generated = model.generate(
                    src=src,
                    max_len=config.get('MAX_TRACE_LENGTH', 50),
                    start_symbol=symbol_manager.START,
                    end_symbol=symbol_manager.END,
                )
            trace_ids = generated.squeeze(0).cpu().numpy()

            # Parse an action from the generated trace via the manager instance.
            action = symbol_manager.parse_action_from_trace(trace_ids)
            if action is None:
                # Fallback: if parsing fails, roll back a random process.
                action = {'type': 'rollback', 'process_id': random.randint(0, config['NUM_PROCESSES'] - 1)}

            obs, reward, done, _, mask = env.step(action)
            reward_sum += reward
            step_count += 1

        # In this environment 'done' means the deadlock was resolved.
        if done:
            solved_count += 1
            solve_lengths.append(step_count)

        episode_rewards.append(reward_sum)

    success_rate = (solved_count / num_episodes) * 100
    avg_steps = np.mean(solve_lengths) if solve_lengths else max_steps_per_episode
    avg_reward = np.mean(episode_rewards) if episode_rewards else 0

    return success_rate, avg_steps, avg_reward


def evaluate_a_star(config, num_episodes=100):
    """Benchmark the A* solver on freshly sampled deadlock instances.

    A fresh environment is built per episode; an episode counts as solved
    when the solver returns a (possibly empty) path rather than None.

    Returns:
        (success_rate_percent, avg_path_length, total_wall_time_seconds)
    """
    print(f"\n--- 正在评估 A* Solver (共 {num_episodes} 个回合)... ---")
    solved = 0
    path_lengths = []
    started_at = time.time()

    for _ in range(num_episodes):
        env = DeadlockEnv(num_processes=config['NUM_PROCESSES'], num_resources=config['NUM_RESOURCES'])
        env.reset()
        path = AStarSolver(env).solve()
        if path is not None:
            solved += 1
            path_lengths.append(len(path))

    elapsed = time.time() - started_at
    success_rate = (solved / num_episodes) * 100
    avg_steps = np.mean(path_lengths) if path_lengths else 0
    return success_rate, avg_steps, elapsed


def evaluate_random_agent(config, num_episodes=100):
    """Benchmark a uniformly random agent as a baseline.

    Each episode runs at most 100 steps; an episode counts as solved when
    the environment signals ``done`` (deadlock resolved).

    Returns:
        (success_rate_percent, avg_steps_to_solve, total_wall_time_seconds)
    """
    env = DeadlockEnv(num_processes=config['NUM_PROCESSES'], num_resources=config['NUM_RESOURCES'])
    # One 'preempt' and one 'rollback' action per process (see action_id_to_dict).
    num_actions = 2 * env.num_processes
    total_success, steps_to_solve = 0, []
    start_time = time.time()
    for _ in range(num_episodes):
        # BUG FIX: env.reset() returns (obs, mask) — as used in
        # evaluate_symbolic_agent — but the old code bound the whole tuple
        # to `obs`. Unpack both values.
        obs, mask = env.reset()
        done = False
        step_count = 0
        # Equivalent to the old `while not done` + `break` at 100 steps.
        while not done and step_count < 100:
            step_count += 1
            action_id = np.random.randint(0, num_actions)
            action_dict = action_id_to_dict(action_id, env.num_processes)
            # BUG FIX: env.step returns 5 values (obs, reward, done, info, mask)
            # elsewhere in this module; the old 4-value unpack would raise
            # ValueError on every step.
            obs, reward, done, _, mask = env.step(action_dict)
        if done:
            total_success += 1
            steps_to_solve.append(step_count)
    end_time = time.time()
    success_rate = (total_success / num_episodes) * 100
    avg_steps = np.mean(steps_to_solve) if steps_to_solve else 0
    total_time = end_time - start_time
    return success_rate, avg_steps, total_time
