import torch
import json
import os
import numpy as np

# --- Core Imports for the New Model ---
from deadlock_env import DeadlockEnv
from net.numerical_coat_net import NumericalCoATNet
from util.symbolic_manager import SymbolicManager
from util.evaluate import evaluate_symbolic_agent, evaluate_random_agent

# --- Baseline Imports ---
from a_star_solver import evaluate_a_star

# Checkpoint directories: each must contain 'config.json' and 'model.pth'.
# NOTE(review): RL_MODEL_DIR and SFT_MODEL_DIR currently point at the SAME
# SFT checkpoint -- presumably a placeholder; confirm the RL checkpoint path
# before comparing the two agents in the report.
RL_MODEL_DIR = "models/sft_symbolic_20251013_115258"
SFT_MODEL_DIR = "models/sft_symbolic_20251013_115258"

# Evaluation budget shared by all agents and baselines.
NUM_EVAL_EPISODES = 100
MAX_STEPS_PER_EPISODE = 50

def load_and_evaluate_agent(model_dir, agent_kind, device):
    """Load a symbolic-CoT checkpoint from *model_dir* and evaluate it.

    Factored out because the RL and SFT evaluation paths were identical
    except for the directory and the label used in error messages.

    Args:
        model_dir: Directory containing 'config.json' and 'model.pth'.
        agent_kind: Short label ('RL' or 'SFT') interpolated into error output.
        device: torch.device the model is loaded onto.

    Returns:
        (metrics, config) tuple. *metrics* is whatever
        evaluate_symbolic_agent returns, or ('Error', 'Error', 'Error') if
        any step failed. *config* is the parsed config dict, or None when
        even the config file could not be read (callers use it to decide
        whether the baselines can run).
    """
    config = None
    try:
        config_path = os.path.join(model_dir, 'config.json')
        model_path = os.path.join(model_dir, 'model.pth')  # RL runs may instead ship 'model_final.pth'
        with open(config_path, 'r') as f:
            config = json.load(f)

        # Build the manager from the model's own config so token ids line up
        # with the vocabulary the checkpoint was trained against.
        symbol_manager = SymbolicManager(
            num_processes=config['NUM_PROCESSES'],
            num_resources=config['NUM_RESOURCES']
        )

        # Sanity check (now applied to BOTH agents; originally only the RL
        # path verified this): config vocab size must match the manager's.
        if config['VOCAB_SIZE'] != symbol_manager.get_vocab_size():
            print(
                f"Warning: Mismatch between config vocab size ({config['VOCAB_SIZE']}) and manager vocab size ({symbol_manager.get_vocab_size()}). Using config size.")

        model = NumericalCoATNet(
            num_resources=config['NUM_RESOURCES'],
            vocab_size=config['VOCAB_SIZE'],
            d_model=config['D_MODEL'],
            nhead=config['N_HEAD'],
            num_encoder_layers=config['NUM_ENCODER_LAYERS'],
            num_decoder_layers=config['NUM_DECODER_LAYERS']
        ).to(device)
        # NOTE(review): for untrusted checkpoints prefer
        # torch.load(..., weights_only=True); left as-is to avoid changing
        # behavior for checkpoints that may contain non-tensor objects.
        model.load_state_dict(torch.load(model_path, map_location=device))

        metrics = evaluate_symbolic_agent(
            model, config, NUM_EVAL_EPISODES, MAX_STEPS_PER_EPISODE, device, symbol_manager
        )
        return metrics, config

    except Exception as e:
        # Broad catch is deliberate: a failed agent should not abort the
        # whole comparison run. The error row still appears in the report.
        print(f"Error evaluating {agent_kind} model ({model_dir}): {e}")
        return ('Error', 'Error', 'Error'), config


def format_report_row(name, metrics):
    """Return one formatted report row.

    A proper metrics value is a 3-tuple (success_rate, avg_steps,
    avg_reward); numeric entries are formatted to fixed precision while
    non-numeric entries (e.g. the 'Error' sentinel strings) pass through
    verbatim. Anything that is not a 3-tuple renders as an N/A row.
    """
    if isinstance(metrics, tuple) and len(metrics) == 3:
        sr, steps, reward = metrics
        sr_str = f"{sr:.2f}" if isinstance(sr, (int, float)) else sr
        steps_str = f"{steps:.2f}" if isinstance(steps, (int, float)) else steps
        reward_str = f"{reward:.3f}" if isinstance(reward, (int, float)) else reward
        return f"{name:<15} | {sr_str:<20} | {steps_str:<12} | {reward_str:<15}"
    return f"{name:<15} | {'N/A':<20} | {'N/A':<12} | {'N/A':<15}"


if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    results = {}

    # --- 1. Evaluate RL Agent ---
    print("--- 1. Evaluating RL Agent (Symbolic CoT) ---")
    results['RL Agent'], config = load_and_evaluate_agent(RL_MODEL_DIR, 'RL', device)

    # --- 2. Evaluate SFT Agent ---
    print("\n--- 2. Evaluating SFT Agent (Symbolic CoT) ---")
    results['SFT Agent'], sft_config = load_and_evaluate_agent(SFT_MODEL_DIR, 'SFT', device)
    if not config:
        # Fall back to the SFT config so the baselines can still run even
        # when the RL checkpoint failed to load.
        config = sft_config

    # --- 3. Evaluate Baselines (need a config for env dimensions) ---
    if config:
        print("\n--- 3. Evaluating A* Solver (Optimal Baseline) ---")
        results['A* Solver'] = evaluate_a_star(
            num_episodes=NUM_EVAL_EPISODES,
            num_processes=config['NUM_PROCESSES'],
            num_resources=config['NUM_RESOURCES']
        )

        print("\n--- 4. Evaluating Random Agent (Random Baseline) ---")
        results['Random Agent'] = evaluate_random_agent(
            config,
            num_episodes=NUM_EVAL_EPISODES
        )
    else:
        print("\nCould not load a config file. Skipping A* and Random Agent baselines.")

    # --- Final Report ---
    print("\n\n" + "=" * 70)
    print(" " * 22 + "Model Capability Evaluation Report")
    print("=" * 70)
    print(f"{'Method':<15} | {'Success Rate (%)':<20} | {'Avg Steps':<12} | {'Avg Reward':<15}")
    print("-" * 70)
    for name, metrics in results.items():
        print(format_report_row(name, metrics))
    print("=" * 70)