import os
import json
import numpy as np
from tqdm import tqdm
import torch
from madt.sc2.framework.dim_infer import infer_dims_from_file
from evaluation.utils.model_loader import load_actor_model
from evaluation.visualize.plot_metrics import plot_metrics

def parse_episode(filepath, obs_dim, action_dim):
    """Load one multi-agent episode from a JSON file into dense numpy arrays.

    The file holds a list of per-agent trajectories; each step is an
    8-tuple ``(state, local_obs, action, reward, done, avail_actions,
    kill_count, alive)`` with the scalar fields wrapped in 1-element lists.
    Episodes are truncated to at most 400 timesteps.

    Parameters
    ----------
    filepath : str, path to the episode JSON.
    obs_dim : int, per-agent local observation size.
    action_dim : int, size of the available-action mask.

    Returns
    -------
    dict of arrays: ``obs`` (T, n_agents, obs_dim), ``actions`` (T, n_agents),
    ``rewards`` (T,) summed over agents, ``dones`` (T,), ``avail``
    (T, n_agents, action_dim), ``kills`` (T, n_agents), ``is_alive``
    (T, n_agents).
    """
    with open(filepath, "r") as f:
        trajectories = json.load(f)

    n_agents = len(trajectories)
    # Cap the horizon at 400 steps, using the first agent's length as reference.
    horizon = min(len(trajectories[0]), 400)

    episode = {
        "obs": np.zeros((horizon, n_agents, obs_dim)),
        "actions": np.zeros((horizon, n_agents), dtype=int),
        "rewards": np.zeros(horizon),
        "dones": np.zeros(horizon, dtype=bool),
        "avail": np.zeros((horizon, n_agents, action_dim)),
        "kills": np.zeros((horizon, n_agents)),
        # Agents default to alive; overwritten wherever the data says otherwise.
        "is_alive": np.ones((horizon, n_agents)),
    }

    for agent_idx, trajectory in enumerate(trajectories):
        for step_idx, step in enumerate(trajectory[:horizon]):
            (_global_state, local_obs, action, reward, done,
             avail_actions, kill_count, alive) = step
            episode["obs"][step_idx, agent_idx] = np.asarray(local_obs)
            episode["actions"][step_idx, agent_idx] = action[0]
            # Team reward: accumulate each agent's scalar reward per step.
            episode["rewards"][step_idx] += reward[0]
            # Sticky done flag: true once any agent reports done at this step.
            episode["dones"][step_idx] |= bool(done)
            episode["avail"][step_idx, agent_idx] = np.asarray(avail_actions)
            episode["kills"][step_idx, agent_idx] = kill_count[0]
            episode["is_alive"][step_idx, agent_idx] = alive[0]

    return episode

def evaluate_actor_on_episodes(actor_model, filepaths, device, obs_dim, action_dim):
    """Score an actor model over a set of offline episode files.

    Each episode is flattened into a (T * n_agents)-long token stream and
    fed through the actor in chunks of at most ``config.block_size`` tokens
    with gradients disabled. Per-episode metrics collected: mean policy
    entropy over chunks, total reward, a win flag, and kill/death ratio.

    Parameters
    ----------
    actor_model : model exposing ``config.block_size`` and a forward call
        ``actor_model(states=..., pre_actions=..., timesteps=..., action_mask=...)``
        returning per-token action logits.
    filepaths : iterable of str — JSON episode files parsed by ``parse_episode``.
    device : torch device to place tensors on.
    obs_dim, action_dim : int — per-agent observation / action dimensions.

    Returns
    -------
    dict with aggregate means (``reward``, ``win_rate``, ``kd_ratio``,
    ``entropy``) plus per-episode lists under ``details``.
    """
    rewards, win_flags, kd_ratios, entropies = [], [], [], []
    max_block_size = actor_model.config.block_size

    for path in tqdm(filepaths, desc="Evaluating episodes"):
        ep = parse_episode(path, obs_dim, action_dim)
        obs_tensor = torch.tensor(ep["obs"], dtype=torch.float32).to(device)
        actions_tensor = torch.tensor(ep["actions"], dtype=torch.long).to(device)
        mask_tensor = torch.tensor(ep["avail"], dtype=torch.float32).to(device)

        # Flatten (T, n_agents, ...) into a single token stream of T * n_agents
        # entries, then split it into block_size-bounded chunks for the model.
        T, n_agents, _ = obs_tensor.shape
        total_length = T * n_agents
        num_chunks = (total_length + max_block_size - 1) // max_block_size

        chunk_entropies = []
        for i in range(num_chunks):
            start = i * max_block_size
            end = min((i + 1) * max_block_size, total_length)
            if start >= total_length:
                break

            # NOTE(review): timesteps restart at 0 for every chunk instead of
            # carrying the global position — confirm this matches how the
            # model consumed timesteps during training.
            timesteps = torch.arange(0, end - start).unsqueeze(0).unsqueeze(-1).to(device)
            states = obs_tensor.view(-1, obs_dim)[start:end].unsqueeze(0)
            pre_actions = actions_tensor.view(-1, 1)[start:end].unsqueeze(0)
            action_masks = mask_tensor.view(-1, action_dim)[start:end].unsqueeze(0)

            with torch.no_grad():
                try:
                    logits = actor_model(states=states, pre_actions=pre_actions,
                                          timesteps=timesteps, action_mask=action_masks)
                    probs = torch.softmax(logits, dim=-1)
                    # Shannon entropy of the policy, averaged over tokens;
                    # the epsilon guards against log(0) on masked-out actions.
                    entropy = -torch.sum(probs * torch.log(probs + 1e-10), dim=-1).mean().item()
                    chunk_entropies.append(entropy)
                except Exception as e:
                    # Best-effort: keep evaluating past a failing chunk and
                    # record a 0.0 placeholder entropy for it.
                    print(f"Chunk {i} error: {str(e)}")
                    chunk_entropies.append(0.0)

        # NOTE(review): a plain mean weights the (possibly shorter) final chunk
        # the same as full chunks — acceptable if chunk sizes are near-uniform.
        entropy = np.mean(chunk_entropies) if chunk_entropies else 0.0
        episode_reward = np.sum(ep['rewards'])
        # A death is an alive(1) -> dead(0) transition between consecutive steps.
        deaths = np.zeros_like(ep['is_alive'])
        deaths[1:] = (ep['is_alive'][:-1] == 1) & (ep['is_alive'][1:] == 0)
        death_count = deaths.sum()
        # Kill counters are cumulative, so the last step holds the episode total.
        total_kills = ep['kills'][-1].sum()
        kd = total_kills / death_count if death_count > 0 else 0.0

        rewards.append(episode_reward)
        # NOTE(review): a win is inferred from a positive final-step reward —
        # verify this convention against the environment's reward scheme.
        win_flags.append(1 if ep['rewards'][-1] > 0 else 0)
        kd_ratios.append(kd)
        entropies.append(entropy)

    return {
        "reward": np.mean(rewards),
        "win_rate": np.mean(win_flags),
        "kd_ratio": np.mean(kd_ratios),
        "entropy": np.mean(entropies),
        "details": {
            "rewards": rewards,
            "wins": win_flags,
            "kd_ratios": kd_ratios,
            "entropies": entropies,
        }
    }

def main():
    """Evaluate a pretrained actor on offline SMAC episodes and report metrics.

    Infers observation/action dimensions from the episode files, loads the
    actor checkpoint, scores every ``*.json`` episode under EPISODE_PATH,
    then prints the aggregate metrics, writes them to
    ``SAVE_DIR/results.json``, and plots the per-episode details.
    """
    EPISODE_PATH = "offline_data/2s3z/good"
    ACTOR_PATH = "offline_model/2s3z/actor"
    SAVE_DIR = "evaluation_results"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Dimensions are derived from the data itself so the script works for any map.
    global_dim, obs_dim, action_dim = infer_dims_from_file(os.path.join(EPISODE_PATH, "*.json"))
    actor_model = load_actor_model(ACTOR_PATH, device=device, verbose=True)

    episode_files = [os.path.join(EPISODE_PATH, f) for f in os.listdir(EPISODE_PATH) if f.endswith(".json")]
    results = evaluate_actor_on_episodes(actor_model, episode_files, device, obs_dim, action_dim)

    # NOTE(review): removed a post-hoc rescale that doubled every per-episode
    # entropy (``e * 2``) and recomputed the mean before reporting — an
    # unlabeled adjustment that inflated the reported policy entropy by 2x.
    # Metrics now reflect the values computed during evaluation.

    print("\n=== Final Evaluation Results ===")
    print(f"Average Reward: {results['reward']:.2f}")
    print(f"Win Rate: {results['win_rate']:.2%}")
    print(f"K/D Ratio: {results['kd_ratio']:.2f}")
    print(f"Policy Entropy: {results['entropy']:.4f}")

    os.makedirs(SAVE_DIR, exist_ok=True)
    with open(os.path.join(SAVE_DIR, "results.json"), "w") as f:
        json.dump(results, f, indent=2)

    plot_metrics(results["details"], SAVE_DIR)

if __name__ == "__main__":
    main()
