# parallel_env.py
from functools import partial
from multiprocessing import Pool
from typing import Any, Dict, List, Optional, Tuple

import gym
import numpy as np
from gym import spaces

from environment import rm_simple_simulator


class ParallelEnv:
    """Run several rm_simple_simulator instances in lockstep.

    Observations from the sub-environments are stacked along a new leading
    env axis, so every array in the returned observation dict has shape
    ``(n_envs, ...)``. Sub-environments follow the Gymnasium-style API:
    ``reset() -> (obs, info)`` and ``step() -> (obs, reward, done, trunc, info)``
    (grounded in how ``reset``/``step`` unpack their results below).
    """

    def __init__(self, n_envs: int = 4, render_mode: Optional[str] = None):
        """Create ``n_envs`` sub-environments and take an initial reset.

        Args:
            n_envs: number of parallel sub-environments (assumed >= 1).
            render_mode: forwarded verbatim to each sub-environment.
        """
        self.n_envs = n_envs
        self.render_mode = render_mode
        self.env_fns = [
            partial(rm_simple_simulator, render_mode=render_mode) for _ in range(n_envs)
        ]
        # Create envs sequentially to avoid pickling issues with lambdas and gym
        self.envs = [env_fn() for env_fn in self.env_fns]
        # All sub-environments share the same spaces; expose the first one's.
        self.observation_space = self.envs[0].observation_space
        self.action_space = self.envs[0].action_space
        # reset() also stores the stacked observation on self.current_obs.
        self.current_obs = self.reset()

    @staticmethod
    def _stack_obs(obs_list: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Stack per-env observation dicts into one batched dict.

        Each leaf array gains a leading env axis via ``np.stack``. The nested
        key layout mirrors a single sub-environment observation.
        """
        infos = [obs["infos"] for obs in obs_list]
        return {
            "visions": np.stack([obs["visions"] for obs in obs_list]),
            "infos": {
                "time": np.stack([i["time"] for i in infos]),
                "hp": np.stack([i["hp"] for i in infos]),
                "bullet": np.stack([i["bullet"] for i in infos]),
                "attack_cooldown": np.stack([i["attack_cooldown"] for i in infos]),
                "reborn_timer": np.stack([i["reborn_timer"] for i in infos]),
                "scores": {
                    "red": np.stack([i["scores"]["red"] for i in infos]),
                    "blue": np.stack([i["scores"]["blue"] for i in infos]),
                },
            },
        }

    def reset(self) -> Dict[str, np.ndarray]:
        """Reset every sub-environment and return the stacked observation.

        The per-env ``info`` dict from each ``(obs, info)`` reset tuple is
        discarded, matching the original behavior.
        """
        results = [env.reset() for env in self.envs]
        self.current_obs = self._stack_obs([obs for obs, _ in results])
        return self.current_obs

    def step(
        self, actions: np.ndarray
    ) -> Tuple[
        Dict[str, np.ndarray], np.ndarray, np.ndarray, np.ndarray, Dict[str, np.ndarray]
    ]:
        """Step every sub-environment with its row of ``actions``.

        Args:
            actions: per-env action batch; row ``i`` goes to env ``i``.
                # actions shape: (n_envs, num_robots * 3)

        Returns:
            ``(obs, rewards, dones, truncs, infos)`` with everything stacked
            along a leading env axis; ragged info values stay plain lists.
        """
        results = [env.step(act) for env, act in zip(self.envs, actions)]
        obs_list, rew_list, done_list, trunc_list, info_list = zip(*results)

        self.current_obs = self._stack_obs(list(obs_list))
        rewards = np.stack(rew_list)
        dones = np.stack(done_list)
        truncs = np.stack(trunc_list)

        # Batch the per-env info dicts key-wise (keys taken from env 0, as
        # before). Values that stack cleanly become arrays; anything ragged
        # or non-numeric falls back to a plain per-env list. The narrow
        # except replaces the original bare `except:`, and the previous
        # dead first-pass dict build (it was always overwritten) is gone.
        infos: Dict[str, Any] = {}
        for k in info_list[0]:
            values = [info.get(k, np.zeros_like(rewards[0])) for info in info_list]
            try:
                infos[k] = np.stack(values)
            except (ValueError, TypeError):
                infos[k] = [info.get(k, {}) for info in info_list]

        return self.current_obs, rewards, dones, truncs, infos

    def close(self):
        """Close every sub-environment."""
        for env in self.envs:
            env.close()


# Main for testing
if __name__ == "__main__":
    import random

    n_envs = 4  # Small number for testing
    env = ParallelEnv(n_envs=n_envs, render_mode=None)  # No render for parallel

    obs = env.reset()
    print(
        "Reset observation shape:",
        {
            k: v.shape if hasattr(v, "shape") else type(v)
            for k, v in obs.items()
            if k != "infos"
        },
    )
    print("Infos keys:", obs["infos"].keys())

    done = False
    total_reward = 0
    for step in range(100):  # Run 100 steps
        actions = [env.action_space.sample() for _ in range(n_envs)]
        actions = np.array(actions)  # (n_envs, action_dim)
        obs, rewards, dones, truncs, infos = env.step(actions)
        total_reward += rewards.mean()
        print(f"Step {step}: Mean reward {rewards.mean():.4f}, Dones: {dones.any()}")
        if dones.any():
            break

    print(f"Total average reward over {step+1} steps: {total_reward / (step+1):.4f}")
    env.close()
