from typing import SupportsFloat, Any, Tuple, Dict, Optional

import numpy as np
from numpy import ndarray
from collections import deque
import cv2

import pettingzoo as ptz
from pettingzoo import atari, butterfly
from pettingzoo.utils.env import AgentID, ActionType, ObsType

import gymnasium as gym
from gymnasium.core import WrapperActType, WrapperObsType

# Registry of supported PettingZoo environments, keyed by a short name that
# callers pass to create_pettingzoo_env(). Each value is a PettingZoo env
# module exposing a parallel_env() factory.
# NOTE(review): the key "emtombed-cooperative" is misspelled (should be
# "entombed"), but it is a runtime lookup key — renaming it would break
# existing callers, so it is kept as-is.
ENV_DICT = {
    "double-dunk": atari.double_dunk_v3,
    "emtombed-cooperative": atari.entombed_cooperative_v3,
    "foozpong": atari.foozpong_v3,
    "quadrapong": atari.quadrapong_v4,
    "space-invaders": atari.space_invaders_v2,
    "volleyball-pong": atari.volleyball_pong_v3,
    "warlords": atari.warlords_v3,
    "wizard-of-wor": atari.wizard_of_wor_v3,
    "cooperative-pong": butterfly.cooperative_pong_v5,
    "knights-archers-zombies": butterfly.knights_archers_zombies_v10,
    "pistonball": butterfly.pistonball_v6,
}


def create_pettingzoo_env(env_name):
    """Create a parallel PettingZoo environment by its short name.

    Args:
        env_name: One of the keys of ``ENV_DICT``.

    Returns:
        A PettingZoo parallel environment created with ``render_mode="rgb_array"``.

    Raises:
        AssertionError: If ``env_name`` is not a supported environment name.
    """
    # Fix: original message concatenated without a space, producing e.g.
    # "double-dunkis not supported!". Also, membership test on the dict
    # directly instead of .keys().
    assert env_name in ENV_DICT, f"{env_name} is not supported!"
    return ENV_DICT[env_name].parallel_env(render_mode="rgb_array")


class PettingzooToGymnasiumEnv(gym.Env):
    """Adapt a PettingZoo ``ParallelEnv`` to a single Gymnasium-style env.

    Per-agent observation dicts are converted to numpy arrays stacked in
    ``possible_agents`` order, and stacked action arrays are split back into
    per-agent dicts. ``observation_space`` / ``action_space`` expose the
    space of the first agent (assumes all agents share the same spaces —
    TODO confirm for each wrapped env). Rewards, termination/truncation
    flags and info remain per-agent dicts as returned by PettingZoo.
    """

    def __init__(self, pettingzoo_env: ptz.ParallelEnv):
        self.env = pettingzoo_env
        self.agent_num = len(self.env.possible_agents)
        # NOTE(review): despite the "_shape" suffix, these attributes hold
        # Space objects, not shapes; names kept for backward compatibility.
        self.single_agent_observation_shape = self.env.observation_space(self.env.possible_agents[0])
        self.single_agent_action_shape = self.env.action_space(self.env.possible_agents[0])
        self.observation_space = self.single_agent_observation_shape
        self.action_space = self.single_agent_action_shape

    def sample_random_action(self) -> np.ndarray:
        """Sample one random action per agent, stacked along axis 0."""
        return np.stack(
            [self.env.action_space(agent).sample() for agent in self.env.possible_agents]
        )

    def dict_obs_to_np_obs(self, raw_obs: Dict[str, Any]) -> np.ndarray:
        """Stack the per-agent observation dict into one array (agent axis first)."""
        return np.stack([raw_obs[agent] for agent in self.env.possible_agents])

    def reset(self, *, seed: Optional[int] = None, options: Optional[Dict[str, Any]] = None) \
            -> Tuple[WrapperObsType, Dict[str, Any]]:
        """Reset the wrapped env and return ``(stacked_obs, info)``."""
        raw_obs, info = self.env.reset(seed=seed, options=options)
        return self.dict_obs_to_np_obs(raw_obs), info

    def step(self, action: WrapperActType) \
            -> Tuple[np.ndarray, Dict[Any, float], Dict[Any, bool], Dict[Any, bool], Dict[Any, dict]]:
        """Step with a stacked action array; non-observation outputs stay per-agent dicts.

        Fix: the original return annotation was ``ndarray | ndarray`` (a
        redundant union) and mislabeled the dict components.
        """
        # Pair actions with agents in possible_agents order (the same order
        # used when stacking observations).
        raw_action = dict(zip(self.env.possible_agents, action))
        raw_obs, reward, terminated, truncations, info = self.env.step(raw_action)
        return self.dict_obs_to_np_obs(raw_obs), reward, terminated, truncations, info


class ResizeObservationWrapper(ptz.utils.wrappers.BaseParallelWrapper):
    """Resize every agent's image observation to a fixed ``(height, width)``.

    Uses ``cv2.resize`` with ``INTER_AREA`` interpolation. A trailing channel
    dimension (if present) is preserved; observations are advertised as
    ``uint8`` Boxes in ``[0, 255]``.
    """

    def __init__(self, pettingzoo_env: ptz.ParallelEnv, shape: tuple[int, int] | int = (84, 84)) -> None:
        """
        Args:
            pettingzoo_env: The parallel env whose observations are resized.
            shape: Target ``(height, width)``, or a single int for a square.
        """
        super().__init__(pettingzoo_env)
        self.env = pettingzoo_env
        if isinstance(shape, int):
            shape = (shape, shape)
        assert len(shape) == 2 and all(
            x > 0 for x in shape
        ), f"Expected shape to be a 2-tuple of positive integers, got: {shape}"
        # Fix: the original assigned the raw (possibly int) `shape` to
        # self.shape before validation, only to overwrite it afterwards;
        # assign the normalized tuple exactly once.
        self.shape = tuple(shape)
        self.obs_shape = self.shape
        self.obs_spaces = dict()
        for agent in self.possible_agents:
            dims = len(self.env.observation_space(agent).shape)
            assert (
                    dims == 2 or dims == 3
            ), f"Expected the observation space to have 2 or 3 dimensions, got: {dims}"
            # Keep any channel dims beyond (H, W) unchanged.
            obs_shape = self.shape + self.env.observation_space(agent).shape[2:]
            self.obs_spaces[agent] = gym.spaces.Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)

    def resize(self, observation):
        """Resize each agent's observation; cv2 expects (width, height), hence [::-1]."""
        resized_obs = dict()
        for agent in self.possible_agents:
            resized_obs[agent] = cv2.resize(observation[agent], self.shape[::-1],
                                            interpolation=cv2.INTER_AREA)
        return resized_obs

    def reset(
            self, seed: int | None = None, options: dict | None = None
    ) -> tuple[dict[AgentID, ObsType], dict[AgentID, dict]]:
        """Reset the wrapped env and resize the initial observations."""
        observation, info = self.env.reset(seed=seed, options=options)
        return self.resize(observation), info

    def step(
            self, actions: dict[AgentID, ActionType]
    ) -> tuple[
        dict[AgentID, ObsType],
        dict[AgentID, float],
        dict[AgentID, bool],
        dict[AgentID, bool],
        dict[AgentID, dict],
    ]:
        """Step the wrapped env and resize the resulting observations."""
        observation, reward, terminated, truncations, info = self.env.step(actions)
        return self.resize(observation), reward, terminated, truncations, info

    def observation_space(self, agent: AgentID) -> gym.spaces.Space:
        """Return the resized observation space for ``agent``."""
        return self.obs_spaces[agent]


class GrayScaleObservationWrapper(ptz.utils.wrappers.BaseParallelWrapper):
    """Convert each agent's RGB observation to grayscale via ``cv2.cvtColor``.

    Requires every agent's observation space to be a 3-channel RGB Box.
    With ``keep_dim=True`` the output keeps a trailing channel axis of
    size 1; otherwise observations become 2-D ``(H, W)`` arrays.
    """

    def __init__(self, pettingzoo_env: ptz.ParallelEnv, keep_dim: bool = False) -> None:
        """
        Args:
            pettingzoo_env: The parallel env whose observations are converted.
            keep_dim: Whether to keep a trailing channel dimension of size 1.
        """
        super().__init__(pettingzoo_env)
        self.env = pettingzoo_env
        self.keep_dim = keep_dim

        self.obs_spaces = dict()
        for agent in self.possible_agents:
            # Hoist the repeated observation_space(agent) lookups; also give
            # the previously bare assert a diagnostic message.
            space = self.env.observation_space(agent)
            assert (
                    isinstance(space, gym.spaces.Box)
                    and len(space.shape) == 3
                    and space.shape[-1] == 3
            ), f"Expected a 3-channel RGB Box observation space for {agent}, got: {space}"

            obs_shape = space.shape[:2]
            self.obs_shape = obs_shape
            if self.keep_dim:
                self.obs_spaces[agent] = gym.spaces.Box(
                    low=0, high=255, shape=(obs_shape[0], obs_shape[1], 1), dtype=np.uint8
                )
            else:
                self.obs_spaces[agent] = gym.spaces.Box(
                    low=0, high=255, shape=obs_shape, dtype=np.uint8
                )

    def gray_scale(self, observation):
        """Convert each agent's RGB frame to grayscale (optionally keeping the channel axis)."""
        gray_obs = dict()
        for agent in self.possible_agents:
            gray_obs[agent] = cv2.cvtColor(observation[agent], cv2.COLOR_RGB2GRAY)
            if self.keep_dim:
                gray_obs[agent] = np.expand_dims(gray_obs[agent], -1)
        return gray_obs

    def reset(
            self, seed: int | None = None, options: dict | None = None
    ) -> tuple[dict[AgentID, ObsType], dict[AgentID, dict]]:
        """Reset the wrapped env and grayscale the initial observations."""
        observation, info = self.env.reset(seed=seed, options=options)
        return self.gray_scale(observation), info

    def step(
            self, actions: dict[AgentID, ActionType]
    ) -> tuple[
        dict[AgentID, ObsType],
        dict[AgentID, float],
        dict[AgentID, bool],
        dict[AgentID, bool],
        dict[AgentID, dict],
    ]:
        """Step the wrapped env and grayscale the resulting observations."""
        observation, reward, terminated, truncations, info = self.env.step(actions)
        return self.gray_scale(observation), reward, terminated, truncations, info

    def observation_space(self, agent: AgentID) -> gym.spaces.Space:
        """Return the grayscale observation space for ``agent``."""
        return self.obs_spaces[agent]


class FrameStackWrapper(ptz.utils.wrappers.BaseParallelWrapper):
    """Stack the last ``num_stack`` observations per agent along a new axis 0.

    On reset the deques are filled with ``num_stack`` copies of the initial
    frame, so the stacked shape is constant from the first step.
    """

    def __init__(self, pettingzoo_env: ptz.ParallelEnv, num_stack: int, lz4_compress: bool = False) -> None:
        """
        Args:
            pettingzoo_env: The parallel env whose frames are stacked.
            num_stack: Number of consecutive frames to stack.
            lz4_compress: Accepted for API parity but currently unused.
        """
        super().__init__(pettingzoo_env)
        self.env = pettingzoo_env
        self.num_stack = num_stack
        self.lz4_compress = lz4_compress
        # NOTE(review): resetting here is a construction-time side effect,
        # kept because single_observation_space() reads self.observation.
        self.observation, self.info = self.env.reset()
        self.frames = dict()
        self.obs_spaces = dict()
        for agent in self.possible_agents:
            self.frames[agent] = deque(maxlen=num_stack)
            shape = (num_stack,) + self.env.observation_space(agent).shape
            self.obs_shape = shape
            self.obs_spaces[agent] = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)

    def stack(self, observation):
        """Push each agent's newest frame into its bounded deque."""
        for agent in self.possible_agents:
            self.frames[agent].append(observation[agent])

    def deque_to_np(self):
        """Materialize each agent's frame deque as a stacked numpy array."""
        stack_obs = dict()
        for agent in self.possible_agents:
            stack_obs[agent] = np.stack(self.frames[agent])
        return stack_obs

    def reset(
            self, seed: int | None = None, options: dict | None = None
    ) -> tuple[dict[AgentID, ObsType], dict[AgentID, dict]]:
        """Reset and pre-fill the stacks with copies of the first frame."""
        observation, info = self.env.reset(seed=seed, options=options)
        for _ in range(self.num_stack):
            self.stack(observation)
        stack_obs = self.deque_to_np()
        return stack_obs, info

    def step(
            self, actions: dict[AgentID, ActionType]
    ) -> tuple[
        dict[AgentID, ObsType],
        dict[AgentID, float],
        dict[AgentID, bool],
        dict[AgentID, bool],
        dict[AgentID, dict],
    ]:
        """Step the wrapped env and return the updated frame stacks."""
        observation, reward, terminated, truncations, info = self.env.step(actions)
        self.stack(observation)
        stack_obs = self.deque_to_np()
        return stack_obs, reward, terminated, truncations, info

    def observation_space(self, agent: AgentID) -> gym.spaces.Space:
        """Return the stacked observation space for ``agent``."""
        return self.obs_spaces[agent]

    def single_observation_space(self):
        """Return the first agent's raw (un-stacked) observation from construction time.

        NOTE(review): despite its name this returns an observation, not a
        Space; name kept for backward compatibility.
        """
        # Fix: original read the nonexistent attribute `possible_agent`
        # (missing "s"), raising AttributeError whenever this was called.
        return self.observation[self.env.possible_agents[0]]



if __name__ == "__main__":
    # Smoke test: build the full preprocessing pipeline on one env and
    # run a handful of random steps.
    env = create_pettingzoo_env("double-dunk")
    env = ResizeObservationWrapper(env, shape=(84, 84))
    env = GrayScaleObservationWrapper(env)
    env = FrameStackWrapper(env, num_stack=4)

    for agent in env.possible_agents:
        print(env.observation_space(agent))

    observation, info = env.reset()
    for agent in env.possible_agents:
        print(observation[agent].shape)

    for _ in range(10):
        actions = {}
        for agent in env.agents:
            actions[agent] = env.action_space(agent).sample()
        observation, reward, terminated, truncations, info = env.step(actions)
        print(reward, terminated, truncations)
