import gym
import numpy as np

class MultiAgentSimpleEnv(gym.Env):
    """Minimal multi-agent environment with continuous states and actions.

    Each of ``num_agents`` agents has an independent Box observation space of
    shape ``(obs_dim,)`` and Box action space of shape ``(action_dim,)`` in
    ``[-1, 1]``.  On every step an agent's state moves by its action plus
    Gaussian noise (std 0.1), and the agent receives the negative squared
    L2 norm of its action as reward.  All agents terminate together after
    ``max_steps`` steps.

    Note: this uses the classic (pre-0.26) Gym API — ``reset()`` returns only
    observations and ``step()`` returns ``(obs, rewards, dones, info)``.
    """

    metadata = {'render.modes': ['human']}

    def __init__(self, num_agents=2, obs_dim=4, action_dim=2, max_steps=25):
        """Create the environment.

        Args:
            num_agents: number of independent agents.
            obs_dim: dimensionality of each agent's observation vector.
            action_dim: dimensionality of each agent's action vector.
                May differ from ``obs_dim``; actions are zero-padded or
                truncated to ``obs_dim`` for the state update.
            max_steps: episode length; all agents are done after this many steps.
        """
        super().__init__()
        self.num_agents = num_agents
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.max_steps = max_steps
        self.current_step = 0
        # Per-agent spaces (lists, as is conventional for simple multi-agent envs).
        self.observation_space = [
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(obs_dim,), dtype=np.float32)
            for _ in range(num_agents)
        ]
        self.action_space = [
            gym.spaces.Box(low=-1.0, high=1.0, shape=(action_dim,), dtype=np.float32)
            for _ in range(num_agents)
        ]
        self.state = [np.zeros(obs_dim, dtype=np.float32) for _ in range(num_agents)]

    def reset(self, seed=None):
        """Reset all agents to uniform-random states in [-1, 1).

        Args:
            seed: optional int; when given, seeds NumPy's global RNG so the
                episode is reproducible.  Default ``None`` preserves the
                original unseeded behavior.

        Returns:
            List of per-agent ``float32`` observation arrays of shape ``(obs_dim,)``.
        """
        if seed is not None:
            np.random.seed(seed)
        self.current_step = 0
        self.state = [np.random.uniform(-1, 1, self.obs_dim).astype(np.float32)
                      for _ in range(self.num_agents)]
        return self.state

    def step(self, actions):
        """Advance the environment by one step.

        Args:
            actions: sequence of ``num_agents`` action vectors (array-like).

        Returns:
            Tuple ``(next_state, rewards, dones, info)`` where ``next_state``
            is a list of ``float32`` arrays, ``rewards`` is a list of floats
            (negative squared action norm, computed on the action as supplied),
            ``dones`` is a list of identical booleans (all agents terminate
            together once ``max_steps`` is reached), and ``info`` is an empty dict.
        """
        self.current_step += 1
        next_state = []
        rewards = []
        for i in range(self.num_agents):
            action = np.asarray(actions[i], dtype=np.float32)
            # Reward is based on the caller-supplied action, before any
            # padding/truncation used for the state update.
            rewards.append(float(-np.sum(np.square(action))))
            # Match the action length to the state dimension: zero-pad when
            # shorter, truncate when longer.  (The original code only padded,
            # and crashed with a broadcast error when action_dim > obs_dim.)
            if action.shape[0] < self.obs_dim:
                delta = np.pad(action, (0, self.obs_dim - action.shape[0]), 'constant')
            else:
                delta = action[:self.obs_dim]
            noise = np.random.randn(self.obs_dim) * 0.1
            next_state.append((self.state[i] + delta + noise).astype(np.float32))
        self.state = next_state
        # All agents share one termination condition, so compute it once.
        done = self.current_step >= self.max_steps
        return next_state, rewards, [done] * self.num_agents, {}

    def render(self, mode='human'):
        """No-op render (nothing to visualize in this toy environment)."""
        pass

    def close(self):
        """No-op close (no external resources are held)."""
        pass
