import random
import numpy as np
import torch
from environment import MultiAgentSimpleEnv
from maddpg import MADDPG

def main(num_agents: int = 2,
         obs_dim: int = 4,
         action_dim: int = 2,
         num_episodes: int = 1000,
         max_steps: int = 25,
         batch_size: int = 128) -> None:
    """Train a MADDPG agent group on MultiAgentSimpleEnv.

    Runs ``num_episodes`` episodes of at most ``max_steps`` steps each.
    Every environment step, the transition is stored in the shared replay
    buffer and one gradient update is attempted.

    Args:
        num_agents: Number of agents in the environment.
        obs_dim: Per-agent observation dimensionality.
        action_dim: Per-agent action dimensionality.
        num_episodes: Total training episodes.
        max_steps: Step cap per episode (episode may end earlier via dones).
        batch_size: Replay-buffer sample size per update call.
    """
    env = MultiAgentSimpleEnv(num_agents=num_agents, obs_dim=obs_dim,
                              action_dim=action_dim, max_steps=max_steps)
    # All agents share the same observation/action sizes in this setup.
    obs_dims = [obs_dim] * num_agents
    action_dims = [action_dim] * num_agents
    maddpg = MADDPG(num_agents, obs_dims, action_dims,
                    actor_lr=1e-3, critic_lr=1e-3, gamma=0.95, tau=0.01,
                    buffer_capacity=100000)

    for episode in range(num_episodes):
        obs = env.reset()
        episode_rewards = np.zeros(num_agents)
        for step in range(max_steps):
            actions = maddpg.select_action(obs)
            next_obs, rewards, dones, _ = env.step(actions)
            maddpg.buffer.add(obs, actions, rewards, next_obs, dones)
            obs = next_obs
            episode_rewards += np.array(rewards)
            # NOTE(review): update is called every step from step 0 —
            # assumes MADDPG.update is a no-op while the buffer holds
            # fewer than batch_size transitions; confirm in maddpg.py.
            maddpg.update(batch_size)
            if all(dones):
                break
        print(f"Episode: {episode}, Rewards: {episode_rewards}")

if __name__ == '__main__':
    # Seed every RNG the script touches (Python, NumPy, and PyTorch CPU)
    # so training runs are reproducible. NOTE(review): CUDA RNGs are not
    # seeded here (torch.cuda.manual_seed_all) — confirm whether GPU
    # determinism is needed.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    main()
