from env.mec_env_v1 import MECEnv
import numpy as np
import wandb

def random_policy(env: MECEnv):
    """Baseline policy: pick a uniformly random action (edge server) per user."""
    num_actions = env.action_dim
    num_users = env.user_num
    return np.random.randint(num_actions, size=num_users)

def no_mig_policy(env, a=0):
    """Baseline policy: every user takes the same fixed action `a` (no migration)."""
    return [a] * env.user_num

def always_policy(env: MECEnv):
    """Baseline policy: each user always selects its nearest edge server."""
    nearest = env.nearest_edge_server()
    return nearest



def train(env: MECEnv, policy, max_episodes: int = 3000, max_steps: int = 3000):
    '''
    Roll out `policy` on `env` and log per-episode metrics to wandb.

    env: MEC environment exposing reset() and step(action); step() returns
         (next_state, reward, done, info, delay, consumption)
    policy: callable taking the env and returning an action for env.step()
    max_episodes: total number of episodes to run
    max_steps: maximum number of steps per episode

    Returns the list of cumulative rewards, one entry per episode.
    '''
    rewards = []
    for eps in range(max_episodes):
        env.reset()
        episode_reward = 0
        episode_delay = 0
        episode_consumption = 0
        # Pre-bind `step` so the print below cannot raise NameError
        # when max_steps == 0 (inner loop body never runs).
        step = 0
        for step in range(max_steps):
            action = policy(env)
            next_state, reward, done, _, delay, consumption = env.step(action)

            episode_reward += reward
            episode_delay += delay
            episode_consumption += consumption
            if done:
                break

        print('Episode: ', eps, '| Episode Reward: ', episode_reward, '| Episode Length: ', step)
        wandb.log({"Episode Reward": episode_reward, "delay": episode_delay, "energy consumption": episode_consumption})
        rewards.append(episode_reward)
    # Previously accumulated but discarded; returning it lets callers plot/compare.
    return rewards


if __name__ == "__main__":
    user_num = 6
    server_num = 6
    # with wandb.init(project="env_18_0.1", name=f"ALWAYS_{user_num}_{server_num}_2M"):
    #     env = MECEnv.env(user_num, server_num, 18, 0.1)
    #     train(env, always_policy)
    #
    with wandb.init(project="env_18_0.1", name=f"RANDOM_{user_num}_{server_num}_2M"):
        env = MECEnv.env(user_num, server_num, 18, 0.1)
        train(env, random_policy)

    # with wandb.init(project="env2", name="NOMIG"):
    #     env = MECEnv.env(6, 6)
    #     train(env, no_mig_policy)
