from env import *
from config import *
from model import *


def period_process(env, T, Epsilon):
    """SimPy process: drive T time slots of task generation and scheduling.

    Each slot, every user node gets a chance to generate a task; when a
    trainable task appears, the agent chooses an epsilon-greedy action and
    the environment executes it. After each slot the running mean reward
    per finished task is printed, then the process waits one SLOT_TIME.

    Args:
        env: WCPNEnv instance wrapping a SimPy environment and the agent.
        T: number of time slots to simulate.
        Epsilon: exploration probability forwarded to the agent.

    Yields:
        SimPy timeout events, one per slot of length SLOT_TIME.
    """
    # NOTE(review): record_reward is never accumulated here — rewards are
    # tracked on env.reward_record instead; kept only so the final
    # printout matches the original behavior (always prints 0).
    record_reward = 0
    for t in range(T):
        for node in range(USER_NODE_NUM):
            is_train, task = env.generate(node)
            if is_train:
                # Observe the state recorded on the task before it moved.
                state = task.prev_state
                action = env.agent.choose_action(state, Epsilon)
                # Apply the chosen action for this node, then let the
                # environment propagate communication effects.
                env.step(action, node)
                env.communication()
        if env.finish_task_num != 0:
            print(f"reward {env.reward_record/env.finish_task_num}")
        yield env.sim_env.timeout(SLOT_TIME)
    print("_________________________")
    print(record_reward)
    print("_________________________")


def main():
    """Run the DQN training loop over Episodes_number simulation episodes.

    Builds one agent and one environment, then for each episode resets the
    environment, decays the exploration rate, and runs the SimPy simulation
    for T time slots.
    """
    T = 1000               # total time slots (steps) per episode
    agent = DQN()          # create the learning agent
    env = WCPNEnv(agent)   # create the environment (wraps a SimPy env)

    Epsilon = 0.9          # initial exploration rate

    for episode in range(Episodes_number):
        env.reset()

        # Linear epsilon decay so the last Test_episodes_number episodes
        # run (near-)greedily.
        # NOTE(review): with enough episodes this drives Epsilon below 0 —
        # presumably choose_action treats <=0 as pure exploitation; confirm.
        Epsilon -= 0.9 / (Episodes_number - Test_episodes_number)

        env.sim_env.process(period_process(env, T, Epsilon))
        env.sim_env.run(until=(T + 1) * SLOT_TIME)


# Entry point: run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()