from env_self import Environment
from ppo_test import Agent
import numpy as np




def update():
    """Run the PPO training loop over 2000 episodes.

    Relies on module-level globals created in ``__main__``:
    ``env`` (the Environment / Tk widget) and ``agent`` (the PPO Agent).
    Each episode: reset the environment, step until ``done``, store each
    transition in the agent's memory, call ``agent.learn()`` every step,
    and checkpoint the model whenever the 100-episode moving-average
    score improves.
    """
    # Steps taken in each episode (kept for later plotting/inspection).
    steps = []

    # Total reward collected in each episode.
    score_history = []

    learn_iters = 0
    avg_score = 0
    best_score = -100  # any real average will beat this initial sentinel

    for episode in range(2000):
        # Reset the environment: place the agent back at the start cell
        # and clear the episode bookkeeping; returns the initial state.
        observation = env.reset()
        score = 0
        # Number of steps taken so far in the current episode.
        step_count = 0

        while True:
            env.render()

            # Policy picks an action from the current state; also returns
            # the action log-prob and the critic's value estimate, which
            # PPO needs for the update.
            action, prob, val = agent.choose_action(observation)

            # Execute the action: get reward, next state, and done flag.
            observation_, reward, done = env.step(action)

            score += reward
            step_count += 1

            # Store the transition for the next PPO update.
            agent.remember(observation, action, prob, val, reward, done)
            # NOTE(review): learning every step (the original `if i % N == 0`
            # gate was commented out) — confirm this is intended; a per-N
            # schedule is the more common PPO setup.
            agent.learn()
            learn_iters += 1

            # Advance to the next state.
            observation = observation_

            if done:
                steps.append(step_count)
                break

        # BUG FIX: record the episode's total score ONCE per episode
        # (previously this ran inside the step loop, appending partial
        # running scores every step and averaging over steps instead of
        # episodes).
        score_history.append(score)
        avg_score = np.mean(score_history[-100:])

        # Checkpoint whenever the moving-average score improves.
        if avg_score > best_score:
            best_score = avg_score
            agent.save_models()

        print('episode', episode, 'score %.1f' % score, 'avg score %.1f' % avg_score,
                'time_steps', step_count, 'learning_steps', learn_iters)



if __name__ == "__main__":
    # Hyper-parameters for the PPO agent.
    N = 20            # intended learn-interval (see update())
    batch_size = 5
    n_epochs = 4
    alpha = 0.00005   # learning rate

    # Build the environment (a Tk-based GUI) and the agent on top of it.
    env = Environment()
    agent = Agent(
        n_actions=env.n_actions,
        batch_size=batch_size,
        alpha=alpha,
        n_epochs=n_epochs,
        input_dims=len(env.action_space),
    )

    # Schedule training to start 100 ms after the GUI comes up, then
    # hand control to the Tk event loop.
    env.after(100, update)
    env.mainloop()