import numpy as np

import Mobility_env
import without_Mobility_env
from Mobility_env import Mobility
from without_DRL import without_DeepQNetwork
from without_Mobility_env import without_Mobility
from RL_brain import DeepQNetwork
import matplotlib.pyplot as plt
import tensorflow

# Shared training state ------------------------------------------------------
reward_total = 0   # running reward accumulated within the current episode
Last_action = 0    # not referenced in this script — possibly read elsewhere
g = 4              # not referenced in this script — possibly read elsewhere

# Sampling intervals (in episodes) for the reward / latency plot curves.
# ("jiange" is pinyin for "interval".)
jiange_reward = 10
jiange_latency = 10
episode_num = 2000

# Per-episode totals, indexed by episode number.
reward_save = np.zeros(episode_num)
Time_save = np.zeros(episode_num)


def _sampling_grid(interval):
    """Evenly spaced points 0..episode_num, one per `interval` episodes."""
    return np.linspace(0, episode_num, episode_num // interval + 1)


# Down-sampled curves for each baseline (filled in during training; the
# linspace initial values only matter for slots never written).
y_reward_save_without = _sampling_grid(jiange_reward)
y_Time_save_without = _sampling_grid(jiange_latency)

y_reward_save_linear = _sampling_grid(jiange_reward)
y_Time_save_linear = _sampling_grid(jiange_latency)

y_reward_save_glimpse = _sampling_grid(jiange_reward)
y_Time_save_glimpse = _sampling_grid(jiange_latency)

y_reward_save_seq2seq = _sampling_grid(jiange_reward)
y_Time_save_seq2seq = _sampling_grid(jiange_latency)

y_reward_save_perfect = _sampling_grid(jiange_reward)
y_Time_save_perfect = _sampling_grid(jiange_latency)

# Shared x-axes for the two plots.
x_reward = _sampling_grid(jiange_reward)
x_latency = _sampling_grid(jiange_latency)

# Running averages over the tail episodes (> averager_episode), one pair
# per baseline; updated incrementally inside the training loops.
reward_glimpse_average = 0
reward_linear_average = 0
reward_without_average = 0
reward_seq2seq_average = 0
reward_perfect_average = 0

Time_glimpse_average = 0
Time_linear_average = 0
Time_without_average = 0
Time_seq2seq_average = 0
Time_perfect_average = 0

# Episodes after this index contribute to the running averages above.
averager_episode = 1500


def run_maze(mode):
    """Train one prediction baseline's DQN agent for `episode_num` episodes.

    `mode` selects which global agent acts and learns:
    'glimpse_mobility' -> RL, 'linear_mobility' -> RL_linear,
    'seq2seq_mobility' -> RL_seq2seq, 'perfect_mobility' -> RL_perfect.
    It also selects which global plot buffers (y_reward_save_*, y_Time_save_*)
    and running averages (*_average) are filled.

    Relies on module-level globals: `env` (the Mobility environment),
    the four agents above, `reward_total`, `reward_save`, `Time_save`,
    the sampling intervals and `averager_episode`.
    """
    global y_reward_save_linear
    global y_Time_save_linear
    global y_reward_save_glimpse
    global y_Time_save_glimpse
    global y_reward_save_seq2seq
    global y_Time_save_seq2seq
    global y_reward_save_perfect
    global y_Time_save_perfect

    global reward_glimpse_average
    global reward_linear_average
    global reward_seq2seq_average
    global reward_perfect_average

    global Time_glimpse_average
    global Time_linear_average
    global Time_seq2seq_average
    global Time_perfect_average
    # Total environment steps across ALL episodes — used both as the
    # learn-trigger (every 4th step) and passed to choose_action/learn.
    step = 0
    # Write cursors into the down-sampled plot buffers, one pair per mode.
    # (sic: "reword" is the original spelling; kept as-is.)
    count_reword_linear = 0
    count_latency_linear = 0
    count_reword_glimpse = 0
    count_latency_glimpse = 0
    count_reword_seq2seq = 0
    count_latency_seq2seq = 0
    count_reword_perfect = 0
    count_latency_perfect = 0

    for episode in range(episode_num):
        observation = env.reset()
        while True:
            # fresh env
            # env.render()
            # RL choose action based on observation
            if mode == 'glimpse_mobility':
                action = RL.choose_action(observation, step,mode)
            elif mode == 'linear_mobility':
                action = RL_linear.choose_action(observation, step,mode)
            elif mode == 'seq2seq_mobility':
                action = RL_seq2seq.choose_action(observation, step,mode)
            elif mode == 'perfect_mobility':
                action = RL_perfect.choose_action(observation, step,mode)
            # RL take action and get next observation and reward
            observation_, reward, done = env.step(action, episode)
            global reward_total
            reward_total = reward_total + reward
            # Store the transition in the replay memory of the active agent.
            if mode == 'glimpse_mobility':
                RL.store_transition(observation, action, reward, observation_)
            elif mode == 'linear_mobility':
                RL_linear.store_transition(observation, action, reward, observation_)
            elif mode == 'seq2seq_mobility':
                RL_seq2seq.store_transition(observation, action, reward, observation_)
            elif mode == 'perfect_mobility':
                RL_perfect.store_transition(observation, action, reward, observation_)
            # if step > 200 and step % 4 == 0:
            # Learn every 4th global step (no warm-up; see commented line above).
            if step % 4 == 0:
                if mode == 'glimpse_mobility':
                    RL.learn(step)
                elif mode == 'linear_mobility':
                    RL_linear.learn(step)
                elif mode == 'seq2seq_mobility':
                    RL_seq2seq.learn(step)
                elif mode == 'perfect_mobility':
                    RL_perfect.learn(step)
            # swap observation
            observation = observation_
            # break while loop when end of this episode
            if done:
                print("episode", episode)
                print("reward_total", reward_total / Mobility_env.file_num)
                # print("Time_save", Time_save / Mobility_env.file_num)
                # Record this episode's latency (log-scale, see the 10**
                # conversion in run_maze_without) and total reward.
                Time_save[episode] = Mobility_env.Time_
                reward_save[episode] = reward_total
                # Per-mode bookkeeping: every jiange_* episodes, write the
                # sample at the cursor, advance the cursor, and pre-write the
                # next slot too (it gets overwritten at the next sample, so
                # the curve never shows a stale linspace value mid-run).
                # Episodes past averager_episode also feed an incremental
                # running mean:  avg_k = (avg_{k-1}*(k-1) + x_k) / k.
                if mode == 'linear_mobility':
                    if episode % jiange_reward == 0:
                        y_reward_save_linear[count_reword_linear] = reward_save[episode]
                        count_reword_linear = count_reword_linear + 1
                        y_reward_save_linear[count_reword_linear] = reward_save[episode]
                    if episode % jiange_latency == 0:
                        y_Time_save_linear[count_latency_linear] = Time_save[episode]
                        count_latency_linear = count_latency_linear + 1
                        y_Time_save_linear[count_latency_linear] = Time_save[episode]
                    if episode > averager_episode:
                        reward_linear_average = (reward_linear_average * (episode - averager_episode - 1) +
                                                 reward_save[
                                                     episode]) / (episode - averager_episode)
                        Time_linear_average = (Time_linear_average * (episode - averager_episode - 1) + Time_save[
                            episode]) / (episode - averager_episode)
                # Identical bookkeeping for the remaining three modes.
                elif mode == 'glimpse_mobility':
                    if episode % jiange_reward == 0:
                        y_reward_save_glimpse[count_reword_glimpse] = reward_save[episode]
                        count_reword_glimpse = count_reword_glimpse + 1
                        y_reward_save_glimpse[count_reword_glimpse] = reward_save[episode]
                    if episode % jiange_latency == 0:
                        y_Time_save_glimpse[count_latency_glimpse] = Time_save[episode]
                        count_latency_glimpse = count_latency_glimpse + 1
                        y_Time_save_glimpse[count_latency_glimpse] = Time_save[episode]
                    if episode > averager_episode:
                        reward_glimpse_average = (reward_glimpse_average * (episode - averager_episode - 1) +
                                                  reward_save[
                                                      episode]) / (episode - averager_episode)
                        Time_glimpse_average = (Time_glimpse_average * (episode - averager_episode - 1) + Time_save[
                            episode]) / (episode - averager_episode)
                elif mode == 'seq2seq_mobility':
                    if episode % jiange_reward == 0:
                        y_reward_save_seq2seq[count_reword_seq2seq] = reward_save[episode]
                        count_reword_seq2seq = count_reword_seq2seq + 1
                        y_reward_save_seq2seq[count_reword_seq2seq] = reward_save[episode]
                    if episode % jiange_latency == 0:
                        y_Time_save_seq2seq[count_latency_seq2seq] = Time_save[episode]
                        count_latency_seq2seq = count_latency_seq2seq + 1
                        y_Time_save_seq2seq[count_latency_seq2seq] = Time_save[episode]
                    if episode > averager_episode:
                        reward_seq2seq_average = (reward_seq2seq_average * (episode - averager_episode - 1) +
                                                  reward_save[
                                                      episode]) / (episode - averager_episode)
                        Time_seq2seq_average = (Time_seq2seq_average * (episode - averager_episode - 1) + Time_save[
                            episode]) / (episode - averager_episode)


                elif mode == 'perfect_mobility':
                    if episode % jiange_reward == 0:
                        y_reward_save_perfect[count_reword_perfect] = reward_save[episode]
                        count_reword_perfect = count_reword_perfect + 1
                        y_reward_save_perfect[count_reword_perfect] = reward_save[episode]
                    if episode % jiange_latency == 0:
                        y_Time_save_perfect[count_latency_perfect] = Time_save[episode]
                        count_latency_perfect = count_latency_perfect + 1
                        y_Time_save_perfect[count_latency_perfect] = Time_save[episode]
                    if episode > averager_episode:
                        reward_perfect_average = (reward_perfect_average * (episode - averager_episode - 1) +
                                                  reward_save[
                                                      episode]) / (episode - averager_episode)
                        Time_perfect_average = (Time_perfect_average * (episode - averager_episode - 1) + Time_save[
                            episode]) / (episode - averager_episode)
                # Reset the shared accumulator for the next episode.
                reward_total = 0
                break
            step += 1
    # y_reward_save_without[count_reword] = reward_save[episode]
    # y_reward_save_without[count_latency] = Time_save[episode]
    # import matplotlib.pyplot as plt
    # Averages for modes not trained in this call keep their prior values.
    print("reward_glimpse_average=", reward_glimpse_average)
    print("reward_linear_average=", reward_linear_average)
    print("reward_seq2seq_average=", reward_seq2seq_average)
    print("reward_perfect_average=", reward_perfect_average)

    print("Time_glimpse_average=", Time_glimpse_average)
    print("Time_linear_average=", Time_linear_average)
    print("Time_seq2seq_average=", Time_seq2seq_average)
    print("Time_perfect_average=", Time_perfect_average)

    print('game over')
    # if mode == 'linear_mobility':
    #     env.destroy()


def run_maze_without():
    """Train the no-prediction baseline, then print all averages and plot.

    Runs `episode_num` episodes with the global `without_RL` agent in the
    global `without_env` environment, filling y_reward_save_without /
    y_Time_save_without and the *_without_average running means.  Because
    this is the last baseline to run, it also draws the two comparison
    plots (reward and latency) across all five baselines, reading the
    buffers filled earlier by run_maze().
    """
    global y_reward_save_without
    global y_Time_save_without
    global reward_without_average
    global Time_without_average
    # Total environment steps across all episodes (learn every 4th step).
    step = 0
    # Write cursors into the down-sampled plot buffers.
    count_reword_without = 0
    count_latency_without = 0
    for episode in range(episode_num):
        observation = without_env.reset()
        while True:
            # fresh env
            # env.render()
            # RL choose action based on observation
            action = without_RL.choose_action(observation, step)
            # RL take action and get next observation and reward
            observation_, reward, done = without_env.step(action)
            global reward_total
            reward_total = reward_total + reward
            without_RL.store_transition(observation, action, reward, observation_)
            if step % 4 == 0:
                without_RL.learn(step)
            # swap observation
            observation = observation_
            # break while loop when end of this episode
            if done:
                print("episode", episode)
                # NOTE(review): this divides by Mobility_env.file_num while
                # the rest of this function reads without_Mobility_env —
                # confirm both modules expose the same file_num.
                print("reward_total", reward_total / Mobility_env.file_num)
                # print("Time_save", Time_save / Mobility_env.file_num)
                Time_save[episode] = without_Mobility_env.Time_
                reward_save[episode] = reward_total
                # Every jiange_* episodes: write the sample, advance the
                # cursor, and pre-write the next slot (overwritten at the
                # next sampling episode) — same pattern as run_maze().
                if episode % jiange_reward == 0:
                    y_reward_save_without[count_reword_without] = reward_save[episode]
                    count_reword_without = count_reword_without + 1
                    y_reward_save_without[count_reword_without] = reward_save[episode]
                if episode % jiange_latency == 0:
                    y_Time_save_without[count_latency_without] = Time_save[episode]
                    count_latency_without = count_latency_without + 1
                    y_Time_save_without[count_latency_without] = Time_save[episode]
                # Incremental running mean over episodes > averager_episode.
                if episode > averager_episode:
                    reward_without_average = (reward_without_average * (episode - averager_episode - 1) + reward_save[
                        episode]) / (
                                                     episode - averager_episode)
                    Time_without_average = (Time_without_average * (episode - averager_episode - 1) + Time_save[
                        episode]) / (
                                                   episode - averager_episode)
                reward_total = 0
                break
            step += 1
    print("reward_perfect_average=", reward_perfect_average)
    print("reward_glimpse_average=", reward_glimpse_average)
    print("reward_seq2seq_average=", reward_seq2seq_average)
    print("reward_linear_average=", reward_linear_average)
    print("reward_without_average=", reward_without_average)

    print("Time_perfect_average=", Time_perfect_average)
    print("Time_glimpse_average=", Time_glimpse_average)
    print("Time_seq2seq_average=", Time_seq2seq_average)
    print("Time_linear_average=", Time_linear_average)
    print("Time_without_average=", Time_without_average)

    # 10**avg converts the log-scale latency back to milliseconds.
    # Labels (Chinese): 完美 = perfect, 我的 = mine (Bi-LSTM glimpse),
    # 线性 = linear, 无预测 = no prediction.
    print("完美", 10**Time_perfect_average,"ms")
    print("我的=",10**Time_glimpse_average,"ms")
    print("seq2seq=",10**Time_seq2seq_average,"ms")
    print("线性=", 10**Time_linear_average  ,"ms")
    print("无预测=",10**Time_without_average,"ms")
    print('game over')
    # plt.subplot(2, 2, 1)
    # s = 25
    # Reward curves for all five baselines, with dashed tail-average lines.
    plt.plot(x_reward, y_reward_save_linear, marker='.' , markersize=5, color='blue', label='linear')
    plt.plot(x_reward, y_reward_save_glimpse, marker='.', markersize=5,color='red', label='Bidirectional LSTM')
    plt.plot(x_reward, y_reward_save_seq2seq, marker='.', markersize=5,color='black', label='seq2seq')
    plt.plot(x_reward, y_reward_save_without, marker='.', markersize=5,color='green', label='without')
    plt.plot(x_reward, y_reward_save_perfect, marker='.', markersize=5,color='yellow', label='perfect')
    plt.xlim(100, episode_num)
    plt.ylim(-100, 400)
    plt.hlines(reward_linear_average, 1, episode_num, color='blue', linestyle='--')
    plt.hlines(reward_glimpse_average, 1, episode_num,color='red',   linestyle='--')
    plt.hlines(reward_seq2seq_average, 1, episode_num,color='black', linestyle='--')
    plt.hlines(reward_without_average, 1, episode_num,color='green', linestyle='--')
    plt.hlines(reward_perfect_average, 1, episode_num,color='yellow',linestyle='--')
    plt.ylabel('reward')
    plt.xlabel('episode')
    plt.legend(loc=0, ncol=2)
    plt.show()
    # plt.subplot(2, 2, 4)
    # Latency curves (log-scale values) for all five baselines.
    plt.plot(x_latency, y_Time_save_without, marker='.', markersize=5,color='green', label='without')
    plt.plot(x_latency, y_Time_save_glimpse, marker='.', markersize=5,color='red', label='Bidirectional LSTM')
    plt.plot(x_latency, y_Time_save_seq2seq, marker='.', markersize=5,color='black', label='seq2seq')
    plt.plot(x_latency, y_Time_save_linear, marker='.' , markersize=5,color='blue', label='linear')
    plt.plot(x_latency, y_Time_save_perfect, marker='.', markersize=5,color='yellow', label='perfect')
    plt.xlim(100, episode_num)
    plt.ylim(1.300, 1.315)
    plt.ylabel('latency(ms,log scale)')
    plt.xlabel('episode')
    plt.legend(loc=0, ncol=2)

    plt.show()

    # without_env.destroy()


if __name__ == "__main__":
    # Train the four prediction baselines in turn (glimpse / linear /
    # seq2seq / perfect), each with its own DQN, then the no-prediction
    # baseline, which also draws the comparison plots.
    # maze game
    env = Mobility()
    Mobility_env.baselines = 'glimpse_mobility'
    print("Mobility_env.baselines", Mobility_env.baselines)
    RL = DeepQNetwork(env.n_actions, env.n_features,
                      learning_rate=15e-5,
                      reward_decay=0.9,
                      e_greedy=1,
                      replace_target_iter=1600,
                      memory_size=12000,
                      # output_graph=True
                      )
    # BUG FIX: the original `env.after(100, run_maze(...))` CALLED run_maze
    # immediately and scheduled its None return value, so training ran
    # outside the Tk event loop.  Pass the callable and its argument
    # separately (Tk's after(ms, func, *args) form), matching the correct
    # `without_env.after(100, run_maze_without)` call below.
    env.after(100, run_maze, Mobility_env.baselines)
    env.mainloop()
    # Drop the trained graph before building the next agent's network.
    tensorflow.compat.v1.reset_default_graph()
    Mobility_env.baselines = 'linear_mobility'
    print("Mobility_env.baselines", Mobility_env.baselines)
    RL_linear = DeepQNetwork(env.n_actions, env.n_features,
                             learning_rate=15e-5,
                             reward_decay=0.9,
                             e_greedy=1,
                             replace_target_iter=1600,
                             memory_size=12000,
                             # output_graph=True
                             )
    env.after(100, run_maze, Mobility_env.baselines)
    env.mainloop()
    tensorflow.compat.v1.reset_default_graph()
    Mobility_env.baselines = 'seq2seq_mobility'
    print("Mobility_env.baselines", Mobility_env.baselines)
    RL_seq2seq = DeepQNetwork(env.n_actions, env.n_features,
                              learning_rate=15e-5,
                              reward_decay=0.9,
                              e_greedy=1,
                              replace_target_iter=1600,
                              memory_size=12000,
                              # output_graph=True
                              )
    env.after(100, run_maze, Mobility_env.baselines)
    env.mainloop()
    tensorflow.compat.v1.reset_default_graph()
    Mobility_env.baselines = 'perfect_mobility'
    print("Mobility_env.baselines", Mobility_env.baselines)
    RL_perfect = DeepQNetwork(env.n_actions, env.n_features,
                              learning_rate=15e-5,
                              reward_decay=0.9,
                              e_greedy=1,
                              replace_target_iter=1600,
                              memory_size=12000,
                              # output_graph=True
                              )
    env.after(100, run_maze, Mobility_env.baselines)
    env.mainloop()
    tensorflow.compat.v1.reset_default_graph()
    # No-prediction baseline uses its own environment and DQN variant.
    without_env = without_Mobility()
    without_RL = without_DeepQNetwork(without_env.n_actions, without_env.n_features,
                                      learning_rate=15e-5,
                                      reward_decay=0.9,
                                      e_greedy=1.,
                                      replace_target_iter=1600,
                                      memory_size=12000,
                                      # output_graph=True
                                      )
    without_env.after(100, run_maze_without)
    without_env.mainloop()

    print('game over')
    # RL.plot_cost()
