import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import copy

from Memory import Memory
from env.mec_env_v1 import MECEnv
import wandb

'''
env1 : multi-agent DQN trainer for the MEC environment defined in env/mec_env_v1.
'''

# hyper-parameters
BATCH_SIZE = 128  # minibatch size sampled from the replay buffer per learn() call
# LR = 5e-6 # 0.0005   0.0008 ->140000 0.001 converges fast w0
#LR = 5e-6  # 5,6
LR = 5e-4  # learning rate for the Adam optimizer
print("LR:",LR)
GAMMA = 0.95  # discount factor for bootstrapped Q-targets
EPISILO = 0.99  # greedy-action probability (NOTE: name is a typo of EPSILON; kept for compatibility)
MEMORY_CAPACITY = 10000  # replay-buffer size; learning starts once the buffer is full (see main)
Q_NETWORK_ITERATION = 100 # interval (in learn() steps) between copies of eval_net into target_net


user_num = 6
server_num = 6
# NOTE(review): the trailing arguments (10, 0.5) are env-specific settings —
# confirm their meaning against env/mec_env_v1.
env = MECEnv.env(user_num, server_num, 10, 0.5)
NUM_ACTIONS = env.action_dim  # size of each user's discrete action space
NUM_STATES = env.state_dim    # per-user state feature dimension
USER_NUM = env.user_num
memory = Memory(MEMORY_CAPACITY, USER_NUM, NUM_STATES)  # shared replay buffer


def reward_scaler_env2(reward, reward_scale = 10):
    """Shift the raw environment reward by a constant offset of +0.85.

    NOTE(review): ``reward_scale`` is currently unused; it only applies to
    the batch-standardisation variant left commented out below.
    """
    # std = reward.std(dim=0)
    # mean = reward.mean(dim=0)
    # reward = reward_scale * (reward - mean) / (std + 1e-6)

    shifted = reward + 0.85
    return shifted

# Per-feature scaling constants, shaped (1, 1, 7) so they broadcast over a
# (batch, user, feature) state tensor. Built once at import time so forward
# passes do not re-allocate this tensor on every call.
# NOTE(review): assumes NUM_STATES == 7 — confirm against env.state_dim.
_STATE_SCALE = torch.tensor([[[3000, 4, 1, 5, 1, 20e5, 2]]])

def norm_state(state):
    """Scale each state feature into roughly [0, 1] by its expected maximum."""
    return state / _STATE_SCALE

class Net(nn.Module):
    """Two-hidden-layer MLP mapping a (batch, user, state) tensor to
    per-user Q-values of shape (batch, user, action)."""

    def __init__(self, state_dim=None, action_dim=None):
        """Build the network.

        Args:
            state_dim: input feature size; defaults to the module-level
                NUM_STATES so existing ``Net()`` callers are unchanged.
            action_dim: output (action-space) size; defaults to NUM_ACTIONS.
        """
        super(Net, self).__init__()
        if state_dim is None:
            state_dim = NUM_STATES
        if action_dim is None:
            action_dim = NUM_ACTIONS
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(128, 128)
        self.fc2.weight.data.normal_(0, 0.1)
        self.out = nn.Linear(128, action_dim)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return per-user Q-values for the (batch, user, state) input."""
        # Normalise raw state features before the MLP.
        x = norm_state(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Despite the original name, these are unnormalised Q-values,
        # not a probability distribution.
        action_prob = self.out(x)
        return action_prob


class MultiDqn():
    """Multi-agent DQN: one shared eval/target network produces per-user
    Q-values; each user acts independently under epsilon-greedy exploration."""

    def __init__(self, env: MECEnv):
        """Build eval/target networks, attach the shared replay buffer and
        the Adam optimizer, and cache the environment's dimensions."""
        self.eval_net, self.target_net = Net(), Net()

        # Counts learn() calls; every Q_NETWORK_ITERATION-th call syncs target_net.
        self.learn_step_counter = 0
        # Shared module-level replay buffer of (state, action, reward, next_state).
        self.memory = memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()
        self.action_dim = env.action_dim
        self.state_dim = env.state_dim
        self.user_num = env.user_num

    def choose_action(self, state):
        """Epsilon-greedy action selection.

        With probability EPISILO pick the argmax-Q action per user,
        otherwise sample each user's action uniformly at random.
        Returns a length-user_num array of action indices.
        """
        state = torch.unsqueeze(torch.FloatTensor(state), 0)  # add batch dim
        # BUG FIX: the original used np.random.randn() — a standard-normal
        # sample — which does not implement an epsilon threshold at all.
        # np.random.rand() draws uniformly from [0, 1) as epsilon-greedy needs.
        if np.random.rand() <= EPISILO:  # greedy policy
            action_value = self.eval_net.forward(state)
            # argmax over the action dimension -> one action per user
            action = torch.max(action_value, 2)[1].data.numpy()
            action = action[0]
        else:  # random policy
            action = np.random.randint(self.action_dim, size=self.user_num)
        return action

    def store_transition(self, state, action, reward, next_state):
        """Append one transition to the replay buffer."""
        # Use the buffer injected in __init__ rather than the module global
        # (same object today, but keeps the class self-contained).
        self.memory.appendMemory(state, action, reward, next_state)

    def learn(self):
        """Run one gradient step on a random minibatch.

        Returns:
            (loss, mean_q): scalar TD loss and the mean predicted Q-value.
        """
        # Periodically copy the online network into the frozen target network.
        if self.learn_step_counter % Q_NETWORK_ITERATION == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1

        # Sample BATCH_SIZE random buffer indices. main() only calls learn()
        # once memory_counter >= MEMORY_CAPACITY, so every slot is populated.
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)

        batch_state = torch.FloatTensor(self.memory.state[sample_index, :])
        batch_action = torch.LongTensor(self.memory.action[sample_index, :])
        # NOTE: 'reword' (sic) is the Memory class's attribute name.
        batch_reward = torch.FloatTensor(self.memory.reword[sample_index])
        batch_next_state = torch.FloatTensor(self.memory.next_state[sample_index, :])

        # Q(s, a) for the actions actually taken: gather along the action dim.
        q_eval_before = self.eval_net(batch_state)      # (B, users, actions)
        batch_action = batch_action.unsqueeze(-1)       # (B, users, 1)
        q_eval = q_eval_before.gather(2, batch_action)  # (B, users, 1)
        # Bootstrap target from the target network; detach so no gradient
        # flows through the target side.
        q_next = self.target_net(batch_next_state).detach()
        max_q_value = q_next.max(2)[0]                  # (B, users)
        gama_max_q_value = max_q_value * GAMMA
        # NOTE(review): terminal transitions are not masked out of the
        # bootstrap term — the Memory class does not store 'done' flags.
        q_target = batch_reward.unsqueeze(-1) + gama_max_q_value
        loss = self.loss_func(q_eval, q_target.unsqueeze(-1))

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item(), q_eval_before.mean().item()



def main():
    """Training loop: collect transitions, learn once the replay buffer is
    full, log per-episode metrics to wandb, then plot episode rewards."""
    with wandb.init(project="env_10_0.5", name=f"DQNM_{user_num}_{server_num}_{LR}"):
        agent = MultiDqn(env)
        episodes = 2000
        print("Collecting Experience....")
        episode_rewards = []
        fig, ax = plt.subplots()

        for episode in range(episodes):
            obs = env.reset()
            ep_reward = 0
            ep_loss = 0
            q_value = 0
            done = False
            while not done:
                act = agent.choose_action(obs)
                next_obs, reward, done, _ = env.step(act)
                agent.store_transition(obs, act, reward, next_obs)
                ep_reward += reward

                # Learning starts only once the replay buffer is full.
                if memory.memory_counter >= MEMORY_CAPACITY:
                    step_loss, step_value = agent.learn()
                    ep_loss += step_loss
                    q_value += step_value
                    if done:
                        print("episode: {} , the episode reward is {}, ep_loss :{}, q_value： {}".format(episode, ep_reward, ep_loss, q_value))
                        wandb.log({"Episode Reward": ep_reward, "Policy Loss": ep_loss, })
                if not done:
                    obs = next_obs

            episode_rewards.append(copy.copy(ep_reward))

        ax.plot(episode_rewards)
        plt.show()




if __name__ == '__main__':
    main()
