import random

import numpy as np
import torch
import torch.nn.functional as F
import wandb

from torch import nn
from torch.optim import Adam

from env.env2_v2 import MECEnv
import utils


def norm_state(state):
    """Scale a raw observation tensor to roughly unit range.

    Divides element-wise by a per-feature normalisation vector repeated
    once per user. NOTE(review): relies on the module-level globals
    ``user_num`` and ``device`` being assigned by the __main__ block.
    """
    scale = torch.tensor([[1, 1, 20e5, 2] * user_num]).to(device)
    return state / scale


class SequenceReplayBuffer:
    def __init__(self, obs_size, max_size=2000, batch_size=32, max_sequence_len=20):
        """Fixed-size replay buffer that stores whole sequences of transitions.

        Each of the ``max_size`` slots holds up to ``max_sequence_len``
        timesteps; ``zero_mask`` is 1.0 on timesteps that contain valid
        data and 0.0 on padding.
        """
        grid = (max_size, max_sequence_len)
        self.obs = np.zeros(grid + (obs_size,), dtype="float32")
        self.rewards = np.zeros(grid, dtype="float32")
        self.act = np.zeros(grid, dtype="int64")
        self.dones = np.zeros(grid, dtype="float32")
        self.zero_mask = np.zeros(grid, dtype="float32")

        # Observation preceding the next transition to be pushed.
        self.prev_obs = None

        # t: write position inside the current sequence.
        # counter: number of completed sequences so far.
        self.t = 0
        self.counter = 0

        self.max_size = max_size
        self.batch_size = batch_size
        self.max_sequence_len = max_sequence_len

    def push_first(self, first_obs):
        """Record the initial observation of a new episode."""
        self.prev_obs = first_obs

    def push(self, next_obs, action, reward, done):
        """Append one transition to the sequence currently being filled."""
        slot = self.counter % self.max_size
        step = self.t
        self.obs[slot, step] = self.prev_obs
        self.act[slot, step] = action
        self.rewards[slot, step] = reward
        self.dones[slot, step] = done
        self.zero_mask[slot, step] = 1

        self.t = step + 1
        self.prev_obs = next_obs

        # Episode ended early: invalidate the unfilled tail of this slot
        # and treat the sequence as complete.
        if done and self.t < self.max_sequence_len:
            self.zero_mask[slot, self.t:] *= 0.0
            self.t = self.max_sequence_len

        # Sequence complete: advance to the next slot and clear any stale
        # mask left over from a previous pass around the ring.
        if self.t >= self.max_sequence_len:
            self.counter += 1
            self.zero_mask[self.counter % self.max_size, :] *= 0.0
            self.t = 0

    def is_ready(self):
        """True once at least ``batch_size`` sequences have been completed."""
        return self.counter >= self.batch_size

    def sample(self):
        """Draw ``batch_size`` random stored sequences as torch tensors.

        Returns (obs, actions, rewards, dones, zero_mask) batches.
        """
        upper = min(self.counter, self.max_size)
        picks = np.random.randint(0, upper, self.batch_size)

        arrays = (self.obs, self.act, self.rewards, self.dones, self.zero_mask)
        return tuple(torch.from_numpy(a[picks]) for a in arrays)


class DQNRNN(nn.Module):
    """Recurrent Q-network: linear encoder -> LSTM -> linear Q-value head."""

    def __init__(self, input_size, hidden_size, num_lstm, out_features):
        super(DQNRNN, self).__init__()
        self.h_dim = hidden_size
        # Stored so reset_hidden_state can size the state correctly when
        # num_lstm > 1 (the layer dim was previously hard-coded to 1,
        # which breaks multi-layer LSTMs).
        self.num_layers = num_lstm
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_lstm, batch_first=True)
        self.head = nn.Linear(hidden_size, out_features)

    def forward(self, obs, hidden_state, targ_device=torch.device("cpu")):
        """Return Q-values for every timestep plus the new LSTM state.

        Args:
            obs: observation batch; normalised via the module-level
                ``norm_state`` before encoding.
            hidden_state: ``(h, c)`` tuple, e.g. from ``reset_hidden_state``
                or a previous forward pass.
            targ_device: unused; kept for backward compatibility with
                existing callers.

        Returns:
            ``(q_values, (h, c))``.
        """
        hn, cn = hidden_state
        obs = norm_state(obs)
        x = F.gelu(self.fc1(obs))
        x, (hn, cn) = self.lstm(x, (hn, cn))
        return self.head(x), (hn, cn)

    def reset_hidden_state(self, batch_size):
        """Return zero-initialised ``(h0, c0)`` on the module-level ``device``.

        Shapes follow torch.nn.LSTM: ``(num_layers, hidden)`` for unbatched
        input (batch_size == 1) and ``(num_layers, batch, hidden)`` otherwise.
        https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
        """
        if batch_size == 1:
            size = (self.num_layers, self.h_dim)
        else:
            size = (self.num_layers, batch_size, self.h_dim)
        h0 = torch.zeros(size, dtype=torch.float32).to(device)
        c0 = torch.zeros(size, dtype=torch.float32).to(device)
        return (h0, c0)


class Agent:
    """Double-DQN agent with a recurrent (LSTM) Q-network and
    epsilon-greedy exploration."""

    def __init__(
            self,
            obs_size,
            num_actions,
            num_rnn=1,
            rnn_hidden_dim=8,
            lr=5e-3,
            gamma=0.99,
            target_update=200,
            eps_min=0.05,
            eps_decay_steps=20_000

    ):
        """Build online/target networks and the optimiser.

        Args:
            obs_size: flattened observation dimensionality.
            num_actions: size of the discrete action space.
            num_rnn: number of stacked LSTM layers.
            rnn_hidden_dim: LSTM hidden size.
            lr: Adam learning rate.
            gamma: discount factor.
            target_update: learn steps between target-network syncs.
            eps_min: floor for the exploration rate.
            eps_decay_steps: act steps over which epsilon decays linearly.
        """
        # Parameters
        self.obs_size = obs_size
        self.num_actions = num_actions
        self.gamma = gamma
        self.target_update = target_update
        self.eps_decay_steps = eps_decay_steps
        self.eps_min = eps_min

        # Networks (target starts as an exact copy of the online net).
        network_params = {
            'input_size': obs_size,
            'hidden_size': rnn_hidden_dim,
            'num_lstm': num_rnn,
            'out_features': num_actions,
        }
        self.q_net = DQNRNN(**network_params).to(device)
        self.target_q_net = DQNRNN(**network_params).to(device)
        self.target_q_net.load_state_dict(self.q_net.state_dict())
        self.target_q_net.eval()

        # Optimiser
        self.optimizer = Adam(self.q_net.parameters(), lr=lr)

        # Counters
        self.learn_step = 0
        self.act_step = 0

    def reset(self, batch_size):
        """Reset the recurrent state at the start of an episode."""
        self.hidden_state = self.q_net.reset_hidden_state(batch_size)

    def q_learning(self, obs, act, rew, done, mask):
        """One gradient step of masked sequence double Q-learning.

        Args:
            obs:  (B, T, obs_size) float tensor of observations.
            act:  (B, T) int64 tensor of chosen actions.
            rew:  (B, T) float tensor of rewards.
            done: (B, T) float tensor, 1.0 where the episode terminated.
            mask: (B, T) float tensor, 1.0 on valid (non-padding) steps.

        Returns:
            The scalar TD loss as a Python float.
        """
        obs = obs.to(device)
        act = act.to(device)
        rew = rew.to(device)
        done = done.to(device)
        mask = mask.to(device)

        B, T = act.shape  # (batch_size, sequence_length)

        # Unroll online network over the full sequence from a zero state.
        hidden_state = self.q_net.reset_hidden_state(B)
        q_values, _ = self.q_net.forward(obs, hidden_state)

        # Q-value of the chosen action. Squeeze only the gathered axis so a
        # batch (or sequence) of size 1 keeps its dimension — a bare
        # .squeeze() would silently collapse it.
        act_q_value = q_values.gather(2, act.unsqueeze(2)).squeeze(-1)

        with torch.no_grad():
            # Unroll target network
            hidden_state = self.target_q_net.reset_hidden_state(B)
            target_q_values, _ = self.target_q_net.forward(obs, hidden_state)

            # Double Q-learning: online net selects, target net evaluates.
            target_act = q_values.max(2)[1]  # (batch_size, seq_len)
            target_q_value = target_q_values.gather(2, target_act.unsqueeze(2)).squeeze(-1)

        # Align predictions at step t with bootstrap targets at step t+1.
        act_q_value = act_q_value[:, :T - 1]  # chop off last timestep
        rew = rew[:, :-1]
        done = done[:, :-1]
        mask = mask[:, :-1]
        target_q_value = target_q_value[:, 1:]  # chop off first timestep

        # Bellman target; (1 - done) stops bootstrapping across episode ends.
        bellman_target = rew + self.gamma * (1 - done) * target_q_value

        # Masked mean squared TD error (padding steps contribute nothing).
        squared_td_error = (bellman_target - act_q_value) ** 2
        loss = torch.sum(squared_td_error * mask) / torch.sum(mask)

        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Periodically sync the target network with the online network.
        if self.learn_step % self.target_update == 0:
            self.target_q_net.load_state_dict(self.q_net.state_dict())

        # Increment counter
        self.learn_step += 1

        return loss.detach().item()

    def select_action(self, obs, greedy=False, evaluation=False):
        """Epsilon-greedy action selection for a single timestep.

        Args:
            obs: 1-D numpy observation for the current timestep.
            greedy: force the greedy action regardless of epsilon.
            evaluation: unused; kept for backward compatibility.

        Returns:
            The chosen action index as an int.
        """
        # The forward pass must run even when the action ends up random, so
        # self.hidden_state always tracks the full observation history.
        with torch.no_grad():
            obs = torch.from_numpy(obs).unsqueeze(0).to(device)
            q_values, self.hidden_state = self.q_net(obs, self.hidden_state)

        # Linearly decayed epsilon, floored at eps_min.
        epsilon = max(self.eps_min, 1.0 - self.act_step / self.eps_decay_steps)

        # Greedy action selection vs exploration
        if random.random() > epsilon or greedy:
            a = q_values.max(1)[1].view(1, 1).item()
        else:
            a = np.random.randint(0, self.num_actions, size=1)[0]

        # Increment counter
        self.act_step += 1

        return a


if __name__ == "__main__":
    # Pick the compute device. NOTE: `device` is read as a module-level
    # global by norm_state, DQNRNN and Agent.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("cuda")
    else:
        device = torch.device("cpu")
        print("cpu")

    # Hyperparameters
    num_episodes = 10_000
    batch_size = 128
    seed = 0

    # for reproducability
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Initialise env, agent, memory.
    # NOTE: `user_num` is also read as a module-level global by norm_state.
    user_num = 4
    server_num = 4
    env = MECEnv.env(user_num, server_num, 1, 0)
    lr = 5e-4
    # Joint action space: one discrete choice per user, hence
    # action_dim ** user_num combined actions.
    agent = Agent(obs_size=env.observation_dim * user_num, num_actions=env.action_dim ** user_num, rnn_hidden_dim=20,
                  lr=lr)
    mem = SequenceReplayBuffer(obs_size=env.observation_dim * user_num, max_size=10_000, batch_size=batch_size)

    ep_returns = []

    with wandb.init(project="PomdpEnv_1_0", name=f"drqn_pomdp_{user_num}_{server_num}_{lr}"):
        for e in range(num_episodes):
            # NOTE(review): reset() appears to return (info, obs) — confirm
            # against MECEnv.
            _, obs = env.reset()
            obs = obs.flatten()

            done = False
            # Fresh LSTM hidden state for the new episode (batch of 1).
            agent.reset(1)
            mem.push_first(obs)

            ep_return = 0
            ep_policy_loss = 0
            ep_delay = 0
            ep_consumption = 0
            while not done:
                # Step actor

                action = agent.select_action(obs)

                # Step environment.
                # NOTE(review): step() seems to return
                # (_, reward, done, _, delay, consumption, next_obs) — verify
                # against MECEnv before relying on this ordering.
                _, rew, done, _, delay, consumption, obs = env.step(utils.translateAction(action, user_num, server_num))
                obs = obs.flatten()

                # Push transition to memory
                mem.push(obs, action, rew, done)

                # Accumulate per-episode metrics.
                ep_return += rew
                ep_delay += delay
                ep_consumption +=consumption

                # Do a gradient step once enough full sequences are stored.
                if mem.is_ready():
                    # Sample memory
                    train_obs, train_act, train_rew, train_done, train_mask = mem.sample()
                    policy_loss = agent.q_learning(train_obs, train_act, train_rew, train_done, train_mask)
                    ep_policy_loss += policy_loss

            # Log per-episode metrics to wandb.
            #wandb.log({"Episode Reward": np.mean(ep_returns[-50:]), "Policy Loss": ep_policy_loss,"delay":ep_delay,"energy consumption":ep_consumption})
            wandb.log({"Episode Reward": ep_return, "Policy Loss": ep_policy_loss,"delay":ep_delay,"energy consumption":ep_consumption})
            ep_returns.append(ep_return)

            # Console logging (every episode, since e % 1 == 0 always holds).
            if e % 1 == 0:
                # Recompute epsilon purely for display; mirrors the formula
                # inside Agent.select_action.
                epsilon = max(agent.eps_min, 1.0 - agent.act_step / agent.eps_decay_steps)
                print(
                    "Episode", e,
                    "Avg. Episode return:", ep_return,
                    "Epsilon", round(float(epsilon), 3),
                    "Train steps:", int(agent.learn_step),
                    "Timesteps", int(agent.act_step),
                    sep='\t'
                )
