import torch.nn as nn
from torch.nn import functional as F
import numpy as np
import torch

def weights_init_(m):
    """Xavier-initialize Linear layers and zero their biases.

    Intended for use with ``nn.Module.apply``; non-Linear modules are
    left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_uniform_(m.weight, gain=1)
    nn.init.constant_(m.bias, 0)

def restrict(data, min, max):
    """
    Squash ``data`` into the interval [min, max] using tanh.

    :param data: a torch tensor (any shape)
    :param min: lower bound of the target interval
    :param max: upper bound of the target interval
    :return: tensor of the same shape with values mapped into [min, max]
    """
    # NOTE: `min`/`max` shadow the builtins; names kept for backward
    # compatibility with keyword callers.
    data = torch.tanh(data)  # map values into (-1, 1)
    # BUG FIX: the original computed this expression but never returned it,
    # so the function always returned None.
    return min + 0.5 * (max - min) * (data + 1)

def soft_update_model1_with_model2(model1, model2, rou):
    """
    Polyak-average model2's parameters into model1 in place
    (the usual target-network soft update).

    :param model1: a pytorch model, updated in place (the target)
    :param model2: a pytorch model of the same class (the source)
    :param rou: the update is model1 <- rou*model1 + (1-rou)model2
    """
    param_pairs = zip(model1.parameters(), model2.parameters())
    for target_param, source_param in param_pairs:
        blended = rou * target_param.data + (1 - rou) * source_param.data
        target_param.data.copy_(blended)

def test_agent(agent, test_env, max_ep_len, n_eval=1, logger = None):
    ep_return_list = np.zeros(n_eval)
    for j in range(n_eval):
        obs, reward, done, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
        while not (done or (ep_len == max_ep_len)):
            # Take deterministic actions at test time
            action = agent.get_test_action(obs)
            next_obs, reward, done, _ = test_env.step(action)
            ep_ret += reward
            ep_len += 1
            obs = next_obs
        ep_return_list[j] = ep_ret
    return ep_return_list

class Mlp(nn.Module):
    """
    Plain multi-layer perceptron with optional BatchNorm1d inserted
    before each hidden activation; the output layer is left un-activated.
    """

    def __init__(self, input_dim, hidden_space, output_dim, hidden_activation = F.relu, use_bn = False):
        """
        :param input_dim: size of the input features
        :param hidden_space: iterable of hidden-layer widths
        :param output_dim: size of the network output
        :param hidden_activation: activation applied after each hidden layer
        :param use_bn: if True, add BatchNorm1d after each hidden Linear
        """
        super().__init__()
        self.input_dim = input_dim
        self.hidden_space = hidden_space
        self.output_dim = output_dim
        self.hidden_activation = hidden_activation
        self.use_bn = use_bn
        # NOTE: bn_list must be registered before hidden_layers so the
        # ordering of parameters() matches the original implementation.
        if use_bn:
            self.bn_list = nn.ModuleList()
        self.hidden_layers = nn.ModuleList()

        prev_dim = input_dim
        for width in hidden_space:
            if use_bn:
                self.bn_list.append(nn.BatchNorm1d(width))
            self.hidden_layers.append(nn.Linear(prev_dim, width))
            prev_dim = width

        self.lastlayer = nn.Linear(prev_dim, output_dim)
        # Xavier-init every Linear layer, zero biases
        self.apply(weights_init_)

    def forward(self, input):
        """Run the MLP; returns the raw output of the final Linear layer."""
        h = input
        for idx, fc in enumerate(self.hidden_layers):
            h = fc(h)
            if self.use_bn:
                h = self.bn_list[idx](h)  # normalize pre-activation
            h = self.hidden_activation(h)
        return self.lastlayer(h)

