import itertools

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


def init_weights(m: nn.Module):
    """Initialize module weights: truncated normal for weights, zeros for biases.

    Applies only to ``nn.Linear`` and ``FCLayer`` modules; weight std is
    ``1 / (2 * sqrt(fan_in))`` as in the PETS ensemble-model initialization.
    """

    def truncated_normal_init(t, mean=0.0, std=0.01):
        # Draw N(mean, std) samples and re-draw any value outside mean +/- 2*std,
        # writing the accepted re-draws back into `t` IN PLACE. (The previous
        # implementation rebound the local name `t` with torch.where, so the
        # resampled values never reached the actual parameter tensor.)
        with torch.no_grad():
            torch.nn.init.normal_(t, mean, std)
            while True:
                cond = (t < mean - 2 * std) | (t > mean + 2 * std)
                if not cond.any():
                    break
                t[cond] = torch.empty_like(t).normal_(mean, std)[cond]
        return t

    if isinstance(m, nn.Linear) or isinstance(m, FCLayer):
        # nn.Linear exposes fan-in as `in_features`; the old code read
        # `m._input_dim` on it, which would have raised AttributeError.
        fan_in = m.in_features if isinstance(m, nn.Linear) else m._input_dim
        truncated_normal_init(m.weight, std=1 / (2 * np.sqrt(fan_in)))
        m.bias.data.fill_(0.0)


class Swish(nn.Module):
    """Swish activation layer: ``swish(x) = x * sigmoid(x)``."""

    def forward(self, x):
        return torch.sigmoid(x) * x


class FCLayer(nn.Module):
    """Fully connected layer shared across an ensemble of networks.

    Holds one (input_dim x output_dim) weight matrix and one bias vector per
    ensemble member; the forward pass applies all of them at once via a
    batched matrix multiply.
    """

    def __init__(self, input_dim, output_dim, ensemble_size, activation):
        super(FCLayer, self).__init__()
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._activation = activation
        # Uninitialized storage; actual values come from init_weights.
        self.weight = nn.Parameter(torch.Tensor(ensemble_size, input_dim, output_dim).to(device))
        self.bias = nn.Parameter(torch.Tensor(ensemble_size, output_dim).to(device))

    def forward(self, x):
        # x is expected as (ensemble_size, batch, input_dim) for bmm;
        # the bias is broadcast over the batch dimension.
        hidden = torch.bmm(x, self.weight)
        hidden = hidden + self.bias.unsqueeze(1)
        return self._activation(hidden)

class DynamicModel(nn.Module):
    """Five-layer ensemble MLP: four Swish hidden layers, identity output."""

    def __init__(self, input_dim, hidden_dim, output_dim, ensemble_size=5):
        super(DynamicModel, self).__init__()
        self.layer1 = FCLayer(input_dim, hidden_dim, ensemble_size, Swish())
        self.layer2 = FCLayer(hidden_dim, hidden_dim, ensemble_size, Swish())
        self.layer3 = FCLayer(hidden_dim, hidden_dim, ensemble_size, Swish())
        self.layer4 = FCLayer(hidden_dim, hidden_dim, ensemble_size, Swish())
        self.layer5 = FCLayer(hidden_dim, output_dim, ensemble_size, nn.Identity())
        # Truncated-normal weights, zero biases (see init_weights).
        self.apply(init_weights)

    def forward(self, data):
        out = data
        for layer in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            out = layer(out)
        return out


class EnsembleModel(nn.Module):
    """Probabilistic ensemble of Gaussian dynamics models, P(s', r | s, a).

    One small ``DynamicModel`` per predicted state dimension plus one for the
    reward; each ``DynamicModel`` is itself an ensemble of ``ensemble_size``
    networks and outputs a (mean, raw log-variance) pair per sample.
    """

    def __init__(self, state_dim, action_dim, ensemble_size=5, learning_rate=1e-3):
        super(EnsembleModel, self).__init__()
        # Output holds mean and variance, hence twice (state_dim + 1 reward).
        self._output_dim = (state_dim + 1) * 2
        # Soft bounds for the predicted log-variance (PETS-style squashing in
        # forward); registered as non-trainable parameters.
        self._max_logvar = nn.Parameter((torch.ones((1, self._output_dim // 2)).float() / 2).to(device),
                                        requires_grad=False)
        self._min_logvar = nn.Parameter((-torch.ones((1, self._output_dim // 2)).float() * 10).to(device),
                                        requires_grad=False)

        # self.layer1 = FCLayer(state_dim + action_dim, 200, ensemble_size, Swish())
        # self.layer2 = FCLayer(200, 200, ensemble_size, Swish())
        # self.layer3 = FCLayer(200, 200, ensemble_size, Swish())
        # self.layer4 = FCLayer(200, 200, ensemble_size, Swish())
        # self.layer5 = FCLayer(200, self._output_dim, ensemble_size, nn.Identity())

        # One dynamics network per next-state dimension; each outputs two
        # channels (mean, raw log-variance).
        self.dynamic_net = nn.ModuleList()
        for i in range(state_dim):
            self.dynamic_net.append(DynamicModel(state_dim+action_dim, 64, 2).to(device))

        # Separate network for the reward prediction.
        self.reward_net = nn.Sequential(DynamicModel(state_dim+action_dim, 64, 2)).to(device)

        # Initialize the environment-model parameters.
        # self.apply(init_weights)
        # Single optimizer over all sub-networks (and any trainable bounds).
        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        # self.dynamic_opti = torch.optim.Adam(self.dynamic_net.parameters(), lr=learning_rate)
        # self.reward_opti = torch.optim.Adam(self.reward_net.parameters(), lr=learning_rate)

        self.state_dim = state_dim
        self.action_dim = action_dim

    def forward(self, state_action, return_log_var=False):
        # state_action assumed to be (ensemble_size, batch_size, state_dim +
        # action_dim), matching FCLayer.forward's bmm — TODO confirm with callers.
        # Outputs are (ensemble_size, batch_size, state_dim + 1): next-state
        # dims first, reward in the last slot.
        # ret = torch.empty((state_action.shape[0], state_action.shape[1], self._output_dim)).to(device)
        mean = torch.empty((state_action.shape[0], state_action.shape[1], self.state_dim + 1)).to(device)
        var = torch.empty((state_action.shape[0], state_action.shape[1], self.state_dim + 1)).to(device)
        for i in range(self.state_dim):
            # Channel 0 is the mean, channel 1 the raw (unsquashed) log-variance.
            tmp_result = self.dynamic_net[i](state_action)
            mean[:, :, i] = tmp_result[:, :, 0]
            var[:, :, i] = tmp_result[:, :, 1]
        reward_result = self.reward_net(state_action)
        mean[:, :, -1] = reward_result[:, :, 0]
        var[:, :, -1] = reward_result[:, :, 1]
        # ret = self.layer5(self.layer4(self.layer3(self.layer2(self.layer1(state_action)))))
        # mean = ret[:, :, :self._output_dim // 2]
        # As in the PETS algorithm, squash the log-variance between the min and
        # max bounds via softplus so it stays differentiable.
        # logvar = self._max_logvar - F.softplus(self._max_logvar - ret[:, :, self._output_dim // 2:])
        logvar = self._max_logvar - F.softplus(self._max_logvar - var)
        logvar = self._min_logvar + F.softplus(logvar - self._min_logvar)
        # The conditional binds only to the second tuple element: returns
        # (mean, logvar) when return_log_var else (mean, variance).
        return mean, logvar if return_log_var else torch.exp(logvar)

    def loss(self, mean, logvar, lables, use_var_loss=True):
        """Gaussian NLL-style loss against the targets (``lables`` [sic]).

        With use_var_loss=True: inverse-variance-weighted MSE plus a
        log-variance penalty, each averaged per network and summed over the
        ensemble. Otherwise: plain per-network MSE. Returns
        (total_loss, per_network_mse_loss).
        """
        inverse_var = torch.exp(-logvar)
        if use_var_loss:
            mse_loss = torch.mean(torch.mean(torch.pow(mean - lables, 2) * inverse_var, dim=-1), dim=-1)
            var_loss = torch.mean(torch.mean(logvar, dim=-1), dim=-1)
            total_loss = torch.sum(mse_loss) + torch.sum(var_loss)
        else:
            mse_loss = torch.mean(torch.pow(mean - lables, 2), dim=(1, 2))
            total_loss = torch.sum(mse_loss)
        return total_loss, mse_loss

    def train(self, loss):
        """Take one Adam step on ``loss``.

        NOTE(review): this overrides ``nn.Module.train(mode)``, so calling
        ``.train()``/``.eval()`` on this module would invoke it with a bool —
        confirm that train/eval mode switching is never needed here.
        """
        self.optimizer.zero_grad()
        # self.reward_opti.zero_grad()
        # self.dynamic_opti.zero_grad()
        # Bound regularization as in PETS; with requires_grad=False on the
        # bounds this only shifts the reported loss value, not the gradients.
        loss += 0.01 * torch.sum(self._max_logvar) - 0.01 * torch.sum(self._min_logvar)
        loss.backward()
        self.optimizer.step()
        # self.reward_opti.step()
        # self.dynamic_opti.step()



class EnsembleDynamicsModel:
    """Trains and queries an ``EnsembleModel`` P(s', r | s, a).

    Training uses a held-out validation split and stops early once no ensemble
    member has significantly improved for several consecutive epochs.
    """

    def __init__(self, state_dim, action_dim, num_networks=5):
        self._num_network = num_networks
        self._state_dim, self._action_dim = state_dim, action_dim
        self.model = EnsembleModel(state_dim, action_dim, ensemble_size=num_networks)
        # Consecutive epochs without a significant holdout-loss improvement.
        self._epoch_since_last_update = 0

    def train(self, inputs, labels, batch_size=64, holdout_ratio=0.1, max_iter=20):
        """Fit the model on numpy arrays ``inputs``/``labels``.

        inputs: (N, state_dim + action_dim); labels: (N, state_dim + 1).
        Runs until the holdout loss stalls for >5 epochs or ``max_iter``
        epochs have passed.
        """
        # Shuffle once, then split into train / holdout sets.
        permutation = np.random.permutation(inputs.shape[0])
        inputs, labels = inputs[permutation], labels[permutation]
        num_holdout = int(inputs.shape[0] * holdout_ratio)
        train_inputs, train_labels = inputs[num_holdout:], labels[num_holdout:]
        holdout_inputs, holdout_labels = inputs[:num_holdout], labels[:num_holdout]
        holdout_inputs = torch.from_numpy(holdout_inputs).float().to(device)
        holdout_labels = torch.from_numpy(holdout_labels).float().to(device)
        # Replicate the holdout set once per ensemble member:
        # (N, D) -> (num_networks, N, D).
        holdout_inputs = holdout_inputs[None, :, :].repeat([self._num_network, 1, 1])
        holdout_labels = holdout_labels[None, :, :].repeat([self._num_network, 1, 1])

        # Best holdout loss seen so far, per network: {i: (epoch, loss)}.
        self._snapshots = {i: (None, 1e10) for i in range(self._num_network)}

        for epoch in itertools.count():
            # Each network sees the full training set in its own random order.
            train_index = np.vstack([np.random.permutation(train_inputs.shape[0]) for _ in range(self._num_network)])
            for batch_start_pos in range(0, train_inputs.shape[0], batch_size):
                batch_index = train_index[:, batch_start_pos:batch_start_pos + batch_size]
                train_input = torch.from_numpy(train_inputs[batch_index]).float().to(device)
                train_label = torch.from_numpy(train_labels[batch_index]).float().to(device)

                mean, logvar = self.model(train_input, return_log_var=True)
                loss, _ = self.model.loss(mean, logvar, train_label)
                self.model.train(loss)

            # Evaluate on the holdout set; stop once no network has improved
            # for several consecutive epochs, or max_iter is exceeded.
            with torch.no_grad():
                mean, logvar = self.model(holdout_inputs, return_log_var=True)
                _, holdout_losses = self.model.loss(mean, logvar, holdout_labels, use_var_loss=False)
                holdout_losses = holdout_losses.cpu()
                break_condition = self._save_best(epoch, holdout_losses)
                if break_condition or epoch > max_iter:
                    break

    def _save_best(self, epoch, losses, threshold=0.1):
        """Record per-network losses that improved by more than ``threshold``
        (relative); return True once no network has improved for >5 epochs."""
        updated = False
        for i in range(len(losses)):
            current = losses[i]
            _, best = self._snapshots[i]
            improvement = (best - current) / best
            if improvement > threshold:
                self._snapshots[i] = (epoch, current)
                updated = True
        self._epoch_since_last_update = 0 if updated else self._epoch_since_last_update + 1
        return self._epoch_since_last_update > 5

    def predict(self, inputs, batch_size=64):
        """Batched ensemble prediction.

        inputs: (N, state_dim + action_dim) numpy array. Returns numpy arrays
        (mean, variance), each (num_networks, N, state_dim + 1).
        """
        mean, var = [], []
        # Inference only: no_grad avoids building an autograd graph (the old
        # code built it and merely detach()ed afterwards, wasting memory).
        with torch.no_grad():
            for i in range(0, inputs.shape[0], batch_size):
                # Slicing already clamps at the array end; no min() needed.
                input_ = torch.from_numpy(inputs[i:i + batch_size]).float().to(device)
                cur_mean, cur_var = self.model(input_[None, :, :].repeat([self._num_network, 1, 1]),
                                               return_log_var=False)
                mean.append(cur_mean.cpu().numpy())
                var.append(cur_var.cpu().numpy())
        # hstack concatenates 3-D arrays along axis 1 (the sample axis).
        return np.hstack(mean), np.hstack(var)
