import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import itertools
import collections
import random

# Select GPU when available, otherwise CPU; every module below creates its parameters on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Swish(nn.Module):
    '''Swish activation function: f(x) = x * sigmoid(x).'''

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        # Sigmoid gate multiplied back onto the input.
        gate = torch.sigmoid(x)
        return gate * x

def init_weights(m):
    '''Initialize a layer's parameters with a truncated normal distribution.

    Weights are redrawn until every entry lies within two standard deviations
    of the mean, with std scaled by the layer's fan-in; biases are zeroed.
    Applies to plain nn.Linear layers and to the ensemble FCLayer in this file.
    '''
    def truncated_normal_init(t, mean=0.0, std=0.01):
        '''Fill tensor t IN PLACE with N(mean, std) truncated to +/- 2 std.'''
        with torch.no_grad():
            torch.nn.init.normal_(t, mean=mean, std=std)
            while True:
                cond = (t < mean - 2 * std) | (t > mean + 2 * std)
                if not cond.any():
                    break
                # Redraw only the out-of-range entries, writing into t itself.
                # (The previous version rebound the local name with torch.where,
                # so the truncated values never reached the parameter tensor.)
                t[cond] = torch.empty_like(t).normal_(mean=mean, std=std)[cond]
        return t

    if isinstance(m, nn.Linear):
        # nn.Linear exposes its fan-in as in_features (it has no _input_dim).
        truncated_normal_init(m.weight, std=1 / (2 * np.sqrt(m.in_features)))
        m.bias.data.fill_(0.0)
    elif isinstance(m, FCLayer):
        truncated_normal_init(m.weight, std=1 / (2 * np.sqrt(m._input_dim)))
        m.bias.data.fill_(0.0)

class FCLayer(nn.Module):
    '''Fully connected layer evaluated for a whole ensemble at once.

    Holds one (input_dim x output_dim) weight matrix and one bias vector
    per ensemble member; forward runs all members with a single batched matmul.
    '''
    def __init__(self, input_dim, output_dim, ensemble_size, activation):
        super(FCLayer, self).__init__()
        self._input_dim = input_dim
        self._output_dim = output_dim
        self.weight = nn.Parameter(torch.Tensor(ensemble_size, input_dim, output_dim).to(device))
        self.bias = nn.Parameter(torch.Tensor(ensemble_size, output_dim).to(device))
        self._activation = activation

    def forward(self, x):
        # x: (ensemble_size, batch, input_dim); bias broadcasts over the batch dim.
        linear_out = torch.bmm(x, self.weight) + self.bias[:, None, :]
        return self._activation(linear_out)

class EnsembleModel(nn.Module):
    '''Ensemble of probabilistic dynamics models with Gaussian outputs.

    All ensemble members are evaluated in one batched pass; each member
    outputs the mean and log-variance of a diagonal Gaussian over the
    concatenated (reward, next-state) target.
    '''
    def __init__(self,state_dim,action_dim,model_alpha,ensemble_size=5,learning_rate=1e-3):
        super(EnsembleModel, self).__init__()
        # The output packs both mean and variance, hence twice (state dim + 1 reward dim).
        self._output_dim = (state_dim + 1)*2
        self._model_alpha = model_alpha  # weight of the logvar-bound term added to the loss in train()
        # Soft upper/lower bounds on the predicted log-variance.
        # NOTE(review): requires_grad=False freezes both bounds, so the
        # regularization term added in train() cannot actually move them.
        self._max_logvar = nn.Parameter((torch.ones((1,self._output_dim // 2)).float() / 2).to(device),requires_grad=False)
        self._min_logvar = nn.Parameter((-torch.ones((1,self._output_dim // 2)).float() * 10).to(device),requires_grad=False)

        self.layer1 = FCLayer(state_dim+action_dim,200,ensemble_size,Swish())
        self.layer2 = FCLayer(200,200,ensemble_size,Swish())
        self.layer3 = FCLayer(200,200,ensemble_size,Swish())
        self.layer4 = FCLayer(200,200,ensemble_size,Swish())
        self.layer5 = FCLayer(200,self._output_dim,ensemble_size,nn.Identity())
        self.apply(init_weights)  # initialize all parameters of the environment model
        self.optimizer = torch.optim.Adam(self.parameters(),lr=learning_rate)

    def forward(self,x,return_log_var=False):
        '''Run all members on x: (ensemble_size, batch, state_dim + action_dim).

        Returns (mean, logvar) when return_log_var is True, else (mean, variance).
        '''
        ret = self.layer5(self.layer4(self.layer3(self.layer2(self.layer1(x)))))
        mean = ret[:,:,:self._output_dim//2]
        # As in the PETS algorithm, softly squash the raw log-variance
        # between the minimum and maximum bounds via softplus.
        logvar = self._max_logvar - F.softplus(self._max_logvar - ret[:,:,self._output_dim//2:])
        logvar = self._min_logvar + F.softplus(logvar - self._min_logvar)
        # Precedence: the conditional applies only to the second tuple element,
        # so this returns (mean, logvar) or (mean, exp(logvar)) as intended.
        return mean,logvar if return_log_var else torch.exp(logvar)

    def loss(self,mean,logvar,labels,use_var_loss=True):
        '''Compute (scalar total loss, per-member loss vector).

        With use_var_loss=True this is a Gaussian NLL up to constants:
        inverse-variance-weighted MSE plus a log-variance penalty.
        Otherwise a plain per-member MSE (used for holdout validation).
        '''
        inverse_var = torch.exp(-logvar)
        if use_var_loss:
            mse_loss = torch.mean(torch.mean(torch.pow(mean - labels,2) * inverse_var,dim=-1),dim=-1)
            var_loss = torch.mean(torch.mean(logvar,dim=-1),dim=-1)
            total_loss = torch.sum(mse_loss) + torch.sum(var_loss)
        else:
            mse_loss = torch.mean(torch.pow(mean - labels,2),dim=(1,2))
            total_loss = torch.sum(mse_loss)
        return total_loss,mse_loss

    def train(self,loss):
        '''Take one Adam step on the given scalar loss.

        NOTE(review): this overrides nn.Module.train(), so the standard
        .train()/.eval() mode switching is unavailable on this module
        (.eval() would route False into this method and fail).
        '''
        self.optimizer.zero_grad()
        # Regularize the logvar bounds; a no-op here since they are frozen above.
        loss += self._model_alpha * torch.sum(self._max_logvar)-self._model_alpha*torch.sum(self._min_logvar)
        loss.backward()
        self.optimizer.step()

class EnsembleDynamicsModel:
    '''Ensemble of environment models with a refined training procedure.

    Rather than fixing the number of training epochs, a fraction of the data
    is held out to validate the model; training stops once no ensemble member
    has improved on the holdout set for more than 5 consecutive epochs.
    '''
    def __init__(self,state_dim,action_dim,model_alpha=0.01,num_network=5):
        self._num_network = num_network
        self._state_dim,self._action_dim = state_dim,action_dim
        self.model = EnsembleModel(state_dim,action_dim,model_alpha,ensemble_size=num_network)
        self._epoch_since_last_update = 0

    def train(self,inputs,labels,batch_size=64,holdout_ratio=0.1,max_iter=20):
        '''Train on numpy arrays (inputs, labels) with holdout-based early stopping.'''
        # Shuffle, then split into a training set and a holdout (validation) set.
        permutation = np.random.permutation(inputs.shape[0])
        inputs,labels = inputs[permutation],labels[permutation]
        num_holdout = int(inputs.shape[0]*holdout_ratio)
        train_inputs,train_labels = inputs[num_holdout:], labels[num_holdout:]
        holdout_inputs,holdout_labels = inputs[:num_holdout], labels[:num_holdout]
        holdout_inputs = torch.from_numpy(holdout_inputs).float().to(device)
        holdout_labels = torch.from_numpy(holdout_labels).float().to(device)
        # Replicate the holdout set once per member: shape (num_network, N, dim).
        holdout_inputs = holdout_inputs[None,:,:].repeat([self._num_network,1,1])
        holdout_labels = holdout_labels[None,:,:].repeat([self._num_network,1,1])
        # Best (epoch, holdout loss) observed so far for each ensemble member.
        self._snapshots = {i: (None,1e10) for i in range(self._num_network)}

        for epoch in itertools.count():
            # An independent shuffling of the training data for each member.
            train_index = np.vstack([np.random.permutation(train_inputs.shape[0]) for _ in range(self._num_network)])
            # Every real transition is used for training in each epoch.
            for batch_start_pos in range(0,train_inputs.shape[0],batch_size):
                batch_index = train_index[:,batch_start_pos:batch_start_pos+batch_size]
                train_input = torch.from_numpy(train_inputs[batch_index]).float().to(device)
                train_label = torch.from_numpy(train_labels[batch_index]).float().to(device)

                mean,logvar = self.model(train_input,return_log_var=True)
                loss,_=self.model.loss(mean,logvar,train_label)
                self.model.train(loss)

            # Validate on the holdout split using plain MSE (no variance term).
            with torch.no_grad():
                mean,logvar = self.model(holdout_inputs,return_log_var=True)
                _,holdout_losses = self.model.loss(mean,logvar,holdout_labels,use_var_loss=False)
                holdout_losses = holdout_losses.cpu()
                break_condition = self.save_best(epoch,holdout_losses)
                if break_condition or epoch > max_iter: # stop training
                    break

    def save_best(self,epoch,losses,threshold=0.1):
        '''Record members whose holdout loss improved by more than `threshold`
        (relative); return True once >5 epochs have passed with no improvement.'''
        updated = False
        for i in range(len(losses)):
            current = losses[i]
            _,best = self._snapshots[i]
            improvement = (best - current) / best
            if improvement > threshold:
                self._snapshots[i] = (epoch,current)
                updated = True
        self._epoch_since_last_update = 0 if updated else self._epoch_since_last_update + 1
        return self._epoch_since_last_update > 5

    def predict(self,inputs,batch_size=64):
        '''Return (mean, var) numpy arrays of shape (num_network, batch, out_dim).

        NOTE(review): batch_size is currently unused — the whole input is
        tiled across members and evaluated in a single forward pass.
        '''
        inputs = np.tile(inputs,(self._num_network,1,1))
        inputs = torch.tensor(inputs,dtype=torch.float).to(device)
        mean,var = self.model(inputs,return_log_var=False)
        return mean.detach().cpu().numpy(),var.detach().cpu().numpy()

class FakeEnv:
    '''Model-backed "environment" wrapping the learned dynamics ensemble.

    Given an observation and an action it predicts (reward, next_obs) by
    sampling from the ensemble; this is the transition oracle used in MPC.
    '''
    def __init__(self, model):
        self.model = model

    def step(self, obs, act):
        model_input = np.concatenate((obs, act), axis=-1)
        means, variances = self.model.predict(model_input)
        # Outputs are [reward, state delta]: add obs back to get absolute next states.
        means[:, :, 1:] += obs
        stds = np.sqrt(variances)
        # One Gaussian sample per ensemble member and per batch element.
        sampled = means + np.random.normal(size=means.shape) * stds

        _, num_samples, _ = means.shape
        # Randomly assign an ensemble member to each batch element.
        chosen_models = np.random.choice(list(range(self.model._num_network)), size=num_samples)
        row_ids = np.arange(0, num_samples)
        picked = sampled[chosen_models, row_ids]
        # NOTE(review): the trailing [0] indices return only the first batch
        # element, which presumes a single-sample batch — confirm with callers.
        rewards = picked[:, :1][0][0]
        next_obs = picked[:, 1:][0]
        return rewards, next_obs

class ReplayBuffer:
    '''Fixed-capacity FIFO store of (state, action, reward, next_state, done) tuples.'''

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest transition when full.
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        '''Append one transition to the buffer.'''
        self.buffer.append((state, action, reward, next_state, done))

    def size(self):
        '''Number of transitions currently stored.'''
        return len(self.buffer)

    def sample(self, batch_size):
        '''Sample batch_size transitions without replacement; if the buffer is
        smaller than batch_size, return everything instead.'''
        if batch_size > len(self.buffer):
            return self.return_all_samples()
        picked = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*picked)
        return np.array(states), actions, rewards, np.array(next_states), dones

    def return_all_samples(self):
        '''Return every stored transition in insertion order.'''
        states, actions, rewards, next_states, dones = zip(*list(self.buffer))
        return np.array(states), actions, rewards, np.array(next_states), dones
