import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch
import copy
from torch.autograd import Variable
import lib.common as common



class Actor(nn.Module):
    """Deterministic policy network.

    Maps an environment observation to two bounded embeddings — one for
    the discrete action and one for the continuous (parameter) action —
    each squashed by tanh and scaled to [-max_action, max_action].
    """

    def __init__(self, state_dim, discrete_action_dim, parameter_action_dim, max_action):
        """
        state_dim: dimension of the environment observation
        discrete_action_dim: dimension of the discrete-action embedding
        parameter_action_dim: dimension of the continuous-action embedding
        max_action: magnitude bound applied to both outputs
        """
        super(Actor, self).__init__()

        hidden = 256
        self.l1 = nn.Linear(state_dim, hidden)
        self.l2 = nn.Linear(hidden, hidden)
        self.l3_1 = nn.Linear(hidden, discrete_action_dim)
        self.l3_2 = nn.Linear(hidden, parameter_action_dim)

        self.max_action = max_action

    def forward(self, state):
        """Return (discrete_embedding, parameter_embedding) for `state`."""
        features = F.relu(self.l2(F.relu(self.l1(state))))
        discrete_action = self.max_action * torch.tanh(self.l3_1(features))
        parameter_action = self.max_action * torch.tanh(self.l3_2(features))
        return discrete_action, parameter_action


class Critic(nn.Module):
    """State-action value network.

    Consumes the observation together with the discrete-action and
    continuous-action embeddings and produces a scalar Q-value.
    """

    def __init__(self, state_dim, discrete_action_dim, parameter_action_dim):
        """
        state_dim: dimension of the environment observation
        discrete_action_dim: dimension of the discrete-action embedding
        parameter_action_dim: dimension of the continuous-action embedding
        """
        super(Critic, self).__init__()

        # Q(s, a_discrete, a_parameter) -> scalar
        in_features = state_dim + discrete_action_dim + parameter_action_dim
        self.l1 = nn.Linear(in_features, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)

    def forward(self, state, discrete_action, parameter_action):
        """Return the Q-value for each (state, action-embedding) row."""
        joint = torch.cat([state, discrete_action, parameter_action], 1)
        hidden = F.relu(self.l1(joint))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)


class DDPG(object):
    def __init__(self, state_dim,
                 discrete_action_dim,
                 parameter_action_dim,
                 max_action,
                 discount=0.99,
                 tau=0.001,
                 actor_lr=3e-4,
                 critic_lr=3e-4,
				 tau_actor=0.005,
				 tau_critic=0.005,
                 ):
        '''
        state_dim: 环境观察的维度
        discrete_action_dim： 离散动作的维度
        parameter_action_dim： 连续动作的维度
        max_action： 动作的最大值
        discount： 折扣因子
        tau： 软更新的参数
        actor_lr： actor网络的学习率
        critic_lr： critic网络的学习率
        tau_actor: todo
        tau_critic: todo
        '''
        print("actor_lr,critic_lr,tau_actor,tau_critic", actor_lr,critic_lr,tau_actor,tau_critic)

        self.discrete_action_dim = discrete_action_dim
        self.parameter_action_dim = parameter_action_dim

        # 离散动作嵌入动作的最大值、最小值和动作嵌入范围
        self.action_max = torch.from_numpy(np.ones((self.discrete_action_dim,))).float().to(device)
        self.action_min = -self.action_max.detach()
        self.action_range = (self.action_max - self.action_min).detach()

        # 连续动作嵌入动作的最大值、最小值和动作嵌入范围
        self.action_parameter_max = torch.from_numpy(np.ones((self.parameter_action_dim,))).float().to(device)
        self.action_parameter_min = -self.action_parameter_max.detach()
        # print(" self.action_parameter_max_numpy", self.action_parameter_max)
        self.action_parameter_range = (self.action_parameter_max - self.action_parameter_min)

        self.actor = Actor(state_dim, discrete_action_dim, parameter_action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)  # 3e-4

        self.critic = Critic(state_dim, discrete_action_dim, parameter_action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        # 3e-4

        self.discount = discount
        # self.tau = tau

        self.tau_actor = tau_actor
        self.tau_critic = tau_critic

        self.max_action = max_action

        self.total_it = 0 # todo
        self.noise_clip = 0.5 # todo

    def select_action(self, state):
        '''
        state: 环境的观察
        返回：离散动作嵌入、连续动作嵌入
        作用：根据环境的观察选择动作嵌入
        '''
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        all_discrete_action, all_parameter_action = self.actor(state)
        return all_discrete_action.cpu().data.numpy().flatten(), all_parameter_action.cpu().data.numpy().flatten()

    def train(self, replay_buffer, action_rep, c_rate, recon_s_rate, batch_size=256):
        '''
        replay_buffer： 经验回放缓冲区
        action_rep： 动作表示模型
        c_rate： 连续动作嵌入空间的边界范围
        recon_s_rate： 重建观察差值损失合理范围
        batch_size： 批量大小
        返回：离散动作重标记率、连续动作重标记率
        '''
        recon_s_rate = recon_s_rate * 5.0 # 这里是为了这个系数的作用是放大质量判断的阈值，使得更多的参数嵌入被认为是"合格"的
        self.total_it += 1
        # Sample replay buffer 采样 离散 不连续
        # discrete_emb：对应真实离散动作的嵌入向量表示
        # state_next_state：表示新旧状态误差
        state, discrete_action, parameter_action, all_parameter_action, discrete_emb, parameter_emb, next_state, state_next_state, reward, not_done = replay_buffer.sample(
            batch_size)
        # print("discrete_emb----------",discrete_emb)
        with torch.no_grad():
            # 获取离散动作的嵌入表示
            discrete_emb_ = action_rep.get_embedding(discrete_action.reshape(1, -1).squeeze().long()).to(device)
            # discrete relable need noise 计算离散动作的噪音量 因为之前约束了符合标准正态分布，可以这么采样噪音
            noise_discrete = (
                    torch.randn_like(discrete_emb_) * 0.1
            ).clamp(-self.noise_clip, self.noise_clip)
            discrete_emb_table = discrete_emb_.clamp(-self.max_action, self.max_action) # 离散动作的嵌入表示，规范了范围
            discrete_emb_table_noise = (discrete_emb_ + noise_discrete).clamp(-self.max_action, self.max_action) # 增加了噪音的离散动作的嵌入表示

            discrete_action_old = action_rep.select_discrete_action(discrete_emb).reshape(-1, 1) # 将嵌入动作向量转换为离散动作的值
            d_new = discrete_action.cpu().numpy()
            d_old = discrete_action_old
            # 这里d_new和d_old的命名存在混淆，d_new是经验回放缓冲区中存储的真实离散动作，d_old是根据嵌入向量重新计算得到的离散动作
            # 可以理解为d_new是旧的，d_old是新的，所以最好重新命名比较好
            d_bing = (d_new == d_old) * 1# 这里是在计算什么？？计算新旧值之间是否匹配的bool矩阵
            # discrete_relable_rate
            discrete_relable_rate = sum(d_bing.reshape(1, -1)[0]) / batch_size # 计算新旧嵌入动作之间总共有多少匹配的动作
            d_bing = torch.FloatTensor(d_bing).float().to(device)
            '''
            用于选择性地替换经验池中的动作嵌入

            # 伪代码表示
            for i in range(batch_size):
                if d_bing[i] == 1:  # 动作匹配
                    discrete_emb_[i] = discrete_emb[i]  # 保持原始嵌入
                else:  # d_bing[i] == 0，动作不匹配
                    discrete_emb_[i] = discrete_emb_table_noise[i]  # 使用重标记的嵌入，表示嵌入质量差，这个是重新根据实际离散动作计算得到含有噪音的离散动作嵌入

            todo 这就有一个问题，实际的动作嵌入也是根据网络预测的，为啥会出现偏差？
            难道是根据训练不断的得到一个更加准确的动作嵌入
            '''
            discrete_emb_ = d_bing * discrete_emb + (1.0 - d_bing) * discrete_emb_table_noise
            # print("discrete_emb_final",discrete_emb_)

            # 预测新旧观察的差值
            predict_delta_state = action_rep.select_delta_state(state, parameter_emb, discrete_emb_table)
            
            # print("predict_delta_state",predict_delta_state)
            # print("state_next_state",state_next_state.cpu().numpy())
            delta_state = (np.square(predict_delta_state - state_next_state.cpu().numpy())).mean(axis=1).reshape(-1, 1) # 计算预测的新旧状态误差和实际的新旧状态误差之间的差值
            # delta_state=predict_delta_state-state_next_state.cpu().numpy()
            # delta_state=np.mean(delta_state, axis=1).reshape(-1, 1)
            s_bing = (abs(delta_state) < recon_s_rate) * 1 # 计算预测误差是否在允许的范围内，又是一个bool矩阵
            parameter_relable_rate = sum(s_bing.reshape(1, -1)[0]) / batch_size # 计算在允许范围内的比例，也就是计算预测比较准确的比例
            s_bing = torch.FloatTensor(s_bing).float().to(device)# 转换为tensor
            # print("s_bing",s_bing)

            # 将真实的样本数据通过vae编码重建得到动作和观察的嵌入表示，以及连续动作和观察的潜在空间均值和方差
            recon_c, recon_s, mean, std = action_rep.vae(state, discrete_emb_table, parameter_action)
            parameter_emb_ = mean + std * torch.randn_like(std) # 重参数化采样潜在空间的值
            for i in range(len(parameter_emb_[0])):
                # todo 后续修改映射的范围，减少这种映射操作
                # 这个循环是在逐维度处理参数嵌入，将每个维度的值从VAE的分布范围映射回标准化范围
                # 这里对应于在采样时做的转换
                '''
                这是范围标准化的逆变换：
                    输入: VAE分布范围内的参数嵌入值
                    输出: 标准化范围[-1, 1]内的参数嵌入值
                '''
                parameter_emb_[:, i:i + 1] = self.true_parameter_emb(parameter_emb_[:, i:i + 1], c_rate, i)
            # print("parameter_emb",parameter_emb)
            # print("parameter_emb_",parameter_emb_)

            # 根据s_bing选择性地替换经验池中的连续动作嵌入，保证传入到训练中的连续动作嵌入是比较准确的
            # 同理由于模型的不断迭代，潜在空间采样的连续动作嵌入会越来越准确，所以需要确保经验池中的连续动作嵌入也是比较准确的，对于不准确的就用重新编码得到的
            parameter_emb_ = s_bing * parameter_emb + (1 - s_bing) * parameter_emb_
            # print("parameter_emb_final",parameter_emb_)

            discrete_emb_ = discrete_emb_.clamp(-self.max_action, self.max_action)
            parameter_emb_ = parameter_emb_.clamp(-self.max_action, self.max_action)

            discrete_emb = discrete_emb_
            parameter_emb = parameter_emb_

        # Compute the target Q value
        next_discrete_action, next_parameter_action = self.actor_target(next_state)
        target_Q = self.critic_target(next_state, next_discrete_action, next_parameter_action)
        target_Q = reward + (not_done * self.discount * target_Q).detach()

        # Get current Q estimate
        current_Q = self.critic(state, discrete_emb, parameter_emb)

        # Compute critic loss
        critic_loss = F.mse_loss(current_Q, target_Q)

        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Compute actor loss
        inverting_gradients = True
        # inverting_gradients = False
        # Compute actor losse
        if inverting_gradients:
            # 这里计算存在重复，看md文档
            with torch.no_grad():
                next_discrete_action, next_parameter_action = self.actor(state)
                action_params = torch.cat((next_discrete_action, next_parameter_action), dim=1)
            action_params.requires_grad = True
            # 这个损失就是经典的求最大Q值的损失
            actor_loss = self.critic(state, action_params[:, :self.discrete_action_dim],
                                     action_params[:, self.discrete_action_dim:]).mean()
        else:
            next_discrete_action, next_parameter_action = self.actor(state)
            actor_loss = -self.critic(state, next_discrete_action, next_parameter_action).mean()

        # Optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()

        if inverting_gradients:
            from copy import deepcopy
            delta_a = deepcopy(action_params.grad.data)
            # 2 - apply inverting gradients and combine with gradients from actor
            actions, action_params = self.actor(Variable(state))
            # # 对连续动作参数应用梯度反转
            action_params = torch.cat((actions, action_params), dim=1)
            # 根据预测的动作值和边界值调整梯度
            delta_a[:, self.discrete_action_dim:] = self._invert_gradients(
                delta_a[:, self.discrete_action_dim:].cpu(),
                action_params[:, self.discrete_action_dim:].cpu(),
                grad_type="action_parameters", inplace=True)
            # 对离散动作嵌入应用梯度反转
            delta_a[:, :self.discrete_action_dim] = self._invert_gradients(
                delta_a[:, :self.discrete_action_dim].cpu(),
                action_params[:, :self.discrete_action_dim].cpu(),
                grad_type="actions", inplace=True)
            # 这段代码的作用是将调整后的梯度应用到Actor网络参数上，详细看md文档
            out = -torch.mul(delta_a, action_params)
            self.actor.zero_grad()
            out.backward(torch.ones(out.shape).to(device))
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.)

            # torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.)
            self.actor_optimizer.step()

        # Update the frozen target models
        # 同步更新目标网络
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau_critic * param.data + (1 - self.tau_critic) * target_param.data)

        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(self.tau_actor * param.data + (1 - self.tau_actor) * target_param.data)

        return discrete_relable_rate, parameter_relable_rate

    # def save(self, filename):
    # 	torch.save(self.critic.state_dict(), filename + "_critic")
    # 	torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
    #
    # 	torch.save(self.actor.state_dict(), filename + "_actor")
    # 	torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
    #
    #
    # def load(self, filename):
    # 	self.critic.load_state_dict(torch.load(filename + "_critic"))
    # 	self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
    # 	self.critic_target = copy.deepcopy(self.critic)
    #
    # 	self.actor.load_state_dict(torch.load(filename + "_actor"))
    # 	self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
    # 	self.actor_target = copy.deepcopy(self.actor)

    def save(self, filename, directory):
        torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
        torch.save(self.critic_optimizer.state_dict(), '%s/%s_critic_optimizer.pth' % (directory, filename))

        torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
        torch.save(self.actor_optimizer.state_dict(), '%s/%s_actor_optimizer.pth' % (directory, filename))

    def load(self, filename, directory):
        self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))
        self.critic_optimizer.load_state_dict(torch.load('%s/%s_critic_optimizer.pth' % (directory, filename)))
        self.critic_target = copy.deepcopy(self.critic)

        self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))
        self.actor_optimizer.load_state_dict(torch.load('%s/%s_actor_optimizer.pth' % (directory, filename)))
        self.actor_target = copy.deepcopy(self.actor)

    def _invert_gradients(self, grad, vals, grad_type, inplace=True):
        '''
        grad: 要调整的梯度张量
        vals: 与梯度对应的动作值张量
        grad_type: 指定梯度类型，是针对离散动作嵌入("actions")还是连续动作参数("action_parameters")
        inplace: 是否在原地修改梯度

        是一个梯度调整函数，用于处理有界动作空间中的梯度优化问题。它根据当前动作值在边界中的位置，智能地调整梯度强度，避免边界处的梯度消失问题
        '''

        if grad_type == "actions":
            max_p = self.action_max.cpu() # 上边界 [1, 1, 1, ...]
            min_p = self.action_min.cpu()  # 下边界 [-1, -1, -1, ...]
            rnge = self.action_range.cpu() # 范围 [2, 2, 2, ...]
        elif grad_type == "action_parameters":
            max_p = self.action_parameter_max.cpu()
            min_p = self.action_parameter_min.cpu()
            rnge = self.action_parameter_range.cpu()
        else:
            raise ValueError("Unhandled grad_type: '" + str(grad_type) + "'")

        assert grad.shape == vals.shape

        if not inplace:
            grad = grad.clone()
        with torch.no_grad():
            for n in range(grad.shape[0]): # 遍历batch中的每个样本
                # index = grad < 0  # actually > but Adam minimises, so reversed (could also double negate the grad)
                '''
                原理：
                正梯度调整公式：
                adjusted_grad_positive = original_grad * (max_value - current_value) / range
                负梯度调整公式：
                adjusted_grad_negative = original_grad * (current_value - min_value) / range

                边界感知的梯度缩放
                接近边界时：梯度被大幅缩小，避免越界
                远离边界时：梯度保持较大，确保有效学习
                中间位置时：梯度适度调整，平衡探索与约束
                '''
                index = grad[n] > 0
                # # 对正梯度进行缩放
                grad[n][index] *= (index.float() * (max_p - vals[n]) / rnge)[index]
                # # 对负梯度进行缩放
                grad[n][~index] *= ((~index).float() * (vals[n] - min_p) / rnge)[~index]

        return grad

    def count_boundary(self, c_rate):
        median = (c_rate[0] - c_rate[1]) / 2
        offset = c_rate[0] - 1 * median
        return median, offset

    def true_parameter_emb(self, parameter_action, c_rate, i):
        # parameter_action_ = parameter_action.clone()
        median, offset = self.count_boundary(c_rate[i])
        # parameter_action_[i] = parameter_action_[i] * median + offset
        parameter_action = (parameter_action - offset) / median
        return parameter_action
    


# Vanilla Variational Auto-Encoder
class VAE(nn.Module):
    """Conditional VAE over (state, discrete-action embedding, continuous action).

    Encodes the triple into a latent vector and decodes it back into the
    continuous action and the next-minus-current observation delta. Also
    owns the learnable discrete-action embedding table.
    """

    def __init__(self, state_dim, action_dim, action_embedding_dim, continuous_action_dim, latent_dim, max_action,
                 hidden_size=256, device='cpu'):
        '''
        state_dim: dimension of the environment observation
        action_dim: number of discrete actions (rows of the embedding table)
        action_embedding_dim: dimension of one discrete action's embedding
        continuous_action_dim: total dimension of the continuous actions
        latent_dim: dimension of the action latent space
        max_action: magnitude bound of actions (assumed symmetric around 0)
        hidden_size: width of the hidden layers
        device: torch device the inputs are moved to
        '''
        super(VAE, self).__init__()
        self.device = device
        # Embedding table: one learnable row per discrete action, initialized
        # uniformly in (-1, 1). Works like a word-embedding matrix that maps a
        # discrete action index to a continuous vector.
        init_tensor = torch.rand(action_dim,
                                 action_embedding_dim) * 2 - 1  # Don't initialize near the extremes.
        self.embeddings = torch.nn.Parameter(init_tensor.type(torch.float32), requires_grad=True)
        print("self.embeddings", self.embeddings)
        self.e0_0 = nn.Linear(state_dim + action_embedding_dim, hidden_size)
        self.e0_1 = nn.Linear(continuous_action_dim, hidden_size)

        self.e1 = nn.Linear(hidden_size, hidden_size)
        self.e2 = nn.Linear(hidden_size, hidden_size)
        self.mean = nn.Linear(hidden_size, latent_dim)
        self.log_std = nn.Linear(hidden_size, latent_dim)

        self.d0_0 = nn.Linear(state_dim + action_embedding_dim, hidden_size)
        self.d0_1 = nn.Linear(latent_dim, hidden_size)
        self.d1 = nn.Linear(hidden_size, hidden_size)
        self.d2 = nn.Linear(hidden_size, hidden_size)

        self.continuous_action_output = nn.Linear(hidden_size, continuous_action_dim)

        self.d3 = nn.Linear(hidden_size, hidden_size)

        self.delta_state_output = nn.Linear(hidden_size, state_dim)  # reconstructs the observation delta

        self.max_action = max_action
        self.latent_dim = latent_dim

    def forward(self, state, action, action_parameter):
        '''
        state: environment observation
        action: embedding vector of the discrete action
        action_parameter: continuous action values attached to the discrete action

        return: reconstructed continuous actions, reconstructed observation
        delta (next minus current state), and the latent mean and std
        '''
        # Make sure all tensors live on the module's device
        state = state.to(self.device)
        action = action.to(self.device)
        action_parameter = action_parameter.to(self.device)

        z_0 = F.relu(self.e0_0(torch.cat([state, action], 1)))  # features from (state, discrete embedding)
        z_1 = F.relu(self.e0_1(action_parameter))  # features from the continuous action values
        z = z_0 * z_1  # combine the two via elementwise product

        # Further feature extraction
        z = F.relu(self.e1(z))
        z = F.relu(self.e2(z))

        mean = self.mean(z)  # latent mean conditioned on (state, discrete, continuous)
        # Clamped for numerical stability
        log_std = self.log_std(z).clamp(-4, 15)  # latent log-std, clamped

        std = torch.exp(log_std)  # actual standard deviation
        z = mean + std * torch.randn_like(std)  # reparameterized sample around the mean
        u, s = self.decode(state, z, action)  # decode the action and the observation delta

        # return: reconstructed continuous actions, reconstructed observation
        # delta, and the latent mean/std of the continuous action
        return u, s, mean, std

    def decode(self, state, z=None, action=None, clip=None, raw=False):
        '''
        state: environment observation
        z: latent action vector (sampled if None)
        action: embedding vector of the discrete action
        clip: optional symmetric clip applied when z is sampled here
        raw: if True return the raw (unsquashed) outputs; otherwise apply
             tanh scaling

        return: (predicted continuous action values, reconstructed
        observation delta)
        '''
        # When sampling from the VAE, the latent vector is clipped to [-0.5, 0.5]
        if z is None:
            # Sample a random latent action vector
            z = torch.randn((state.shape[0], self.latent_dim)).to(self.device)
            if clip is not None:
                z = z.clamp(-clip, clip)
        # Make sure all tensors live on the module's device
        state = state.to(self.device)
        action = action.to(self.device)
        z = z.to(self.device)

        # Features from the observation and the discrete-action embedding
        v_0 = F.relu(self.d0_0(torch.cat([state, action], 1)))
        v_1 = F.relu(self.d0_1(z))  # decode the latent action vector
        v = v_0 * v_1  # combine both feature streams
        v = F.relu(self.d1(v))  # further decoding
        v = F.relu(self.d2(v))

        # Predict the continuous action
        continuous_action = self.continuous_action_output(v)

        v = F.relu(self.d3(v))
        s = self.delta_state_output(v)  # reconstruct the observation delta

        if raw: return continuous_action, s
        return self.max_action * torch.tanh(continuous_action), torch.tanh(s)



class NeuralNet(nn.Module):
    """nn.Module base adding optimizer stepping, checkpointing and periodic
    NaN detection; subclasses are expected to define `self.optim`."""

    def __init__(self):
        super(NeuralNet, self).__init__()
        self.ctr = 0                      # update calls since the last NaN check
        self.nan_check_fequency = 10000   # (sic) NaN-check interval in steps

    def custom_weight_init(self):
        """Re-initialize every submodule via the module-level `weight_init`."""
        for module in self.modules():
            weight_init(module)

    def update(self, loss, retain_graph=False, clip_norm=False):
        """Backpropagate `loss` and take one optimizer step."""
        self.optim.zero_grad()
        loss.backward(retain_graph=retain_graph)
        self.step(clip_norm)

    def step(self, clip_norm):
        """Apply an optimizer step, optionally clipping the gradient norm."""
        if clip_norm:
            torch.nn.utils.clip_grad_norm_(self.parameters(), clip_norm)
        self.optim.step()
        self.check_nan()

    def save(self, filename):
        torch.save(self.state_dict(), filename)

    def load(self, filename):
        self.load_state_dict(torch.load(filename))

    def check_nan(self):
        """Every `nan_check_fequency` calls, verify no weight became NaN."""
        self.ctr += 1
        if self.ctr != self.nan_check_fequency:
            return
        self.ctr = 0
        # Note: nan != nan  #https://github.com/pytorch/pytorch/issues/4767
        for name, param in self.named_parameters():
            if (param != param).any():
                raise ValueError(name + ": Weights have become nan... Exiting.")

    def reset(self):
        return


class Action_representation(NeuralNet):
    def __init__(self,
                 state_dim,
                 action_dim,
                 continuous_action_dim,
                 reduced_action_dim=2,
                 reduce_continuous_action_dim=2,
                 embed_lr=1e-4,
                 device='cpu'
                 ):
        '''
        state_dim: dimension of the environment observation
        action_dim: number of discrete actions
        continuous_action_dim: dimension of the continuous action values
        reduced_action_dim: dimension of the discrete-action embedding
        reduce_continuous_action_dim: dimension of the continuous-action latent space
        embed_lr: default learning rate for embedding/VAE updates
        device: torch device used for all tensors
        '''
        super(Action_representation, self).__init__()
        self.device = device
        # self.device = torch.device("cpu")
        self.continuous_action_dim = continuous_action_dim
        self.reduced_action_dim = reduced_action_dim
        self.reduce_continuous_action_dim = reduce_continuous_action_dim
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Action embeddings to project the predicted action into original dimensions
        # latent_dim=action_dim*2+continuous_action_dim*2
        self.latent_dim = self.reduce_continuous_action_dim
        self.embed_lr = embed_lr
        # Conditional VAE that owns the embedding table and the decoder
        self.vae = VAE(state_dim=self.state_dim, action_dim=self.action_dim,
                       action_embedding_dim=self.reduced_action_dim, continuous_action_dim=self.continuous_action_dim,
                       latent_dim=self.latent_dim, max_action=1.0,
                       hidden_size=256,
                       device=self.device).to(self.device)
        self.vae_optimizer = torch.optim.Adam(self.vae.parameters(), lr=1e-4)

    def discrete_embedding(self, ):
        """Return the VAE's raw (pre-tanh) discrete-action embedding table."""
        return self.vae.embeddings

    def unsupervised_loss(self, s1, a1, a2, s2, sup_batch_size, embed_lr):
        '''
        s1: environment observations
        a1: discrete actions (indices)
        a2: continuous action values attached to the discrete actions
        s2: next-minus-current observation deltas
        sup_batch_size: number of sampled transitions
        embed_lr: learning rate for this VAE update

        return: scalar total VAE loss, observation-delta loss, continuous
        action reconstruction loss and KL loss (scalars, for logging)
        '''

        # Convert discrete action indices to embedding vectors
        a1 = self.get_embedding(a1).to(self.device)
        
        # Run one VAE training step and return the component losses
        vae_loss, recon_loss_d, recon_loss_c, KL_loss = self.train_step(s1, a1, a2, s2, sup_batch_size, embed_lr)
        return vae_loss, recon_loss_d, recon_loss_c, KL_loss

    def loss(self, state, action_d, action_c, next_state, sup_batch_size):
        '''
        state: environment observations
        action_d: discrete-action embedding vectors
        action_c: continuous action values attached to the discrete actions
        next_state: next-minus-current observation deltas
        sup_batch_size: number of sampled transitions (unused here)

        return: total VAE loss, observation-delta reconstruction loss,
        weighted continuous-action reconstruction loss, weighted KL loss
        '''
        # recon_s reconstructs the next-minus-current observation delta
        recon_c, recon_s, mean, std = self.vae(state, action_d, action_c)

        # reduction='mean' averages the squared error over all elements; it
        # replaces the deprecated size_average=True, which behaved identically.
        recon_loss_s = F.mse_loss(recon_s, next_state, reduction='mean')  # observation-delta reconstruction loss
        recon_loss_c = F.mse_loss(recon_c, action_c, reduction='mean')  # continuous-action reconstruction loss

        # KL term keeps the latent close to N(0, I): preserves reconstruction
        # ability while retaining stochasticity
        KL_loss = -0.5 * (1 + torch.log(std.pow(2)) - mean.pow(2) - std.pow(2)).mean()

        # Loss weights were tuned empirically (alternatives kept for reference)
        # vae_loss = 0.25 * recon_loss_s + recon_loss_c + 0.5 * KL_loss
        # vae_loss = 0.25 * recon_loss_s + 2.0 * recon_loss_c + 0.5 * KL_loss  #best
        vae_loss = recon_loss_s + 2.0 * recon_loss_c + 0.5 * KL_loss
        # print("vae loss",vae_loss)
        # return vae_loss, 0.25 * recon_loss_s, recon_loss_c, 0.5 * KL_loss
        # return vae_loss, 0.25 * recon_loss_s, 2.0 * recon_loss_c, 0.5 * KL_loss #best
        return vae_loss, recon_loss_s, 2.0 * recon_loss_c, 0.5 * KL_loss

    def train_step(self, s1, a1, a2, s2, sup_batch_size, embed_lr=1e-4):
        '''
        s1: environment observations
        a1: discrete-action embedding vectors
        a2: continuous action values attached to the discrete actions
        s2: next-minus-current observation deltas
        sup_batch_size: number of sampled transitions
        embed_lr: learning rate to use for this update

        return: total VAE loss, observation-delta loss, continuous-action
        reconstruction loss and KL loss as numpy scalars (for logging)
        '''
        state = s1
        action_d = a1
        action_c = a2
        next_state = s2
        vae_loss, recon_loss_s, recon_loss_c, KL_loss = self.loss(state, action_d, action_c, next_state,
                                                                  sup_batch_size)

        # Update the learning rate in place instead of rebuilding the Adam
        # optimizer every step: recreating it discarded the running first and
        # second moment estimates, silently degrading Adam's update quality.
        for group in self.vae_optimizer.param_groups:
            group['lr'] = embed_lr
        self.vae_optimizer.zero_grad()
        vae_loss.backward()
        self.vae_optimizer.step()

        return vae_loss.cpu().data.numpy(), recon_loss_s.cpu().data.numpy(), recon_loss_c.cpu().data.numpy(), KL_loss.cpu().data.numpy()

    def select_parameter_action(self, state, z, action):
        '''
        state: a single environment observation (numpy)
        z: continuous-action embedding mapped into the real action range
        action: predicted discrete-action embedding

        return: decoded continuous action values as a flat numpy array
        '''
        with torch.no_grad():
            obs = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
            latent = torch.FloatTensor(z.reshape(1, -1)).to(self.device)
            emb = torch.FloatTensor(action.reshape(1, -1)).to(self.device)
            # decode returns (continuous action, observation delta); only the
            # action is needed here
            action_c, _ = self.vae.decode(obs, latent, emb)
        return action_c.cpu().data.numpy().flatten()

    # def select_delta_state(self, state, z, action):
    #     with torch.no_grad():
    #         state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
    #         z = torch.FloatTensor(z.reshape(1, -1)).to(self.device)
    #         action = torch.FloatTensor(action.reshape(1, -1)).to(self.device)
    #         action_c, state = self.vae.decode(state, z, action)
    #     return state.cpu().data.numpy().flatten()
    def select_delta_state(self, state, z, action):
        '''
        state: environment observations (tensor)
        z: continuous-action embeddings
        action: discrete-action embeddings

        return: decoded next-minus-current observation deltas as a numpy array
        '''
        with torch.no_grad():
            # decode returns (continuous action, observation delta); only the
            # delta is needed here
            _, delta = self.vae.decode(state, z, action)
        return delta.cpu().data.numpy()

    def get_embedding(self, action):
        '''
        action: discrete action index (or indices)

        return: the corresponding embedding row(s) from the VAE's table,
        squashed into (-1, 1) with tanh — akin to a word-embedding lookup
        '''
        return torch.tanh(self.vae.embeddings[action])

    def get_match_scores(self, action):
        '''
        action: discrete-action embedding vector(s)

        return: similarity of the input embedding(s) to every row of the
        (tanh-squashed) embedding table; larger means closer
        '''
        # compute similarity probability based on L2 norm
        embeddings = self.vae.embeddings
        embeddings = torch.tanh(embeddings)
        action = action.to(self.device)
        # Similarity between the input embedding and every table embedding
        similarity = - common.pairwise_distances(action, embeddings)  # Negate euclidean to convert diff into similarity score
        return similarity

        # The best action is the one whose table embedding is closest to the input.

    def select_discrete_action(self, action):
        '''
        Map a policy-produced action embedding to a concrete discrete action.

        action: discrete-action embedding vector(s)

        return: the index of the most similar table embedding — a plain int
        for a single row, otherwise a numpy array of indices
        '''
        similarity = self.get_match_scores(action)
        _, best = torch.max(similarity, dim=1)  # index of the closest embedding per row
        # print("pos",pos,len(pos))
        if len(best) == 1:
            return best.cpu().item()  # data.numpy()[0]
        return best.cpu().numpy()

    def save(self, filename, directory):
        """Persist the VAE weights (the embedding table is part of them)."""
        path = '%s/%s_vae.pth' % (directory, filename)
        torch.save(self.vae.state_dict(), path)

    def load(self, filename, directory):
        """Restore the VAE weights onto this model's device."""
        path = '%s/%s_vae.pth' % (directory, filename)
        self.vae.load_state_dict(torch.load(path, map_location=self.device))

    def get_c_rate(self, s1, a1, a2, s2, batch_size=100, range_rate=5):
        '''
        s1: environment observations
        a1: discrete actions (indices)
        a2: continuous action values attached to the discrete actions
        s2: next-minus-current observation deltas
        batch_size: number of sampled transitions
        range_rate: per-hundred fraction trimmed off each side when
            estimating the empirical latent-space boundaries

        return: per-dimension boundary range of the latent space, and the
        scalar reconstruction loss of the observation delta (numpy)
        '''
        a1 = self.get_embedding(a1).to(self.device)  # discrete indices -> embedding vectors
        s1 = s1.to(self.device)
        s2 = s2.to(self.device)
        a2 = a2.to(self.device)
        # VAE forward: reconstructed continuous action, reconstructed
        # observation delta, latent mean and std
        recon_c, recon_s, mean, std = self.vae(s1, a1, a2)
        # Reparameterization trick: z = mean + std * eps stays differentiable
        # while remaining stochastic (sampling torch.normal(mean, std)
        # directly would not propagate gradients)
        z = mean + std * torch.randn_like(std)
        z = z.cpu().data.numpy()
        # Empirical boundary range of the latent space
        c_rate = self.z_range(z, batch_size, range_rate)

        # Observation-delta reconstruction loss; reduction='mean' replaces
        # the deprecated (and behaviorally identical) size_average=True
        recon_s_loss = F.mse_loss(recon_s, s2, reduction='mean')

        # recon_s = abs(np.mean(recon_s.cpu().data.numpy()))
        return c_rate, recon_s_loss.detach().cpu().numpy()

    def z_range(self, z, batch_size=100, range_rate=5):
        '''
        Compute per-dimension empirical bounds of the latent samples.

        For each latent dimension, the batch values are sorted and the
        ``range_rate``-percent order statistics at both tails are taken as
        the boundaries, clipping away the most extreme samples.

        z: array-like of shape (batch, latent_dim) -- latent samples drawn
           from the VAE posterior (already detached / converted to numpy)
        batch_size: number of rows in ``z`` used to scale the tail index
        range_rate: percentage of samples discarded at each tail

        Returns a tuple of ``latent_dim`` two-element lists, each
        ``[upper_bound, lower_bound]`` (upper first, matching the original
        ordering).

        NOTE: the previous hand-unrolled implementation only supported
        latent_dim in {2, 3, 4, 6, 8, 10, 12, 16} (returning None otherwise)
        and stashed sorted columns in self.z1..self.z16; this version handles
        any dimensionality with identical results for those sizes and keeps
        no per-call scratch attributes on self.
        '''
        z = np.asarray(z)
        # Tail index: e.g. batch_size=100, range_rate=5 -> border=5, so the
        # 5th-smallest / 5th-largest values become the lower / upper bounds.
        border = int(range_rate * (batch_size / 100))
        bounds = []
        for dim in range(z.shape[1]):
            column = np.sort(z[:, dim])
            # [upper, lower] -- same element order as the original code.
            bounds.append([column[-border - 1], column[border]])
        return tuple(bounds)
        


class QActor(nn.Module):
    """Q-network over the hybrid action space.

    Consumes a state concatenated with the proposed continuous action
    parameters and emits one Q-value per discrete action, so the agent can
    pick the argmax discrete action conditioned on those parameters.
    """

    def __init__(self, state_size, action_size, action_parameter_size, hidden_layers=(100,), action_input_layer=0,
                 output_layer_init_std=None, activation="relu", **kwargs):
        """
        state_size: dimensionality of the observation
        action_size: number of discrete actions (width of the Q output)
        action_parameter_size: dimensionality of the continuous parameters
        hidden_layers: widths of the hidden layers
        action_input_layer: unused here; kept for interface compatibility
        output_layer_init_std: if given, the output weights are drawn from
            a zero-mean normal with this std; otherwise they keep their
            default initialisation
        activation: hidden-layer nonlinearity, "relu" or "leaky_relu"
        """
        super(QActor, self).__init__()
        self.state_size = state_size
        self.action_size = action_size
        self.action_parameter_size = action_parameter_size
        self.activation = activation

        # Hidden stack; the input is state ++ action parameters.
        self.layers = nn.ModuleList()
        in_features = self.state_size + self.action_parameter_size
        last_size = in_features
        if hidden_layers is not None:
            widths = list(hidden_layers)
            self.layers.append(nn.Linear(in_features, widths[0]))
            for prev, cur in zip(widths, widths[1:]):
                self.layers.append(nn.Linear(prev, cur))
            last_size = widths[-1]
        # Output head: one Q-value per discrete action.
        self.layers.append(nn.Linear(last_size, self.action_size))

        # Kaiming init for hidden layers; biases zeroed everywhere.
        for layer in self.layers[:-1]:
            nn.init.kaiming_normal_(layer.weight, nonlinearity=activation)
            nn.init.zeros_(layer.bias)
        if output_layer_init_std is not None:
            nn.init.normal_(self.layers[-1].weight, mean=0., std=output_layer_init_std)
        # else:
        #     nn.init.zeros_(self.layers[-1].weight)
        nn.init.zeros_(self.layers[-1].bias)

    def forward(self, state, action_parameters):
        """Return Q-values of shape (batch, action_size) for the given state
        and continuous action parameters.
        """
        negative_slope = 0.01  # slope used when activation == "leaky_relu"

        # Concatenate observation and continuous parameters column-wise.
        x = torch.cat((state, action_parameters), dim=1)
        for layer in self.layers[:-1]:
            if self.activation == "relu":
                x = F.relu(layer(x))
            elif self.activation == "leaky_relu":
                x = F.leaky_relu(layer(x), negative_slope)
            else:
                raise ValueError("Unknown activation function "+str(self.activation))
        return self.layers[-1](x)


class ContinuousActor(nn.Module):
    """Deterministic actor head for the continuous part of a hybrid action.

    Maps a state to a vector of continuous action parameters. A zero-
    initialised, frozen "passthrough" linear layer adds a direct
    state-to-output projection on top of the learned trunk, providing a
    stable gradient path while the trunk learns.
    """

    def __init__(self, state_size, action_size, action_parameter_size, hidden_layers, squashing_function=False,
                 output_layer_init_std=None, init_type="kaiming", activation="relu", init_std=None):
        """
        state_size: observation dimensionality
        action_size: number of discrete actions (stored; not used in forward)
        action_parameter_size: width of the continuous-parameter output
        hidden_layers: widths of the trunk's hidden layers (or None)
        squashing_function: unsupported; must remain False
        output_layer_init_std: std for the output layer's normal init;
            if None the output weights are zeroed
        init_type: "kaiming" or "normal" weight init for the trunk
        activation: "relu" or "leaky_relu"
        init_std: std used when init_type == "normal"
        """
        super(ContinuousActor, self).__init__()

        self.state_size = state_size
        self.action_size = action_size
        self.action_parameter_size = action_parameter_size
        self.squashing_function = squashing_function
        self.activation = activation
        if init_type == "normal":
            assert init_std is not None and init_std > 0
        assert self.squashing_function is False  # unsupported, cannot get scaling right yet

        # Trunk: state -> hidden features.
        self.layers = nn.ModuleList()
        last_size = self.state_size
        if hidden_layers is not None:
            widths = list(hidden_layers)
            self.layers.append(nn.Linear(last_size, widths[0]))
            for prev, cur in zip(widths, widths[1:]):
                self.layers.append(nn.Linear(prev, cur))
            last_size = widths[-1]
        # Learned head producing the continuous action parameters.
        self.action_parameters_output_layer = nn.Linear(last_size, self.action_parameter_size)
        # Direct state->output shortcut; zero-initialised and frozen below.
        self.action_parameters_passthrough_layer = nn.Linear(self.state_size, self.action_parameter_size)

        # Trunk weight init per init_type; all biases zeroed.
        for layer in self.layers:
            if init_type == "kaiming":
                nn.init.kaiming_normal_(layer.weight, nonlinearity=activation)
            elif init_type == "normal":
                nn.init.normal_(layer.weight, std=init_std)
            else:
                raise ValueError("Unknown init_type "+str(init_type))
            nn.init.zeros_(layer.bias)
        if output_layer_init_std is not None:
            nn.init.normal_(self.action_parameters_output_layer.weight, std=output_layer_init_std)
        else:
            nn.init.zeros_(self.action_parameters_output_layer.weight)
        nn.init.zeros_(self.action_parameters_output_layer.bias)

        nn.init.zeros_(self.action_parameters_passthrough_layer.weight)
        nn.init.zeros_(self.action_parameters_passthrough_layer.bias)

        # fix passthrough layer to avoid instability, rest of network can compensate
        self.action_parameters_passthrough_layer.requires_grad = False
        self.action_parameters_passthrough_layer.weight.requires_grad = False
        self.action_parameters_passthrough_layer.bias.requires_grad = False

    def forward(self, state):
        """Return the (unsquashed) continuous action parameters for state."""
        negative_slope = 0.01  # slope used when activation == "leaky_relu"
        x = state
        for layer in self.layers:
            if self.activation == "relu":
                x = F.relu(layer(x))
            elif self.activation == "leaky_relu":
                x = F.leaky_relu(layer(x), negative_slope)
            else:
                raise ValueError("Unknown activation function "+str(self.activation))
        action_params = self.action_parameters_output_layer(x)

        # Skip-connection through the frozen passthrough layer: even if the
        # trunk's gradients vanish, a stable linear path remains.
        action_params += self.action_parameters_passthrough_layer(state)
        if self.squashing_function:
            # Intended to bound the output to a fixed range; never reached
            # because the constructor asserts squashing_function is False.
            assert False  # scaling not implemented yet
            action_params = action_params.tanh()
            action_params = action_params * self.action_param_lim
        # action_params = action_params / torch.norm(action_params) ## REMOVE --- normalisation layer?? for pointmass
        return action_params