import numpy as np
import torch as T
from MADDPG.noise import OUActionNoise
from MADDPG.networks import ActorNetwork, CriticNetwork


class Agent:
    """One MADDPG agent: an actor/critic pair, their target networks, and
    exploration-noise machinery (parameter noise, OU noise, or Gaussian noise).

    Actions are laid out as [flight direction, flight speed, per-user
    offloading ratios]; see choose_action_noised.
    """

    def __init__(self, actor_dims, critic_dims, n_actions, n_agents, num_users, agent_idx, noise_type, chkpt_dir,
                 alpha, beta, fc1, fc2, fc3, gamma, tau, desired_distance=0.7,
                 scalar_decay=0.99, scalar=0.05, normal_scalar=0.25):
        # alpha / beta: learning rates for the actor / critic networks.
        # gamma: discount factor weighting future rewards.
        # tau: soft-update coefficient; smaller values make the target
        #      networks track the online networks more slowly.
        # chkpt_dir: directory where model checkpoints are stored.
        # noise_type: exploration strategy — "param", "ou" or "gaussian".
        # desired_distance: target action-space distance between the clean and
        #      parameter-noised policies, used to adapt the noise magnitude.
        # scalar / scalar_decay: parameter-noise magnitude and its adaptation rate.
        # normal_scalar: std-dev scale applied to Gaussian exploration noise.
        self.num_user = num_users
        self.gamma = gamma
        self.tau = tau
        self.n_actions = n_actions
        # Networks and checkpoints are namespaced per agent: 'agent_<idx>'.
        self.agent_name = 'agent_%s' % agent_idx
        self.actor = ActorNetwork(alpha, actor_dims, fc1, fc2, fc3, num_users,
                                  chkpt_dir=chkpt_dir, name=self.agent_name + '_actor')
        # FIX: the noised actor must share the actor's architecture, because
        # choose_action_noised copies the actor's state_dict into it. The
        # original passed n_actions here, which makes load_state_dict raise a
        # size mismatch whenever n_actions != num_users.
        self.actor_noised = ActorNetwork(alpha, actor_dims, fc1, fc2, fc3, num_users,
                                         chkpt_dir=chkpt_dir, name=self.agent_name + '_actor_noised')
        self.critic = CriticNetwork(beta, critic_dims,
                                    fc1, fc2, fc3, n_agents, n_actions,
                                    chkpt_dir=chkpt_dir, name=self.agent_name + '_critic')
        self.target_actor = ActorNetwork(alpha, actor_dims, fc1, fc2, fc3, num_users,
                                         chkpt_dir=chkpt_dir,
                                         name=self.agent_name + '_target_actor')
        self.target_critic = CriticNetwork(beta, critic_dims,
                                           fc1, fc2, fc3, n_agents, n_actions,
                                           chkpt_dir=chkpt_dir,
                                           name=self.agent_name + '_target_critic')
        # tau=1 performs a hard update: the online weights are copied into the
        # targets so both start identical.
        self.update_network_parameters(tau=1)
        self.noise_type = noise_type
        self.distances = []  # history of clean-vs-noised action distances (param noise)
        self.desired_distance = desired_distance
        self.scalar_decay = scalar_decay
        self.scalar = scalar
        self.normal_scalar = normal_scalar
        # One independent OU process per action component:
        # flight direction (1), flight speed (1), offloading ratios (num_users).
        self.ou_noise = [
            OUActionNoise(size=1, mu=0.0, theta=0.15, sigma=0.2),
            OUActionNoise(size=1, mu=0.0, theta=0.15, sigma=0.2),
            OUActionNoise(size=self.num_user, mu=0.0, theta=0.15, sigma=0.2),
        ]
        self.normal_scalar_decay = 0.99995
        # String dump of the constructor arguments for experiment logging;
        # relies on locals() still holding only the parameters at this point.
        self.hyperparameters = '\n'.join(f"{key:>17}: {value}" for key, value in locals().items() if key != 'self')

    def choose_action_noised(self, observation, episode, max_episodes, add_noise=True):
        """Return an action for *observation* under the current policy,
        optionally perturbed by the configured exploration noise.

        The result is clipped to [0, 1] element-wise.
        """
        state = T.tensor(observation, dtype=T.float).to(self.actor.device)

        # Deterministic policy output; assumed to lie in [-1, 1] — TODO confirm
        # against ActorNetwork's output activation.
        raw_actions = self.actor.forward(state).detach().cpu().numpy()

        # Action layout: [flight direction, flight speed, offloading ratios...].
        fly_direction = raw_actions[0]
        fly_speed = raw_actions[1]
        offload_ratios = raw_actions[2:]

        if add_noise:
            if self.noise_type == "param":
                # Parameter noise: perturb a copy of the actor's weights and
                # act with the perturbed network instead.
                self.actor_noised.load_state_dict(self.actor.state_dict().copy())
                self.actor_noised.add_parameter_noise(self.scalar)
                action_noised = self.actor_noised.forward(state).cpu().data.numpy()

                fly_direction = action_noised[0]
                fly_speed = action_noised[1]
                offload_ratios = action_noised[2:]

                # Adapt the noise magnitude so that the RMS distance between
                # clean and noised actions tracks desired_distance.
                distance = np.sqrt(np.mean(np.square(raw_actions - action_noised)))
                self.distances.append(distance)
                if distance > self.desired_distance:
                    self.scalar *= self.scalar_decay
                if distance < self.desired_distance:
                    self.scalar /= self.scalar_decay

            elif self.noise_type == "ou":
                # Independent OU noise per action component.
                noisy_actions = self.get_action_with_noise(episode=episode, max_episodes=max_episodes)

                fly_direction += noisy_actions[0]
                fly_speed += noisy_actions[1]
                offload_ratios += noisy_actions[2]

            elif self.noise_type == "gaussian":
                # FIX: scale the samples by normal_scalar. The original decayed
                # normal_scalar every call but never applied it, so the noise
                # magnitude stayed constant.
                fly_direction += np.random.randn() * self.normal_scalar
                fly_speed += np.random.randn() * self.normal_scalar
                offload_ratios += np.random.randn(len(offload_ratios)) * self.normal_scalar
                self.normal_scalar *= self.normal_scalar_decay

        # FIX: fly_direction/fly_speed can be zero-dimensional scalars, which
        # np.concatenate rejects; np.hstack promotes them to 1-d first.
        action = np.hstack((fly_direction, fly_speed, offload_ratios))

        return np.clip(action, 0, 1)

    def get_action_with_noise(self, episode, max_episodes):
        """Sample every OU process once; returns a list of arrays with shapes
        [(1,), (1,), (num_users,)] matching the action layout."""
        return [noise(episode=episode, max_episodes=max_episodes) for noise in self.ou_noise]

    def choose_action(self, observation):
        """Return the deterministic (noise-free) policy action for *observation*."""
        state = T.tensor(observation, dtype=T.float).to(self.actor.device)
        return self.actor.forward(state).detach().cpu().numpy()

    def update_network_parameters(self, tau=None):
        """Soft-update both target networks: target = tau*online + (1-tau)*target.

        Called with tau=1 from __init__ for the initial hard copy; with
        tau=None it falls back to the agent's configured self.tau.
        """
        if tau is None:
            tau = self.tau

        self._soft_update(self.actor, self.target_actor, tau)
        self._soft_update(self.critic, self.target_critic, tau)

    @staticmethod
    def _soft_update(online, target, tau):
        # Blend each online parameter into the matching target parameter and
        # load the result back into the target network.
        target_state = dict(target.named_parameters())
        blended = {
            name: tau * param.clone() + (1 - tau) * target_state[name].clone()
            for name, param in online.named_parameters()
        }
        target.load_state_dict(blended)

    def save_models(self):
        """Checkpoint all four networks to chkpt_dir."""
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        """Restore all four networks from their checkpoints."""
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()