import os
import random

import gymnasium as gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.optim import Adam

# NOTE(review): RENDER is never read anywhere in this file — presumably a
# leftover rendering toggle; confirm before removing.
RENDER = False
# Per-minibatch total losses from the final optimisation pass of each update;
# appended in PPO.update() and saved/plotted by PPO.ppo_train().
loss_history = []


# Initialise the weights of a neural-network layer.
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Initialise a linear layer in place and return it.

    The weight matrix receives an orthogonal initialisation scaled by
    ``std`` (used as the gain), and the bias is filled with the constant
    ``bias_const``.  The same layer object is returned for chaining.
    """
    nn.init.orthogonal_(layer.weight, gain=std)
    nn.init.constant_(layer.bias, val=bias_const)
    return layer


# Rolls out the current policy to generate training data for PPO.
class Sample:
    def __init__(self, env):
        """Store the environment and the return/advantage hyper-parameters.

        env is expected to follow the Gymnasium API:
        reset() -> (obs, info), step(a) -> (obs, r, terminated, truncated, info).
        """
        self.env = env
        self.gamma = 0.99  # discount factor
        self.lamda = 0.95  # GAE lambda
        self.last_state = None  # final observation of the most recent episode
        self.batch_state = None  # stacked observations of the current batch
        self.batch_act = None  # stacked actions
        self.batch_logp = None  # log-probabilities of the sampled actions
        self.batch_adv = None  # advantage estimates
        self.batch_val_target = None  # value-function regression targets
        self.index = None  # shuffled row indices used by get_data()
        self.episode_return = 0  # undiscounted return of the last episode
        self.sum_return = 0  # summed returns over a batch of episodes

    # Sample one trajectory with the current policy network.  Returns the data
    # needed for the actor loss (obs, log_as, adv) and for the critic loss
    # (val_target).
    def sample_one_episode(self, actor_net, v_net):
        # Generate num_episodes trajectories (fixed to 1 here).
        Flag = 0  # unused
        val_target = 0
        episode_obs = []  # observations s_t
        episode_vals = []  # value estimates V(s_t)
        episode_delta = []  # per-step "delta" terms consumed by the GAE loop
        episode_actions = []  # sampled actions a_t
        episode_log_as = []  # log pi(a_t | s_t)
        episode_rewards = []  # rewards r_t
        episode_val_target = []  # discounted-return targets for the critic
        episode_adv = []  # advantage estimates
        done = False
        num_episodes = 1
        episode_sum = 0  # undiscounted return accumulator
        for i in range(num_episodes):
            cur_obs = self.env.reset()[0]  # keep only the observation part of (obs, info)
            steps = 0
            while True:
                episode_obs.append(cur_obs)
                # Sample an action and its log-probability (no gradients).
                action, log_a = actor_net.get_a(
                    torch.as_tensor(cur_obs, dtype=torch.float32)
                )
                episode_actions.append(action)
                episode_log_as.append(log_a)
                cur_v = v_net.get_v(
                    torch.as_tensor(cur_obs, dtype=torch.float32)
                ).item()
                episode_vals.append(cur_v)
                # Placeholders, filled in once the episode terminates.
                episode_adv.append(0.0)
                episode_delta.append(0.0)
                episode_val_target.append(0.0)
                # Advance the environment by one step.
                next_obs, reward, terminated, truncated, _ = self.env.step(action)
                done = terminated or truncated
                cur_obs = next_obs
                episode_rewards.append(reward)
                steps = steps + 1
                # Episode finished: compute targets and advantages.
                if done:
                    self.last_state = next_obs
                    # Bootstrap the tail of the return from V of the last state.
                    val_target = v_net.get_v(
                        torch.as_tensor(self.last_state, dtype=torch.float32)
                    ).item()
                    episode_vals.append(val_target)
                    discounted_sum_reward = np.zeros_like(episode_rewards)
                    # Backward pass: discounted cumulative returns.
                    for t in reversed(range(0, len(episode_rewards))):
                        val_target = episode_rewards[t] + val_target * self.gamma
                        discounted_sum_reward[t] = val_target
                        episode_sum = episode_sum + episode_rewards[t]
                        # NOTE(review): this delta is (discounted return - V(s_t)),
                        # i.e. a Monte-Carlo advantage, NOT the one-step TD
                        # residual r_t + gamma*V(s_{t+1}) - V(s_t) that standard
                        # GAE uses — confirm this is intentional.
                        episode_delta[t] = val_target - episode_vals[t]
                    self.episode_return = episode_sum
                    # GAE-style exponential smoothing over the deltas above.
                    adv = 0.0
                    for t in reversed(range(0, len(episode_rewards))):
                        adv = episode_delta[t] + adv * self.gamma * self.lamda
                        episode_adv[t] = adv
                    for j in range(len(episode_rewards)):
                        episode_val_target[j] = discounted_sum_reward[j]
                    break
            # Reshape into 2-D arrays: batch_obs, batch_act, batch_logp,
            # batch_adv, batch_val_target.
            # NOTE(review): the hard-coded 3 assumes Pendulum-v1's 3-dim
            # observation — confirm when porting to another environment.
            episode_obs = np.reshape(episode_obs, [len(episode_obs), 3])
            episode_actions = np.reshape(episode_actions, [len(episode_actions), 1])
            episode_log_as = np.reshape(episode_log_as, [len(episode_log_as), 1])
            # Critic regression targets.
            episode_val_target = np.reshape(
                episode_val_target, [len(episode_val_target), 1]
            )
            episode_adv = np.reshape(episode_adv, [len(episode_adv), 1])
        return (
            episode_obs,
            episode_actions,
            episode_log_as,
            episode_adv,
            episode_val_target,
        )

    # Evaluation variant of sample_one_episode: rolls out one episode while
    # rendering the environment.  Collects the same statistics but returns
    # nothing (implicitly None) — used only for visual inspection.
    def test_sample_one_episode(self, actor_net, v_net):
        Flag = 0  # unused
        val_target = 0
        episode_obs = []
        episode_vals = []
        episode_delta = []
        episode_actions = []
        episode_log_as = []
        episode_rewards = []
        episode_val_target = []
        episode_adv = []
        done = False
        num_episodes = 1
        count = 1  # unused
        episode_sum = 0
        for i in range(num_episodes):
            cur_obs = self.env.reset()[0]  # keep only the observation part of (obs, info)
            steps = 0
            while True:
                episode_obs.append(cur_obs)
                # Sample an action and its log-probability (no gradients).
                action, log_a = actor_net.get_a(
                    torch.as_tensor(cur_obs, dtype=torch.float32)
                )
                episode_actions.append(action.item())
                episode_log_as.append(log_a)
                cur_v = v_net.get_v(
                    torch.as_tensor(cur_obs, dtype=torch.float32)
                ).item()
                episode_vals.append(cur_v)
                episode_adv.append(0.0)
                episode_delta.append(0.0)
                episode_val_target.append(0.0)
                # Advance the environment by one step.
                next_obs, reward, terminated, truncated, _ = self.env.step(action)
                done = terminated or truncated
                # Render the environment.
                self.env.render()
                cur_obs = next_obs
                episode_rewards.append(reward)
                steps = steps + 1
                # Episode finished: compute the discounted returns.
                if done:
                    self.last_state = next_obs
                    val_target = v_net.get_v(
                        torch.as_tensor(self.last_state, dtype=torch.float32)
                    ).item()
                    episode_vals.append(val_target)
                    discounted_sum_reward = np.zeros_like(episode_rewards)
                    # Backward pass over the rewards.
                    for t in reversed(range(0, len(episode_rewards))):
                        val_target = episode_rewards[t] + val_target * self.gamma
                        episode_val_target[t] = val_target
                        episode_sum += episode_rewards[t]
                    break

    # Collect 1 + num episodes with the current policy and concatenate them
    # into one big batch stored on the self.batch_* attributes.
    def sample_many_episodes(self, actor_net, v_net, num):
        self.sum_return = 0
        self.Flag = 0  # unused
        episode_sum = 0  # unused
        batch_state, batch_act, batch_logp, batch_adv, batch_val_target = (
            self.sample_one_episode(actor_net, v_net)
        )
        self.sum_return = self.episode_return
        for i in range(num):
            (
                episode_state,
                episode_act,
                episode_logp,
                episode_adv,
                episode_val_target,
            ) = self.sample_one_episode(actor_net, v_net)
            self.sum_return = self.sum_return + self.episode_return
            batch_state = np.concatenate((batch_state, episode_state), 0)
            batch_act = np.concatenate((batch_act, episode_act), 0)
            batch_logp = np.concatenate((batch_logp, episode_logp), 0)
            batch_adv = np.concatenate((batch_adv, episode_adv), 0)
            batch_val_target = np.concatenate((batch_val_target, episode_val_target), 0)
        self.batch_state = batch_state
        self.batch_act = batch_act
        self.batch_logp = batch_logp
        self.batch_adv = batch_adv
        self.batch_val_target = batch_val_target

    # Gather a minibatch of sgd_num rows through self.index (shuffled by the
    # caller) and convert it to torch tensors.
    # NOTE(review): the hard-coded widths (3 for observations, 1 elsewhere)
    # assume Pendulum-v1 — confirm when porting.
    def get_data(self, start_index, sgd_num):
        sgd_batch_state = np.zeros((sgd_num, 3))
        sgd_batch_act = np.zeros((sgd_num, 1))
        sgd_batch_logp = np.zeros((sgd_num, 1))
        sgd_batch_adv = np.zeros((sgd_num, 1))
        sgd_batch_val_target = np.zeros((sgd_num, 1))
        for i in range(sgd_num):
            sgd_batch_state[i, :] = self.batch_state[self.index[start_index + i], :]
            sgd_batch_act[i, :] = self.batch_act[self.index[start_index + i], :]
            sgd_batch_logp[i, :] = self.batch_logp[self.index[start_index + i], :]
            sgd_batch_adv[i, :] = self.batch_adv[self.index[start_index + i], :]
            sgd_batch_val_target[i, :] = self.batch_val_target[
                self.index[start_index + i], :
            ]
        # Convert the numpy minibatch to torch tensors.
        sgd_batch_state = torch.as_tensor(sgd_batch_state, dtype=torch.float32)
        sgd_batch_act = torch.as_tensor(sgd_batch_act, dtype=torch.float32)
        sgd_batch_logp = torch.as_tensor(sgd_batch_logp, dtype=torch.float32)
        sgd_batch_adv = torch.as_tensor(sgd_batch_adv, dtype=torch.float32)
        sgd_batch_val_target = torch.as_tensor(
            sgd_batch_val_target, dtype=torch.float32
        )
        return (
            sgd_batch_state,
            sgd_batch_act,
            sgd_batch_logp,
            sgd_batch_adv,
            sgd_batch_val_target,
        )


# Policy (actor) network: builds a Gaussian policy and samples actions from it.
class Actor_Net(nn.Module):
    def __init__(self, obs_dim, act_dim, hidden_sizes):
        """Create the policy network.

        obs_dim: observation dimension.
        act_dim: action dimension.
        hidden_sizes: sizes of the two hidden layers.
        """
        super().__init__()
        self.act_dim = act_dim
        # Log of the action standard deviation, learned as a free,
        # state-independent parameter; initialised so std = exp(-0.5) ~ 0.61.
        log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
        self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
        # Mean network: MLP whose tanh output keeps the mean inside (-1, 1).
        self.actor_net = nn.Sequential(
            layer_init(nn.Linear(obs_dim, hidden_sizes[0])),
            nn.ReLU(),
            layer_init(nn.Linear(hidden_sizes[0], hidden_sizes[1])),
            nn.ReLU(),
            layer_init(nn.Linear(hidden_sizes[1], act_dim), std=1.0),
            nn.Tanh(),
        )
        # (The original called self.actor_net.requires_grad_() here; removed
        # because module parameters require gradients by default.)

    # Build the action distribution N(mu(obs), exp(log_std)).
    def distribution(self, obs):
        mu = self.actor_net(obs)
        log_std = self.log_std.expand_as(mu)
        std = torch.exp(log_std)
        return Normal(mu, std)

    # Log-probability of a single action (scalar: sum over all elements).
    def log_prob_from_distribution(self, dist, act):
        return dist.log_prob(act).sum()

    # Log-probabilities for a batch of actions (sum over the action axis).
    def _log_prob_from_distribution(self, dist, act):
        return dist.log_prob(act).sum(axis=1)

    # Return the distribution, log pi(act|obs) (with gradients, when act is
    # given) and the per-sample entropy; used to build the actor loss.
    def forward(self, obs, act=None):
        dist = self.distribution(obs)
        logp_a = None
        if act is not None:
            logp_a = self._log_prob_from_distribution(dist, act)
        return dist, logp_a, dist.entropy().sum(axis=1)

    # Sample one action without gradients; used when generating rollouts.
    def get_a(self, obs):
        with torch.no_grad():
            dist = self.distribution(obs)
            action = dist.sample()
            # Clip to [-2, 2], the Pendulum-v1 torque range.  (The original
            # comment claimed (-1, 1), which did not match the code.)
            action = torch.clip(action, -2, 2)
            # NOTE(review): the log-prob is that of the *clipped* action under
            # the pre-clip Gaussian — a common approximation; confirm this is
            # acceptable for your use case.
            log_a = self.log_prob_from_distribution(dist, action)
        return action.numpy(), log_a.numpy()


# 值函数网络类，构建值函数
class Critic_Net(nn.Module):
    def __init__(self, obs_dim, hidden_sizes):
        super(Critic_Net, self).__init__()
        self.v_net = nn.Sequential(
            layer_init(nn.Linear(obs_dim, hidden_sizes[0])),
            nn.ReLU(),
            layer_init(nn.Linear(hidden_sizes[0], hidden_sizes[1])),
            nn.ReLU(),
            layer_init(nn.Linear(hidden_sizes[1], 1), std=1.0),
        )
        self.v_net.requires_grad_()

    # 返回值函数预测值，计算梯度，计算值函数损失时使用
    def forward(self, obs):
        return torch.squeeze(self.v_net(obs), -1)

    # 计算值函数的数值，不计算梯度，计算目标函数时使用
    def get_v(self, obs):
        with torch.no_grad():
            v = self.v_net(obs)
        return v.numpy()


# PPO algorithm class: collects data with the sampler and updates the policy.
class PPO:
    def __init__(self, env):
        """Create the sampler, the actor/critic networks and their optimizers.

        env: Gymnasium-style environment; stored and handed to the sampler.
        """
        self.env = env
        self.sampler = Sample(env)
        self.obs_dim = 3  # Pendulum-v1 observation dimension
        self.act_dim = 1  # Pendulum-v1 action dimension
        self.hidden = [32, 32]  # hidden-layer sizes for both networks
        self.pi_lr = 0.00004  # actor learning rate
        self.critic_lr = 0.0001  # critic learning rate
        self.clip_ratio = 0.2  # PPO clipping parameter epsilon
        self.train_pi_iters = 10  # optimisation passes per collected batch
        self.sgd_num = 256  # minibatch size
        self.epochs = 10000  # default training iterations (see ppo_train)
        self.save_freq = 100  # checkpoint every save_freq epochs
        self.episodes_num = 100  # extra episodes per batch (total collected is episodes_num + 1)
        self.target_kl = 1.0  # approximate-KL threshold for early stopping
        self.return_traj = []  # average return per update, for plotting
        self.succ_rate_traj = []
        self.actor = Actor_Net(self.obs_dim, self.act_dim, self.hidden)
        self.pi_optimizer = Adam(self.actor.parameters(), self.pi_lr)
        self.critic = Critic_Net(self.obs_dim, self.hidden)
        self.critic_optimizer = Adam(self.critic.parameters(), self.critic_lr)
        # Default checkpoint directory (the __main__ script overrides this).
        self.training_path = "D:\\book\\code\\ppo_pendulm"
        self.actor_filename = "ppo_actor.pth"
        self.critic_filename = "ppo_critic.pth"

    # Clipped-surrogate policy loss, entropy bonus and approximate KL.
    def compute_loss_pi(self, obs, act, logp_old, adv):
        act_dist, logp, entropy = self.actor(obs, act)
        num = logp_old.size()[0]
        # Reshape to (num, 1) so it lines up with logp_old and adv.
        logp = logp.reshape(num, 1)
        ratio = torch.exp(logp - logp_old)
        clip_adv = torch.clamp(ratio, 1 - self.clip_ratio, 1 + self.clip_ratio) * adv
        loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()
        loss_entropy = entropy.mean()
        approx_kl = (logp_old - logp).mean().item()
        return loss_pi, loss_entropy, approx_kl

    # Mean-squared-error value loss.
    def compute_loss_critic(self, obs, val_target):
        return ((self.critic(obs) - val_target) ** 2).mean()

    # Collect a batch of episodes and run several SGD passes over it.
    def update(self):
        self.sampler.sample_many_episodes(self.actor, self.critic, self.episodes_num)
        # sample_many_episodes draws one initial episode plus episodes_num
        # more, so the average divides by episodes_num + 1 (was hard-coded 101).
        num_collected = self.episodes_num + 1
        self.return_traj.append(self.sampler.sum_return / num_collected)
        print("当前回报：", self.sampler.sum_return / num_collected)
        np.savetxt("ppo_return_traj.txt", self.return_traj)
        batch_size = self.sampler.batch_state.shape[0]
        self.sampler.index = np.arange(batch_size)
        # Train the policy and value networks on the collected data.
        for i in range(self.train_pi_iters):
            np.random.shuffle(self.sampler.index)
            approx_kl = 0.0
            for start_index in range(0, batch_size - self.sgd_num, self.sgd_num):
                # Draw a minibatch and compute stochastic gradients.
                batch_state, batch_act, batch_logp, batch_adv, batch_val_target = (
                    self.sampler.get_data(start_index, self.sgd_num)
                )
                # 1. Clear gradients.
                self.pi_optimizer.zero_grad()
                self.critic_optimizer.zero_grad()
                # 2. Build the loss.
                loss_pi, loss_entropy, approx_kl = self.compute_loss_pi(
                    batch_state, batch_act, batch_logp, batch_adv
                )
                # Stop this pass early if the policy has moved too far.
                # NOTE(review): the break also skips the critic update for the
                # remaining minibatches of this pass — confirm this is intended.
                if approx_kl > 1.5 * self.target_kl:
                    print(f"Early stopping at step {start_index} due to max kl")
                    break
                loss_critic = self.compute_loss_critic(batch_state, batch_val_target)
                loss = loss_pi + 0.5 * loss_critic - 0.001 * loss_entropy
                # 3. Backpropagate.
                loss.backward()
                # Clip the gradient norms.
                nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                # 4. Take one optimisation step each.
                self.pi_optimizer.step()
                self.critic_optimizer.step()
                # Record losses only during the final pass over the batch.
                if i == self.train_pi_iters - 1:
                    loss_history.append(loss.item())
            if i == 0:
                print("标准差：", self.actor.log_std)

    def ppo_train(self, epochs=None):
        """Run the main training loop.

        epochs: number of iterations; when None, the default self.epochs set
        in __init__ is used.  (The original assigned self.epochs = epochs
        unconditionally, so calling ppo_train() with no argument crashed on
        range(None).)
        """
        if epochs is not None:
            self.epochs = epochs
        for epoch in range(self.epochs):
            print("训练次数:", epoch)
            self.update()
            if (epoch + 1) % self.save_freq == 0:
                # Checkpoints are suffixed with the epoch index, producing
                # names like "ppo_actor.pth99" (suffix after the extension);
                # load_model must be given the full suffixed name.
                torch.save(
                    self.actor.state_dict(),
                    os.path.join(self.training_path, self.actor_filename + str(epoch)),
                )
                torch.save(
                    self.critic.state_dict(),
                    os.path.join(self.training_path, self.critic_filename + str(epoch)),
                )
        np.savetxt("ppo_loss_history.txt", loss_history)
        plt.plot(loss_history)
        plt.show()

    # Roll out a single rendered episode for evaluation.
    def test_one_episode(self):
        # NOTE(review): setting env.Render looks ineffective for Gymnasium
        # environments (render mode is fixed at gym.make time) — confirm.
        self.env.Render = True
        self.sampler.test_sample_one_episode(self.actor, self.critic)

    def load_model(self, training_path, actor_filename, critic_filename):
        """Load actor/critic weights previously saved by ppo_train."""
        self.actor.load_state_dict(
            torch.load(os.path.join(training_path, actor_filename))
        )
        self.critic.load_state_dict(
            torch.load(os.path.join(training_path, critic_filename))
        )


if __name__ == "__main__":
    # Fix the random seeds for reproducibility.
    random.seed(1)
    np.random.seed(1)
    torch.manual_seed(1)
    # Build the pendulum environment (no rendering during training, for speed).
    env_name = "Pendulum-v1"
    env = gym.make(env_name)
    initial_state, _ = env.reset(seed=1)  # seed the env and grab the first observation
    # (The original had a bare `env.unwrapped` expression here whose result was
    # discarded — a no-op — and an unused `action_bound` variable; both removed.)
    print(f"观测空间: {env.observation_space}")
    print(f"动作空间: {env.action_space.high}, 初始状态: {initial_state}")

    # Create the PPO agent.
    pendulum_ppo = PPO(env)

    # Override the default (Windows) checkpoint path with a Linux one.
    pendulum_ppo.training_path = "/home/robot/projects/pendulum/saved_models"

    # Make sure the checkpoint directory exists.
    os.makedirs(pendulum_ppo.training_path, exist_ok=True)

    # Start training.
    print("开始PPO训练...")
    pendulum_ppo.ppo_train(5000)  # train for 5000 epochs

    # Persist the return trajectory.
    np.savetxt("ppo_return_traj.txt", pendulum_ppo.return_traj)

    # Plot the training returns.
    plt.figure(figsize=(10, 6))
    plt.plot(pendulum_ppo.return_traj)
    plt.title("PPO Training Returns")
    plt.xlabel("Episode")
    plt.ylabel("Average Return")
    plt.savefig("ppo_training_curve.png")
    plt.show()

    # Evaluate the trained model.
    print("训练完成，开始测试...")
    # A separate environment with human rendering is used for evaluation.
    test_env = gym.make(env_name, render_mode="human")
    pendulum_ppo.env = test_env
    pendulum_ppo.sampler.env = test_env
    pendulum_ppo.test_one_episode()

    # Close both environments.
    env.close()
    test_env.close()
