import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.distributions import MultivariateNormal
from pylab import mpl
import datetime

# Configure matplotlib to render CJK labels (SimHei font) and draw minus signs correctly
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False

class PID:
    """Single-loop PID controller with anti-windup integral clamping,
    first-order derivative filtering, and output saturation."""

    def __init__(self, Kp, Ki, Kd, max_output, min_output, dt, int_max, int_min):
        self.Kp = Kp                      # proportional gain
        self.Ki = Ki                      # integral gain
        self.Kd = Kd                      # derivative gain
        self.max = max_output             # output saturation bounds
        self.min = min_output
        self.dt = dt                      # control period
        self.int_max = int_max            # anti-windup clamp for the integral
        self.int_min = int_min
        self.integral = 0                 # accumulated (clamped) integral term
        self.prev_error = 0               # error from the previous step
        self.prev_derivative = 0          # filtered derivative from the previous step

    def update(self, error):
        """Advance the controller by one step and return the saturated output."""
        # Accumulate the integral term, clamped to limit wind-up.
        self.integral = np.clip(self.integral + error * self.dt,
                                self.int_min, self.int_max)

        # Raw slope of the error, then a first-order low-pass
        # blend (0.6 new / 0.4 previous) to damp derivative noise.
        raw_slope = (error - self.prev_error) / self.dt
        filtered = 0.6 * raw_slope + 0.4 * self.prev_derivative
        self.prev_derivative = filtered
        self.prev_error = error

        # Combine the three terms and saturate the command.
        command = self.Kp * error + self.Ki * self.integral + self.Kd * filtered
        return np.clip(command, self.min, self.max)

class TwoStageCascadePID:
    """Two-loop cascade PID: the outer (angle) loop's output becomes the
    setpoint for the inner (angular-rate) loop. The outer input is the
    outer-loop error; the inner input is (outer output - inner value)."""

    def __init__(self, Kp1, Ki1, Kd1, Kp2, Ki2, Kd2):
        self.AngleVal = 0       # latest actual angle value
        self.SpeedVal2 = 0
        self.Angle = []         # history buffers (not used by the update methods)
        self.AngleI = []
        self.Speed = []
        self.SpeedI = []
        self.muBiaoZhi = 50     # target value
        # Outer loop uses tight output/integral limits; the inner loop wider ones.
        self.pid_outer = PID(Kp1, Ki1, Kd1, 2, -2, 0.02, 2, -2)
        self.pid_inner = PID(Kp2, Ki2, Kd2, 4.8, -4.8, 0.02, 12, -12)

    def update1(self, externErr):
        """Run the outer PID on the outer-loop error; return its output."""
        return self.pid_outer.update(externErr)

    def update2(self, innerErr):
        """Run the inner PID on the inner-loop error; return its output."""
        return self.pid_inner.update(innerErr)

# PPO网络
class PPONetwork(nn.Module):
    def __init__(self, input_dim, output_dim=6):
        super(PPONetwork, self).__init__()  # Call parent class constructor
        self.shared_layer = nn.Sequential(  # Shared network layers for feature extraction
            nn.Linear(input_dim, 128),  # Fully connected layer with 128 neurons
            nn.ReLU()  # ReLU activation function
        )
        self.actor = nn.Sequential(  # Define the actor (policy) network
            nn.Linear(128, output_dim),  # Fully connected layer to output action probabilities
            nn.Softmax(dim=-1)  # Softmax to ensure output is a probability distribution
        )
        self.critic = nn.Linear(128, 1)  # Define the critic (value) network to output state value
 
    def forward(self, state):  # Forward pass for the model
        shared = self.shared_layer(state)  # Pass state through shared layers
        action_probs = self.actor(shared)  # Get action probabilities from actor network
        state_value = self.critic(shared)  # Get state value from critic network
        #print("State:", action_probs,"state_value:",state_value)  # Print the state for debugging
        return action_probs, state_value  # Return action probabilities and state value

# 环境类
# Environment wrapping the cascade PID loop for RL-based gain tuning
class Environment:
    """One step = build a cascade PID from the action's gains, advance the
    plant once, and score the result with a shaped reward."""

    def __init__(self):
        self.externVal = 0   # outer-loop state (angle)
        self.innerVal = 0    # inner-loop state (angular rate)
        self.reference = 50  # setpoint for the outer loop
        self.state = np.array([self.externVal, self.innerVal])  # initial state
        self.prev_outer = 0  # previous-step angle (for change-rate/oscillation checks)
        self.prev_inner = 0  # previous-step angular rate

    def step(self, action):
        """Advance one control step.

        Args:
            action: 6-vector (Kp1, Ki1, Kd1, Kp2, Ki2, Kd2); the Kd entries
                are ignored (both derivative gains are forced to 0).

        Returns:
            (errors, reward): np.array([externErr, innerErr]) and the scalar reward.
        """
        Kp1, Ki1, _, Kp2, Ki2, _ = action
        pid = TwoStageCascadePID(Kp1, Ki1, 0, Kp2, Ki2, 0)
        externErr = self.reference - self.externVal
        extern_output = pid.update1(externErr)          # outer loop output
        innerErr = extern_output - self.innerVal
        inner_output = pid.update2(innerErr)            # inner loop output
        self.externVal += extern_output
        self.innerVal += inner_output

        # State change rates (used to penalize jitter).
        outer_change = abs(self.externVal - self.prev_outer)
        inner_change = abs(self.innerVal - self.prev_inner)
        self.prev_outer = self.externVal
        self.prev_inner = self.innerVal

        # Smoothness penalty, damped for large transients.
        change_rate = (outer_change + inner_change) / 2.0
        smooth_penalty = 0.01 * change_rate * np.exp(-0.1 * change_rate)

        # Oscillation heuristic: error and (updated) angle have opposite signs.
        # NOTE(review): prev_outer was just overwritten above, so this compares
        # against the NEW angle — kept as-is to preserve existing behavior.
        oscillation_detected = externErr * self.prev_outer < 0
        oscillation_penalty = 0.1 if oscillation_detected else 0

        # BUG FIX: the original computed three successive `reward` expressions,
        # the first two of which were immediately overwritten (dead code).
        # Only this final formula ever took effect; the dead branches (and the
        # unused convergence_reward) are removed.
        error_magnitude = np.mean(np.abs([externErr, innerErr]))
        speed_reward = np.exp(-0.3 * error_magnitude)
        reward = speed_reward - smooth_penalty - oscillation_penalty - 0.0001 * np.mean(np.abs(action))

        return np.array([externErr, innerErr]), reward

def train_ppo():
    """Train the actor-critic network to tune the cascade PID gains.

    Returns:
        (all_rewards, all_actions): per-episode total reward, and the
        per-episode mean action (the 6 gains proposed by the policy).
    """
    env = Environment()      # simulation environment
    actor = PPONetwork(2)    # state dimension is 2: [outer error, inner error]
    optimizer = optim.Adam(actor.parameters(), lr=0.0005)
    num_episodes = 2000      # number of training episodes
    batch_size = 50          # environment steps per episode
    all_rewards = []         # per-episode total reward history
    all_actions = []         # per-episode mean action history

    for episode in range(num_episodes):
        state = torch.FloatTensor(env.state)
        total_reward = 0
        episode_actions = []

        for _ in range(batch_size):
            # BUG FIX: the network returns (action_probs, state_value); the
            # original unpacked the critic value as a "log-prob". It also set
            # advantage = reward - reward.mean() on a 1-element tensor, which
            # is identically zero — so loss was always 0 and nothing trained.
            action_probs, state_value = actor(state)
            action_np = action_probs.detach().numpy()
            new_errors, reward = env.step(action_np)

            # Advantage: reward relative to the critic's baseline estimate.
            reward_tensor = torch.FloatTensor([reward])
            advantage = float((reward_tensor - state_value).detach())

            # Policy-gradient surrogate (log-prob of the emitted distribution
            # weighted by the advantage) plus a critic regression term.
            log_prob = torch.log(action_probs + 1e-8).sum()
            actor_loss = -log_prob * advantage
            critic_loss = nn.functional.mse_loss(state_value, reward_tensor)
            loss = actor_loss + 0.5 * critic_loss

            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(actor.parameters(), 0.5)
            optimizer.step()

            total_reward += reward
            episode_actions.append(action_np)
            state = torch.FloatTensor(new_errors)

        all_rewards.append(total_reward)
        all_actions.append(np.mean(episode_actions, axis=0))

        if episode % 10 == 0:
            print(f"Episode {episode}: Reward={total_reward:.2f}, Errors={np.mean(np.abs(new_errors)):.3f}")
            # Report the latest gains and plant state for monitoring.
            print(f"外环参数: Kp={episode_actions[-1][0]:.4f}, Ki={episode_actions[-1][1]:.4f}, Kd={episode_actions[-1][2]:.4f}")
            print(f"内环参数: Kp={episode_actions[-1][3]:.4f}, Ki={episode_actions[-1][4]:.4f}, Kd={episode_actions[-1][5]:.4f}")
            print(f"目标值: {env.reference:.2f}, 当前角度: {env.externVal:.2f}, 当前角速度: {env.innerVal:.2f}")
            print("-"*50)

    return all_rewards, all_actions

def plot_results(rewards, actions):
    """Plot the per-episode reward curve and the evolution of the six gains,
    save the figure to 'ppo_pid_tuning.png', then display it."""
    plt.figure(figsize=(12, 8))

    # Top panel: total reward per episode.
    plt.subplot(2, 1, 1)
    plt.plot(rewards)
    plt.title('训练奖励曲线')
    plt.xlabel('训练轮数')
    plt.ylabel('总奖励')

    # Bottom panel: one trace per PID gain.
    plt.subplot(2, 1, 2)
    labels = ['外环Kp', '外环Ki', '外环Kd', '内环Kp', '内环Ki', '内环Kd']
    for idx, label in enumerate(labels):
        plt.plot([gains[idx] for gains in actions], label=label)
    plt.title('PID参数变化')
    plt.xlabel('训练轮数')
    plt.ylabel('参数值')
    plt.legend()

    plt.tight_layout()
    plt.savefig('ppo_pid_tuning.png', dpi=300)
    plt.show()

if __name__ == "__main__":
    rewards, actions = train_ppo()
    plot_results(rewards, actions)

    # Persist the final episode's mean gains, appending to the history file.
    best_params = actions[-1]
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    gain_names = ("Kp", "Ki", "Kd")
    with open('best_params.txt', 'a') as f:  # append mode keeps past runs
        f.write(f"\n\n=== 训练时间: {timestamp} ===\n")
        f.write("外环参数:\n")
        for name, val in zip(gain_names, best_params[:3]):
            f.write(f"{name}: {val:.4f}\n")
        f.write("\n")
        f.write("内环参数:\n")
        for name, val in zip(gain_names, best_params[3:6]):
            f.write(f"{name}: {val:.4f}\n")
        f.write("="*30 + "\n")