import numpy as np
import matplotlib.pyplot as plt
import random
from pyswarm import pso

class GPC:
    """Generalized Predictive Controller (GPC).

    Solves the Diophantine recursion for the plant polynomials A(q^-1),
    B(q^-1), builds the step-response (dynamic) matrix G, and finds the
    control-increment sequence by particle-swarm optimisation
    (pyswarm.pso) instead of the closed-form least-squares solution.
    """

    def __init__(self, theta0, beta, N, Nu, lamda, alpha, init_u, init_y, na, nb):
        self.theta0 = theta0  # feedback-correction parameters (model-free mode)
        self.beta = beta      # feedback-correction gain
        self.N = N            # prediction horizon
        self.Nu = Nu          # control horizon
        self.lamda = lamda    # weight of the control-energy cost
        self.alpha = alpha    # softening factor for the reference trajectory
        self.init_u = init_u  # past inputs [u(k-1), u(k-2)]
        self.init_y = init_y  # past outputs [y(k-1), y(k-2)]
        self.na = na          # number of A coefficients (model-free mode)
        self.nb = nb          # number of B coefficients (model-free mode)
        self.theta_k = []     # model-free parameter estimates, initially empty

    def setTargetTraj(self, target_state=10):
        """Build the softened reference trajectory w(k+j), j = 1..N.

        The first point blends the last known output with the target;
        each following point is smoothed recursively toward the target.
        """
        self.soft_traj = []
        for j in range(self.N):
            prev = self.init_y[0] if j == 0 else self.soft_traj[j - 1]
            self.soft_traj.append(self.alpha * prev + (1 - self.alpha) * target_state)

    def setModel(self, A=None, B=None):
        """Set the known plant polynomials A(q^-1) and B(q^-1).

        None sentinels replace the original mutable default arguments,
        keeping the same effective defaults for existing callers.
        """
        self.A = [1, -1.5, 0.7] if A is None else A
        self.B = [1, 0.5] if B is None else B

    def objective_function(self, delta_Uk, G, delta_y):
        """PSO cost: squared tracking error of the predicted output.

        Fix: the original definition was missing ``self``, so calling it
        as a bound method would have shifted every argument by one.

        NOTE(review): the control-energy term lamda*||delta_Uk||^2 of
        classic GPC is not included here -- confirm this is intentional.
        """
        return np.sum((np.dot(G, delta_Uk) - delta_y) ** 2)

    def optimizeControlIncrement(self, G, delta_y):
        """Search the length-Nu control-increment sequence with PSO.

        The original file defined this method twice; the duplicate has
        been removed and the surviving version uses the (now fixed)
        named objective instead of an inline lambda.
        """
        lb = [-1.0] * len(G.T)  # box constraint on each increment
        ub = [1.0] * len(G.T)
        best_delta_Uk, _ = pso(self.objective_function, lb, ub,
                               args=(G, delta_y), swarmsize=50, maxiter=100)
        return best_delta_Uk

    def calU(self):
        """Run one GPC step and return the new control u(k).

        Builds E_j/F_j via the Diophantine recursion, assembles the
        dynamic matrix G and the free response, optimises the increment
        sequence, and applies only its first element (receding horizon).
        """
        # A_minus1 = q^-1 * A
        self.A_minus1 = [0] + list(self.A)
        # A_overline = A * (1 - q^-1) = A - q^-1 * A
        self.A_overline = [-v for v in self.A_minus1]
        for i in range(len(self.A)):
            self.A_overline[i] += self.A[i]

        # Diophantine recursion: E starts at [1], F at the shifted
        # negated tail of A_overline; each step peels off one more
        # E coefficient e_j = F_j[0].
        E = [[1]]
        F = [[-v for v in self.A_overline[1:]]]
        for j in range(self.N):
            e_j = F[j][0]
            E.append(E[j] + [e_j])
            temp_F = [0] * max(len(F[j]), len(self.A_overline))
            for i, v in enumerate(F[j]):
                temp_F[i] += v
            for i, v in enumerate(self.A_overline):
                temp_F[i] -= v * e_j
            # Shift left by one power of q^-1.
            F.append(temp_F[1:])

        # Convolution E_j(q^-1) * B(q^-1); assumes B has two coefficients.
        Ej_B = []
        for j in range(self.N):
            conv = [0] * max(len(E[j]) + 1, len(self.B))
            for i, e in enumerate(E[j]):
                conv[i] += e * self.B[0]
                conv[i + 1] += e * self.B[1]
            Ej_B.append(conv)

        # Step-response coefficients g_0..g_{N-1} sit in the last row;
        # H_j is the trailing coefficient acting on the past increment.
        H_j = [Ej_B[j][j + 1] for j in range(self.N)]
        g = Ej_B[self.N - 1][:self.N]

        # Free response: F_j acts on [y(k), y(k-1), y(k-2), ...].
        Y_k_plus_j = []
        for j in range(self.N):
            free = 0
            for i, f in enumerate(F[j]):
                free += f * (self.Yk if i == 0 else self.init_y[i - 1])
            Y_k_plus_j.append(H_j[j] * (self.init_u[0] - self.init_u[1]) + free)

        # Lower-triangular Toeplitz dynamic matrix: G[r][c] = g_{r-c}.
        G = np.zeros([self.N, self.Nu])
        for j in range(self.N):
            for i in range(self.Nu):
                if i + j >= self.N:
                    break
                G[j + i][i] = g[j]

        # Fix: removed the unused analytic solution
        # inv(G'G + lamda*I) and the unused Y_prdict computation.
        delta_y = np.array(self.soft_traj) - np.array(Y_k_plus_j)
        delta_Uk = self.optimizeControlIncrement(G, delta_y)
        # Receding horizon: apply only the first increment.
        return self.init_u[0] + delta_Uk[0]

    def setYk(self, Yk):
        """Record the latest measured plant output y(k)."""
        self.Yk = Yk


class env():
    """Second-order plant simulator driven by the GPC controller.

    Keeps the last two inputs/outputs as state and advances them with
    the difference equation A(q^-1) y = B(q^-1) u plus uniform noise
    scaled by sigma.
    """

    def __init__(self, A=None, B=None, init_u=None, init_y=None, sigma=0.01, N=10):
        # None sentinels replace the original mutable default arguments;
        # effective defaults are unchanged.
        self.A = [1, -1.5, 0.7] if A is None else A  # model params (model-based control only)
        self.B = [1, 0.5] if B is None else B        # model params (model-based control only)
        self.u = [0, 1] if init_u is None else init_u    # [u(k-1), u(k-2)]
        self.y = [0, 0.2] if init_y is None else init_y  # [y(k-1), y(k-2)]
        self.save_u = self.u.copy()  # full input history
        self.save_y = self.y.copy()  # full output history
        self.sigma = sigma           # noise amplitude
        self.na = len(self.A) - 1    # A parameter count (model-free control only)
        self.nb = len(self.B)        # B parameter count (model-free control only)

    def _step(self):
        """Evaluate the plant difference equation once (with noise).

        NOTE(review): random.random() is in [0, 1), so the noise has a
        positive bias of sigma/2 -- confirm this is intended.
        """
        return (1 / self.A[0]) * (
            -self.A[1] * self.y[0] - self.A[2] * self.y[1]
            + self.B[0] * self.u[0] + self.B[1] * self.u[1]
            + random.random() * self.sigma)

    def controlWithU(self, u=None):
        """Advance the plant one step; optionally apply a new input u."""
        Yk = self._step()
        if u is not None:  # fix: identity test instead of `u != None`
            self.u[1] = self.u[0]
            self.u[0] = u
            self.save_u.append(u)
        self.y[1] = self.y[0]
        self.y[0] = Yk
        self.save_y.append(Yk)

    def getCurrentY(self):
        """Return the plant output for the current state without advancing.

        Note: draws a fresh noise sample, so it will not exactly match
        the value a subsequent controlWithU() stores (original behavior).
        """
        return self._step()

    def setTarget(self, target=20):
        """Set the reference value the controller should track."""
        self.target = target

    def createGPC(self, theta0=None, beta=1, N=10, Nu=5, lamda=0.3, alpha=0.2):
        """Instantiate the GPC controller bound to this environment's state."""
        if theta0 is None:  # fix: mutable default argument
            theta0 = [1.5, -0.7, 1.0, 0.5]
        self.GPC_MODEL = GPC(theta0, beta, N, Nu, lamda, alpha, self.u, self.y, self.na, self.nb)

    def calOutput(self, Yk, has_model=False):
        """Compute the next control input for the measured output Yk."""
        self.GPC_MODEL.setYk(Yk)
        if has_model:
            self.GPC_MODEL.setModel(self.A, self.B)
        else:
            # NOTE(review): GPC.calModel is not defined in this file, so
            # the model-free branch raises AttributeError -- confirm the
            # identification routine exists elsewhere.
            flag, info = self.GPC_MODEL.calModel(self.save_y, self.save_u)
            if not flag:
                print(info)
                return self.save_u[-1]  # fall back to the last applied input
        self.GPC_MODEL.setTargetTraj(self.target)
        return self.GPC_MODEL.calU()

    def show(self, target):
        """Plot the target trajectory against the achieved output."""
        # Generalized: derive the horizon from the target length instead
        # of the hard-coded 200 (identical for the existing caller).
        sim_time = [i * 0.1 for i in range(len(target))]
        plt.figure(1)
        plt.plot(sim_time, target, label='Target Trajectory', color='red')
        # Skip the two seeded initial outputs.
        gpc_trajectory = self.save_y[2:2 + len(sim_time)]
        plt.plot(sim_time, gpc_trajectory, label='GPC Trajectory', color='blue')
        plt.show()
        plt.close()  # Close the current figure

def run_simulation():
    """Run a 200-step closed-loop GPC simulation with a square-wave target.

    Builds the plant environment, a square-wave reference alternating
    between 0 and 20 every 50 steps, steps the loop (measure -> control
    -> apply), then plots the trajectory and the control input.
    """
    # Initialize the simulation environment.
    sim_env = env(A=[1, -1.5, 0.7], B=[1, 0.5], init_u=[0, 1], init_y=[0, 0.2], sigma=0.01)

    # Square-wave reference: four 50-step segments, alternating 0 / 20.
    fb = []
    for i in range(4):
        level = 0 if i % 2 == 0 else 20
        fb.extend([level] * 50)

    sim_env.createGPC(theta0=[1.5, -0.7, 1.0, 0.5], beta=1, N=10, Nu=5, lamda=0.3, alpha=0.2)

    control_input = []
    sim_time = [i * 0.1 for i in range(200)]

    for target in fb:
        sim_env.setTarget(target)
        Yk = sim_env.getCurrentY()
        u = sim_env.calOutput(Yk, has_model=True)
        control_input.append(u)
        sim_env.controlWithU(u)
    sim_env.show(fb)

    plt.figure(2)
    plt.plot(sim_time, control_input, label='Control Input')
    plt.xlabel('Time')
    plt.ylabel('Control Input')
    plt.title('控制输入')
    plt.legend()
    # Fix: removed the redundant plt.show(block=False) that followed the
    # blocking show() -- it re-displayed an already-shown figure.
    plt.show()

# Entry point: run the closed-loop GPC simulation when executed as a script.
if __name__ == "__main__":
    run_simulation()