import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import random

class MPCController:
    """Linear MPC for x_{k+1} = A x_k + B u_k.

    Builds the condensed finite-horizon QP min_u 0.5 u'Hu + F'u over an
    N-step horizon, solves it unconstrained, and saturates the first
    control move to [u_min, u_max].
    """

    def __init__(self, A, B, C, N, Q, R, u_min, u_max, reg=1e-5):
        self.A = A            # state transition matrix (n x n)
        self.B = B            # input matrix (n x m)
        self.C = C            # output matrix (not used by the QP; kept for API compatibility)
        self.N = N            # prediction horizon length
        self.Q = Q            # state tracking weight
        self.R = R            # control effort weight
        self.u_min = u_min    # lower saturation bound for the returned input
        self.u_max = u_max    # upper saturation bound for the returned input
        self.reg = reg        # Tikhonov regularization added to H for numerical stability
        self.measurement_history = []
        self.setpoint_history = []

    def mpc_control(self, x, r):
        """Solve the horizon QP and return the first control move.

        Args:
            x: current state; reshaped to (n, 1).
            r: setpoint/reference; reshaped to (n, 1).

        Returns:
            (m, 1) array: first optimal input, clipped to [u_min, u_max].
        """
        x = x.reshape(-1, 1)
        r = r.reshape(-1, 1)
        n_states = self.A.shape[0]
        n_controls = self.B.shape[1]

        # Control-effort part of the Hessian: block-diagonal with R.
        # Slice assignment (not kron) so a scalar-like R broadcasts into
        # each m x m block, as callers currently rely on.
        H = np.zeros((self.N * n_controls, self.N * n_controls))
        for i in range(self.N):
            H[i * n_controls:(i + 1) * n_controls,
              i * n_controls:(i + 1) * n_controls] = self.R

        # Prediction matrices: stacked tracking error = G u + L,
        # with L_i = A^(i+1) x - r and G_ij = A^(i-j) B for j <= i.
        G = np.zeros((self.N * n_states, self.N * n_controls))
        L = np.zeros((self.N * n_states, 1))
        for i in range(self.N):
            for j in range(i + 1):
                G[i * n_states:(i + 1) * n_states,
                  j * n_controls:(j + 1) * n_controls] = np.linalg.matrix_power(self.A, i - j) @ self.B
            L[i * n_states:(i + 1) * n_states, :] = np.linalg.matrix_power(self.A, i + 1) @ x - r

        Q_bar = np.kron(np.eye(self.N), self.Q)
        H += G.T @ Q_bar @ G
        # Regularize so H stays invertible even when R = 0 or Q is singular
        # (reg was previously stored but never used).
        H += self.reg * np.eye(H.shape[0])
        F = (G.T @ Q_bar @ L).reshape(-1, 1)

        # Unconstrained minimizer; solve() is cheaper and better conditioned
        # than forming inv(H) explicitly.
        u_opt = -np.linalg.solve(H, F)

        # First control move, saturated to the actuator limits
        # (u_min/u_max were previously stored but never enforced).
        u = np.clip(u_opt[:n_controls], self.u_min, self.u_max)
        return u

    def update(self, setpoint, measurement):
        """Record the (setpoint, measurement) pair and return the MPC input."""
        self.measurement_history.append(measurement)
        self.setpoint_history.append(setpoint)
        u = self.mpc_control(np.array(measurement), np.array(setpoint))
        return u

    def plot_error(self, title='none'):
        """Plot setpoint vs. measurement for each state dimension (one
        figure per dimension).

        Uses local array views instead of overwriting the history lists,
        so update() keeps working after plotting.
        """
        setpoints = np.asarray(self.setpoint_history)
        measurements = np.asarray(self.measurement_history)
        for i in range(setpoints.shape[1]):
            fig, ax = plt.subplots()
            # Time axis assumes a fixed 10 ms sample period.
            ax.plot([k / 100 for k in range(setpoints.shape[0])],
                    setpoints[:, i], label='setpoint')
            ax.plot([k / 100 for k in range(measurements.shape[0])],
                    measurements[:, i], label='measurement')
            ax.set_xlabel('time')
            ax.set_ylabel('value')
            ax.set_title(title)
            ax.legend()

class DQN(nn.Module):
    """Fully connected Q-network: state_dim -> 128 -> 128 -> action_dim."""

    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        # Two hidden layers of width 128; ReLU applied in forward().
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, action_dim)

    def forward(self, x):
        """Map a batch of states to one Q-value per action."""
        hidden = self.fc1(x).relu()
        hidden = self.fc2(hidden).relu()
        return self.fc3(hidden)

class Agent:
    """Epsilon-greedy DQN agent with a bounded replay buffer.

    Exploration draws a continuous action uniformly from [-1, 1]^action_dim;
    exploitation returns the network's raw Q-values as the action vector.
    """

    def __init__(self, state_dim, action_dim, gamma=0.99, epsilon=1.0, lr=1e-3, batch_size=64, max_memory=1000):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = gamma          # discount factor for the TD target
        self.epsilon = epsilon      # exploration probability
        self.lr = lr
        self.batch_size = batch_size
        self.memory = deque(maxlen=max_memory)  # replay buffer, oldest dropped first
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.model = DQN(state_dim, action_dim).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.criterion = nn.MSELoss()

    def act(self, state):
        """Return an action vector for the given state (epsilon-greedy)."""
        if random.random() >= self.epsilon:
            obs = torch.FloatTensor(state).to(self.device)
            with torch.no_grad():
                scores = self.model(obs)
            return scores.cpu().numpy()
        return np.random.uniform(-1, 1, self.action_dim)

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def replay(self):
        """Run one optimization step on a random minibatch.

        No-op while the buffer holds fewer than batch_size transitions.
        """
        if len(self.memory) < self.batch_size:
            return

        minibatch = random.sample(self.memory, self.batch_size)
        # Transpose the batch into per-field columns, then to device tensors.
        columns = [np.array(col) for col in zip(*minibatch)]
        states, actions, rewards, next_states, dones = (
            torch.FloatTensor(col).to(self.device) for col in columns)

        q_all = self.model(states)
        q_next = self.model(next_states).detach()

        # Continuous action vectors are reduced to a discrete index via the
        # position of their largest component.
        idx = actions.argmax(dim=1, keepdim=True).long()

        q_taken = q_all.gather(1, idx).squeeze(1)
        # Standard Q-learning target; terminal transitions drop the bootstrap.
        td_target = rewards + self.gamma * q_next.max(1)[0] * (1 - dones)

        loss = self.criterion(q_taken, td_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def update_epsilon(self, min_epsilon=0.01, decay=0.995):
        """Geometrically decay epsilon, floored at min_epsilon."""
        decayed = self.epsilon * decay
        self.epsilon = decayed if decayed > min_epsilon else min_epsilon

if __name__ == '__main__':
    # Define system matrices (3-state, 3-input linear system).
    A = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    B = np.array([[0.5, 0.6, 0.2], [1, 0.1, 0.4], [0.5, 0.5, 0.8]])
    C = np.eye(3)

    # Initialize the RL agent that re-tunes the MPC's Q weights each step.
    agent = Agent(state_dim=3, action_dim=3)
    Q = np.eye(3) * 2
    # R must match the number of control inputs: B has 3 columns, so the
    # control-weight blocks are 3x3 (np.eye(1) was silently broadcast).
    R = np.eye(3)
    N = 10
    u_min = -1
    u_max = 1
    reg = 1e-5

    best_reward = -np.inf          # best episode return seen so far
    best_setpoint_history = None   # histories of the best episode, for plotting
    best_measurement_history = None

    for epoch in range(1000):
        # Fresh controller and state for each episode.
        mpc = MPCController(A, B, C, N, Q, R, u_min, u_max, reg)
        setpoint = np.array([[1], [1], [1]])
        state = np.array([[0], [0], [0]])
        state_dot = np.array([[0], [0], [0]])
        total_reward = 0

        for _ in range(10):
            action = agent.act(state.flatten())
            # NOTE(review): action components can be zero or negative, making
            # the MPC's Q indefinite/singular -- confirm this is intended.
            mpc.Q = np.diag(action)  # agent's action re-weights the MPC cost
            u = mpc.update(setpoint, state)
            prev_state = state
            state_dot = A @ state_dot + B @ u.reshape(-1, 1)
            state = A @ state + B @ state_dot.reshape(-1, 1)
            reward = -np.linalg.norm(setpoint - state)  # negative tracking error
            total_reward += reward

            # Store the (s, a, r, s') transition: the state *before* acting
            # and the new state after it (previously the post-transition
            # state and the derivative were stored instead).
            agent.remember(prev_state.flatten(), action, reward, state.flatten(), False)
            agent.replay()

        agent.update_epsilon()

        # Track the best episode. Save the measurements too: the setpoint is
        # constant, so saving only setpoint_history plotted a flat line.
        if total_reward > best_reward:
            best_reward = total_reward
            best_setpoint_history = mpc.setpoint_history
            best_measurement_history = mpc.measurement_history

    # Plot the best episode.
    if best_setpoint_history is not None:
        mpc.setpoint_history = best_setpoint_history
        mpc.measurement_history = best_measurement_history
        mpc.plot_error(title="Best MPC Result")

    plt.show()
