

import numpy as np
import random


class LinearProgrammingRL:
    """Tabular Q-learning heuristic for a small linear program.

    Searches the box [0, STATE_MAX]^num_vars for a point that satisfies the
    supplied constraints while maximizing the objective c.x.  Continuous
    states are discretized onto a fixed grid so that Q-table keys are
    comparable across states.
    """

    # Fixed search box: train() clips every state into [0, STATE_MAX] and the
    # discretization grid in encode_state() spans the same range.
    STATE_MAX = 10.0

    def __init__(self, num_vars, num_constraints, c, A, b, equality_constraints=None, bounds=None, gamma=0.9, alpha=0.1,
                 epsilon=0.1, num_episodes=1000):
        """
        Initialize the linear-programming reinforcement-learning model.

        Args:
            num_vars (int): Number of decision variables.
            num_constraints (int): Number of inequality constraints.
            c (list): Objective-function coefficients.
            A (list): Inequality-constraint coefficient matrix (A @ x <= b).
            b (list): Right-hand-side vector of the inequality constraints.
            equality_constraints (list of tuples): Equality constraints; each
                element is (list of variable indices, target sum).
            bounds (list of tuples): Per-variable (lower, upper) bounds.
            gamma (float): Discount factor.
            alpha (float): Learning rate.
            epsilon (float): Base exploration rate.
            num_episodes (int): Number of training episodes.
        """
        self.num_vars = num_vars
        self.num_constraints = num_constraints
        self.c = np.array(c)
        self.A = np.array(A)
        self.b = np.array(b)
        self.equality_constraints = equality_constraints if equality_constraints else []
        # Default: every variable is non-negative with no upper bound.
        self.bounds = bounds if bounds else [(0, np.inf)] * num_vars
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = epsilon
        self.num_episodes = num_episodes
        self.Q = {}  # encoded state tuple -> {(var_index, direction): q-value}

    def encode_state(self, state, num_bins=1000):
        """Discretize a continuous state into a hashable Q-table key.

        The bins span the fixed range [0, STATE_MAX].  (Binning against
        ``max(state)``, as the previous version did, made keys incomparable
        between states and raised ValueError for an all-zero state because
        np.digitize requires monotonically increasing bin edges.)
        """
        bins = np.linspace(0, self.STATE_MAX, num_bins)
        return tuple(np.digitize(state, bins=bins))

    def choose_action(self, state, episode):
        """Pick (variable index, direction, step size) epsilon-greedily.

        Both the exploration rate and the step size decay linearly with the
        episode number.
        """
        encoded_state = self.encode_state(state)
        epsilon_dynamic = max(0.1, self.epsilon * (1 - episode / self.num_episodes))
        step_size = 0.001 * (1 - episode / self.num_episodes)  # decaying step

        if random.uniform(0, 1) < epsilon_dynamic or encoded_state not in self.Q:
            # Explore: random variable, random direction.
            action_var = random.randint(0, self.num_vars - 1)
            action_dir = random.choice([-1, 1])
        else:
            # Exploit: greedy action for this state.
            action_options = self.Q[encoded_state]
            action_var, action_dir = max(action_options, key=action_options.get)

        return action_var, action_dir, step_size

    def get_reward(self, state):
        """Return c.x for a feasible state, or -100 for any violation."""
        # Inequality constraints: A[i] . x <= b[i].
        for i in range(self.num_constraints):
            if np.dot(self.A[i], state) > self.b[i]:
                return -100

        # Equality constraints: sum of the selected variables == target.
        for vars_idx, eq_value in self.equality_constraints:
            if not np.isclose(sum(state[i] for i in vars_idx), eq_value, atol=1e-3):
                return -100

        # Per-variable bounds.
        for i, (lower, upper) in enumerate(self.bounds):
            if not (lower <= state[i] <= upper):
                return -100

        # Feasible: reward is the objective value.
        return np.dot(self.c, state)

    def update_q_value(self, state, action, reward, next_state):
        """Apply one Q-learning (Bellman) update for (state, action)."""
        encoded_state = self.encode_state(state)
        encoded_next_state = self.encode_state(next_state)

        state_actions = self.Q.setdefault(encoded_state, {})
        state_actions.setdefault(action, 0)

        max_q_next = max(self.Q[encoded_next_state].values()) if encoded_next_state in self.Q else 0
        state_actions[action] += self.alpha * (reward + self.gamma * max_q_next - state_actions[action])

    def train(self, max_steps_per_episode=1000):
        """Train the Q-learning model over num_episodes random-start episodes.

        Args:
            max_steps_per_episode (int): Safety cap on steps per episode.
                (The previous unconditional ``while True`` loop could spin
                forever when the stop condition ``reward >= c . state`` was
                never met, e.g. in a persistently infeasible region.)
        """
        for episode in range(self.num_episodes):
            # Random start inside the search box.
            state = np.random.rand(self.num_vars) * self.STATE_MAX
            for _ in range(max_steps_per_episode):
                action_var, action_dir, step_size = self.choose_action(state, episode)
                next_state = np.copy(state)
                next_state[action_var] += action_dir * step_size
                next_state = np.clip(next_state, 0, self.STATE_MAX)  # stay in the box

                reward = self.get_reward(next_state)
                self.update_q_value(state, (action_var, action_dir), reward, next_state)

                # Stop once the move no longer improves on the current objective.
                if reward >= np.dot(self.c, state):
                    break

                state = next_state

    def get_optimal_solution(self, num_bins=1000):
        """Return (solution, objective value), both rounded to 3 decimals.

        Q-table keys are decoded back to continuous points by inverting the
        encode_state() grid before scoring.  (The previous version scored raw
        bin indices and rescaled them by a fixed 0.001, which did not invert
        the discretization.)

        Args:
            num_bins (int): Must match the value used by encode_state().

        Raises:
            ValueError: if the Q-table is empty (train() has not run).
        """
        if not self.Q:
            raise ValueError("Q-table is empty; call train() first")

        bins = np.linspace(0, self.STATE_MAX, num_bins)

        def decode(key):
            # np.digitize maps bins[i-1] <= v < bins[i] to index i, so the
            # left edge bins[i-1] is the representative value; index 0 would
            # mean v < 0 and is clamped to the first edge.
            idx = np.clip(np.asarray(key) - 1, 0, len(bins) - 1)
            return bins[idx]

        best_key = max(self.Q, key=lambda key: self.get_reward(decode(key)))
        optimal_state = decode(best_key)
        return np.round(optimal_state, 3), round(float(self.get_reward(optimal_state)), 3)