# -*- coding:utf-8 -*-

import numpy as np
import random

class LinearProgrammingRL:
    """Tabular Q-learning heuristic for the linear program
    max c.x  s.t.  A.x <= b, x >= 0.

    States are continuous vectors x discretized for Q-table lookup; actions
    nudge one coordinate by +/-0.001. This is a stochastic heuristic, not an
    exact LP solver.
    """

    def __init__(self, num_vars, num_constraints, c, A, b, gamma=0.9, alpha=0.1, epsilon=0.1, num_episodes=1000):
        """Store the LP data (c: objective, A/b: constraints) and RL
        hyper-parameters (gamma: discount, alpha: learning rate,
        epsilon: exploration rate, num_episodes: training episodes)."""
        self.num_vars = num_vars
        self.num_constraints = num_constraints
        self.c = np.array(c)
        self.A = np.array(A)
        self.b = np.array(b)
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = epsilon
        self.num_episodes = num_episodes
        self.Q = {}
        # Best feasible state seen so far; returned by get_optimal_solution().
        # (The previous implementation tried to "decode" Q-table keys by
        # multiplying by 0.001, but the keys are np.digitize bin indices, so
        # that decode was not the inverse of encode_state.)
        self.best_state = np.zeros(num_vars, dtype=float)
        self.best_reward = self.get_reward(self.best_state)

    def encode_state(self, state, num_bins=1000):
        """Discretize a continuous state into a hashable tuple of bin indices
        for Q-table lookup.

        NOTE(review): bins span [0, max(state)], so the encoding is scale
        relative — proportional states (e.g. (1, 2) and (2, 4)) collide.
        Kept as-is to preserve the original table layout; confirm intent.
        """
        discrete_state = tuple(np.digitize(state, bins=np.linspace(0, max(state), num_bins)))
        return discrete_state

    def choose_action(self, state):
        """epsilon-greedy action selection.

        Returns (variable index, direction) where direction is +1 or -1.
        Explores uniformly with probability epsilon, or whenever the state
        has no Q-table entry yet; otherwise picks the argmax action.
        """
        encoded_state = self.encode_state(state)
        if random.uniform(0, 1) < self.epsilon or encoded_state not in self.Q:
            action_var = random.randint(0, self.num_vars - 1)
            action_dir = random.choice([-1, 1])
        else:
            action_options = self.Q[encoded_state]
            action_var, action_dir = max(action_options, key=action_options.get)
        return action_var, action_dir

    def get_reward(self, state):
        """Reward = objective value c.x if all constraints A.x <= b hold,
        otherwise a flat -100 infeasibility penalty."""
        for i in range(self.num_constraints):
            if np.dot(self.A[i], state) > self.b[i]:
                return -100
        return np.dot(self.c, state)

    def update_q_value(self, state, action, reward, next_state):
        """One Q-learning (Bellman) update:
        Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)).
        Unseen (state, action) entries are lazily initialized to 0."""
        encoded_state = self.encode_state(state)
        encoded_next_state = self.encode_state(next_state)

        if encoded_state not in self.Q:
            self.Q[encoded_state] = {}
        if action not in self.Q[encoded_state]:
            self.Q[encoded_state][action] = 0

        max_q_next = max(self.Q[encoded_next_state].values()) if encoded_next_state in self.Q else 0
        self.Q[encoded_state][action] += self.alpha * (reward + self.gamma * max_q_next - self.Q[encoded_state][action])

    def train(self):
        """Run num_episodes episodes of Q-learning, each starting from the
        origin, and record the best feasible state encountered."""
        for episode in range(self.num_episodes):
            # BUGFIX: must be a float array — with dtype=int the 0.001 step
            # below was truncated to 0 and the state never moved.
            state = np.zeros(self.num_vars, dtype=float)
            while True:
                action_var, action_dir = self.choose_action(state)
                next_state = np.copy(state)
                next_state[action_var] += action_dir * 0.001  # step size: 3 decimal places
                next_state[next_state < 0] = 0  # enforce x >= 0

                reward = self.get_reward(next_state)
                self.update_q_value(state, (action_var, action_dir), reward, next_state)

                # Remember the best feasible point visited (infeasible points
                # score -100 and never beat the feasible start at the origin).
                if reward > self.best_reward:
                    self.best_reward = reward
                    self.best_state = np.copy(next_state)

                # NOTE(review): this ends the episode as soon as the new
                # reward is >= the current objective value, i.e. usually
                # after a single step — confirm the intended stop criterion.
                if reward >= np.dot(self.c, state):
                    break

                state = next_state

    def get_optimal_solution(self):
        """Return (best state rounded to 3 decimals, its reward rounded to
        3 decimals). The best state is the best feasible point observed
        during train(); the origin is returned if train() was never called."""
        optimal_state = np.asarray(self.best_state, dtype=float)
        return np.round(optimal_state, 3), round(float(self.get_reward(optimal_state)), 3)

if __name__ == '__main__':
    # Demo: build a random LP instance and solve it with the RL heuristic.
    n_vars, n_cons = 20, 10
    obj_coeffs = np.random.randint(1, 10, n_vars)            # objective coefficients c
    cons_matrix = np.random.randint(1, 5, (n_cons, n_vars))  # constraint matrix A
    rhs = np.random.randint(20, 50, n_cons)                  # right-hand sides b

    solver = LinearProgrammingRL(n_vars, n_cons, obj_coeffs, cons_matrix, rhs)
    solver.train()
    solution, objective_value = solver.get_optimal_solution()

    print("最优解:", solution)
    print("最大目标值:", objective_value)
