# -*- coding:utf-8 -*-

import numpy as np
import random


class LinearProgrammingRL:
    """Tabular Q-learning solver for a linearly constrained maximisation problem.

    Each action perturbs several decision variables at once. A state that
    satisfies every constraint is rewarded with the objective value c.x;
    any violation yields a fixed penalty of -100.
    """

    def __init__(self, num_vars, num_constraints, c, A, b, max_steps=1000,
                 equality_constraints=None, bounds=None, step_sizes=None,
                 num_bins=None, gamma=0.9, alpha=0.1, epsilon=0.1,
                 num_episodes=1000, num_actions_per_step=10,
                 decreasing_vars_indices=None, ratio_constraints=None):
        """Configure the problem data and the learning hyper-parameters.

        Parameters
        ----------
        num_vars : number of decision variables.
        num_constraints : number of rows in the inequality system A x <= b.
        c, A, b : objective coefficients and inequality constraint data.
        max_steps : per-episode step budget.
        equality_constraints : list of (indices, target) pairs requiring
            sum(x[indices]) == target within 1e-3.
        bounds : per-variable (lower, upper) box bounds; defaults to (0, inf).
        step_sizes : per-variable increment applied by each action direction.
        num_bins : per-variable bin count used to discretise the state.
        gamma, alpha, epsilon : discount factor, learning rate, exploration rate.
        num_episodes : number of training episodes.
        num_actions_per_step : how many variables one action perturbs.
        decreasing_vars_indices : indices that must be non-increasing in order;
            defaults to indices 5..9 (variables 6-10) where they exist.
        ratio_constraints : list of (i, j, min_ratio) triples requiring
            (x[j] - x[i]) / x[i] >= min_ratio; defaults to the historical
            hard-coded pairs (i, i + 5, 0.3) for i in 10..14 where valid.
        """
        self.num_vars = num_vars
        self.current_step = 0
        self.max_steps = max_steps
        self.num_constraints = num_constraints
        self.c = np.array(c)
        self.A = np.array(A)
        self.b = np.array(b)
        self.equality_constraints = equality_constraints if equality_constraints else []
        self.bounds = bounds if bounds else [(0, np.inf)] * num_vars
        self.step_sizes = step_sizes if step_sizes else [0.1] * num_vars
        self.num_bins = num_bins if num_bins else [1000] * num_vars
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = epsilon
        self.num_episodes = num_episodes
        self.num_actions_per_step = num_actions_per_step
        if decreasing_vars_indices is None:
            # BUG FIX: restrict the historical default (indices 5..9) to
            # indices that actually exist so small problems do not crash.
            decreasing_vars_indices = list(range(5, min(10, num_vars)))
        self.decreasing_vars_indices = decreasing_vars_indices
        if ratio_constraints is None:
            # Generalised from the loop previously hard-coded in get_reward;
            # invalid indices are dropped instead of raising IndexError.
            ratio_constraints = [(i, i + 5, 0.3) for i in range(10, 15) if i + 5 < num_vars]
        self.ratio_constraints = ratio_constraints
        self.Q = {}        # encoded state -> {action: Q-value}
        self.state = None  # current continuous state, set by reset()

    def _bin_edges(self, i):
        """Bin edges for variable i.

        BUG FIX: np.linspace to an infinite upper bound yields unusable
        edges, so infinite bounds are capped to a wide finite grid.
        """
        lower, upper = self.bounds[i]
        if not np.isfinite(upper):
            upper = lower + 1e6
        return np.linspace(lower, upper, self.num_bins[i])

    def encode_state(self, state):
        """Discretise the continuous state into a hashable tuple of bin indices."""
        return tuple(
            np.digitize(state[i], bins=self._bin_edges(i))
            for i in range(self.num_vars)
        )

    def _decode_state(self, code):
        """Approximately invert encode_state: map bin indices back to values.

        BUG FIX: the previous implementation multiplied bin indices by a
        hard-coded 0.001, which is only correct for one specific bounds/bins
        configuration; this uses the actual bin edges of each variable.
        """
        values = np.empty(self.num_vars)
        for i in range(self.num_vars):
            edges = self._bin_edges(i)
            idx = int(np.clip(code[i] - 1, 0, self.num_bins[i] - 1))
            values[i] = edges[idx]
        return values

    def reset(self):
        """Start a new episode from a uniformly random state inside the bounds."""
        self.current_step = 0
        starts = []
        for lower, upper in self.bounds:
            if not np.isfinite(upper):
                upper = lower + 1e6  # uniform() cannot sample an infinite interval
            starts.append(random.uniform(lower, upper))
        self.state = np.array(starts)
        return self.state

    def choose_action(self, episode):
        """Epsilon-greedy action selection.

        Epsilon decays linearly over episodes with a floor of 0.1. Returns
        (action_vars, action_dirs): the variable indices to move and the
        signed step multiplier for each.
        """
        encoded_state = self.encode_state(self.state)
        epsilon_dynamic = max(0.1, self.epsilon * (1 - episode / self.num_episodes))

        # Explore on the coin flip, or when the state has never been seen.
        if random.uniform(0, 1) < epsilon_dynamic or encoded_state not in self.Q:
            # BUG FIX: random.sample requires k <= population size.
            k = min(self.num_actions_per_step, self.num_vars)
            action_vars = random.sample(range(self.num_vars), k)
            action_dirs = [random.choice([-2, -1, 0, 1, 2]) for _ in range(k)]
        else:
            action_options = self.Q[encoded_state]
            action_vars, action_dirs = max(action_options, key=action_options.get)

        return action_vars, action_dirs

    def get_reward(self, state):
        """Return the objective value c.x if every constraint holds, else -100."""
        # Inequality constraints A x <= b.
        for i in range(self.num_constraints):
            if np.dot(self.A[i], state) > self.b[i]:
                return -100

        # Equality constraints: selected variables must sum to the target.
        for vars_idx, eq_value in self.equality_constraints:
            if not np.isclose(sum(state[i] for i in vars_idx), eq_value, atol=1e-3):
                return -100

        # Box bounds.
        for i, (lower, upper) in enumerate(self.bounds):
            if not (lower <= state[i] <= upper):
                return -100

        # Relative-growth constraints (x[j] - x[i]) / x[i] >= min_ratio.
        # BUG FIX: guard the division so a zero base never raises or warns;
        # as before, a zero base leaves the constraint vacuously satisfied.
        for i, j, min_ratio in self.ratio_constraints:
            base = state[i]
            if base != 0 and (state[j] - base) / base < min_ratio:
                return -100

        return np.dot(self.c, state)

    def update_q_value(self, state, action, reward, next_state):
        """One Q-learning (Bellman) backup for the given transition."""
        encoded_state = self.encode_state(state)
        encoded_next_state = self.encode_state(next_state)

        if encoded_state not in self.Q:
            self.Q[encoded_state] = {}
        if action not in self.Q[encoded_state]:
            self.Q[encoded_state][action] = 0

        max_q_next = max(self.Q[encoded_next_state].values()) if encoded_next_state in self.Q else 0
        self.Q[encoded_state][action] += self.alpha * (reward + self.gamma * max_q_next - self.Q[encoded_state][action])

    def step(self, action_vars, action_dirs):
        """Apply one multi-variable action; return (next_state, reward, done)."""
        next_state = np.copy(self.state)

        for var, direction in zip(action_vars, action_dirs):
            next_state[var] += direction * self.step_sizes[var]
            next_state[var] = np.clip(next_state[var], self.bounds[var][0], self.bounds[var][1])

        if not self.is_decreasing(next_state):
            # Violating the monotonicity constraint aborts the episode with a
            # penalty; the current state is left untouched.
            return self.state, -100, True

        reward = self.get_reward(next_state)
        done = self.check_done(next_state)
        self.state = next_state
        self.current_step += 1
        return next_state, reward, done

    def is_decreasing(self, state):
        """True iff the configured variables are non-increasing in listed order."""
        for i in range(1, len(self.decreasing_vars_indices)):
            if state[self.decreasing_vars_indices[i]] > state[self.decreasing_vars_indices[i - 1]]:
                return False
        return True

    def check_done(self, state):
        """An episode ends when the step budget is spent or the state is feasible."""
        if self.current_step >= self.max_steps:
            return True
        if self.get_reward(state) < 0:
            return False
        return True

    def train(self):
        """Train the Q-table with epsilon-greedy multi-variable actions."""
        history_reward = 0
        for episode in range(self.num_episodes):
            self.reset()
            while True:
                # BUG FIX: snapshot the state BEFORE stepping; step() mutates
                # self.state, so the old code backed up Q(next_state, a)
                # instead of Q(state, a).
                state_before = np.copy(self.state)
                action_vars, action_dirs = self.choose_action(episode)
                next_state, reward, done = self.step(action_vars, action_dirs)
                self.update_q_value(state_before, (tuple(action_vars), tuple(action_dirs)), reward, next_state)

                if done:
                    # BUG FIX: always terminate a finished episode; the old
                    # condition (done AND reward >= history best) could spin
                    # forever once the step budget was exhausted.
                    history_reward = max(history_reward, reward)
                    break

            print("迭代{}次，reward={}，state={}".format(episode+1,reward,self.state))
            print()

    def get_optimal_solution(self):
        """Return the best visited state (decoded) and its reward, 3-decimal rounded.

        BUG FIX: the reward is now evaluated on the DECODED state; the old
        code scored raw bin indices as if they were variable values.
        """
        best_code = max(self.Q, key=lambda s: self.get_reward(self._decode_state(s)))
        optimal_state = self._decode_state(best_code)
        return np.round(optimal_state, 3), round(self.get_reward(optimal_state), 3)

if __name__ == '__main__':
    # Demo: a randomly generated problem with 20 variables and 10 constraints.
    num_vars = 20
    num_constraints = 10
    c = np.random.randint(1, 10, num_vars)                    # objective coefficients
    A = np.random.randint(1, 5, (num_constraints, num_vars))  # inequality LHS
    b = np.random.randint(1500, 2000, num_constraints)        # inequality RHS

    # Variables come in four groups of five; each group shares its
    # (bounds, step size, bin count) configuration.
    variable_groups = [
        ((0, 100), 0.1, 1000),
        ((0.6, 0.99), 0.01, 100),
        ((0.4, 1.65), 0.01, 50),
        ((1.1, 1.85), 0.01, 100),
    ]
    bounds, step_sizes, num_bins = [], [], []
    for group_bounds, group_step, group_bins in variable_groups:
        bounds.extend([group_bounds] * 5)
        step_sizes.extend([group_step] * 5)
        num_bins.extend([group_bins] * 5)

    lp_rl_model = LinearProgrammingRL(num_vars, num_constraints, c, A, b,
                                      bounds=bounds, step_sizes=step_sizes,
                                      num_bins=num_bins)
    lp_rl_model.train()
    optimal_solution, optimal_value = lp_rl_model.get_optimal_solution()

    print("最优解:", optimal_solution)
    print("最大目标值:", optimal_value)
