# coding=utf-8

import numpy as np
import random
from scipy.optimize import linprog


# Production environment class
class ProductionEnv:
    """Toy multi-factory production environment.

    Tracks monthly output for 3 factories x 5 products. Each `step` bumps
    one (factory, product) cell by 10 units (capped at that cell's limit)
    and returns a profit-based reward derived from randomly drawn
    quality/cost/price tables.
    """

    def __init__(self, num_months=12):
        self.num_months = num_months
        self.factories = ['A', 'B', 'C']
        self.products = ['g1', 'g2', 'g3', 'g4', 'g5']
        # Per-(factory, product) tables, drawn once at construction time.
        self.production_limits = np.random.randint(50, 150, (3, 5))
        self.base_quality = np.random.uniform(0.7, 0.95, (3, 5))
        self.base_cost = np.random.uniform(10, 50, (3, 5))
        self.prices = np.random.uniform(100, 200, 5)
        # state[m, f, p] = units of product p produced by factory f in month m.
        self.state = np.zeros((num_months, 3, 5))

    def step(self, month, action):
        """Apply one production action and return (next_state, reward, done).

        `action` is a (factory_index, product_index) pair; `next_state` is
        the (month + 1, factory_index, product_index) triple.
        """
        factory, product = action
        # Add a fixed batch of 10 units, clamped to the cell's capacity.
        bumped = self.state[month, factory, product] + 10
        self.state[month, factory, product] = min(
            bumped, self.production_limits[factory, product])

        reward = self.calculate_profit(self.base_quality[factory][product],
                                       self.base_cost[factory][product],
                                       month, factory)

        done = month == self.num_months - 1
        return (month + 1, factory, product), reward, done

    def calculate_profit(self, quality, cost, month, factory_index):
        """Profit proxy: quality-weighted total price minus cost-weighted
        total output of the factory for that month."""
        revenue = quality * self.prices.sum()
        expense = cost * self.state[month, factory_index].sum()
        return revenue - expense


# Reinforcement learning agent class
class RLAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration and
    experience replay over (month, factory, product) states.

    The action space is the full (factory, product) grid, so the Q-table
    carries a 3x5 action slice per state. (The original table had a flat
    action axis of size 4, which could not hold the 15 possible actions
    and raised IndexError once `replay` indexed it with a product index
    of 4.)
    """

    def __init__(self, env):
        self.env = env
        # Q[month, factory, product, action_factory, action_product]
        self.q_table = np.zeros((env.num_months, 3, 5, 3, 5))
        self.learning_rate = 0.1
        self.gamma = 0.95            # discount factor
        self.epsilon = 1.0           # exploration rate, decayed per episode
        self.epsilon_decay = 0.99
        self.epsilon_min = 0.1
        self.replay_memory = []
        self.batch_size = 32

    def choose_action(self, state):
        """Return an action tuple (factory_index, product_index), epsilon-greedy."""
        month, factory, product = state
        if np.random.rand() < self.epsilon:
            return (random.randint(0, 2), random.randint(0, 4))
        # Greedy: best (factory, product) cell in the 3x5 action slice.
        q_values = self.q_table[month, factory, product]
        return np.unravel_index(np.argmax(q_values), q_values.shape)

    def replay(self):
        """Sample a minibatch and apply one Q-learning update per sample."""
        if len(self.replay_memory) < self.batch_size:
            return

        batch = random.sample(self.replay_memory, self.batch_size)
        for state, action, reward, next_state, done in batch:
            month, factory, product = state
            next_month, next_factory, next_product = next_state

            # Terminal transitions bootstrap from the reward alone; the
            # guard also keeps next_month (== num_months at episode end)
            # from indexing past the table.
            if done:
                target = reward
            else:
                target = reward + self.gamma * np.max(
                    self.q_table[next_month, next_factory, next_product])

            f_idx, p_idx = action
            current_q = self.q_table[month, factory, product, f_idx, p_idx]
            self.q_table[month, factory, product, f_idx, p_idx] += \
                self.learning_rate * (target - current_q)

    def train(self, episodes):
        """Run epsilon-greedy episodes, storing transitions and learning
        from replay after each episode."""
        for _ in range(episodes):
            state = (0, random.randint(0, 2), random.randint(0, 4))  # initial state
            for month in range(self.env.num_months):
                action = self.choose_action(state)
                next_state, reward, done = self.env.step(month, action)
                self.replay_memory.append((state, action, reward, next_state, done))
                state = next_state
                if done:
                    break

            # BUG FIX: the original never invoked replay(), so the
            # Q-table was never updated during training.
            self.replay()

            if self.epsilon > self.epsilon_min:
                self.epsilon *= self.epsilon_decay


# Class simulating expert behavior
class Expert:
    """Baseline that solves a relaxed production plan via linear programming."""

    def __init__(self, env):
        self.env = env

    def simulate(self):
        """Maximize total revenue over product quantities subject to each
        factory's aggregate capacity, then print the LP outcome."""
        n_products = len(self.env.products)

        # linprog minimizes, so negate prices to maximize revenue.
        c = -self.env.prices

        # One all-ones row per factory: total units across all products
        # may not exceed that factory's summed production limit.
        A = np.ones((3, n_products))
        b = np.array([self.env.production_limits[f].sum() for f in range(3)])

        # Quantities are non-negative and unbounded above.
        bounds = [(0, None)] * n_products

        res = linprog(c, A_ub=A, b_ub=b, bounds=bounds, method='highs')

        if res.success:
            print("Optimal production:", res.x)
        else:
            print("Optimization failed:", res.message)


# Main program
def main():
    """Build the environment, train the RL agent, then run the LP expert."""
    env = ProductionEnv()
    agent = RLAgent(env)
    expert = Expert(env)

    agent.train(episodes=1000)
    expert.simulate()  # optional: expert LP baseline
    print(agent.q_table)


if __name__ == "__main__":
    main()
