# coding=utf-8

import numpy as np
from scipy.optimize import linprog
from collections import deque
import random

class ProductionEnv:
    """Multi-month, multi-factory production environment for RL experiments.

    The state is a (months, factories, products) array of production
    quantities. Actions tweak either the quantity (x) or the pass rate (y)
    of one factory/product pair; the reward is that factory's monthly
    profit, replaced by a fixed penalty when it falls below ``min_profit``.
    """

    def __init__(self, min_profit=1000):
        self.num_months = 12
        self.num_factories = 3  # factories A, B, C
        self.num_products = 5   # products g1..g5
        # Production quantity per (month, factory, product).
        self.state = np.zeros((self.num_months, self.num_factories, self.num_products))
        # Per-factory monthly production ceiling for each product.
        self.production_limits = np.array([[100, 100, 80, 100, 100],
                                           [80, 90, 100, 80, 70],
                                           [90, 80, 70, 60, 0]])
        # Baseline unit cost per factory/product.
        self.base_cost = np.array([[20, 25, 30, 20, 25],
                                   [20, 35, 40, 30, 30],
                                   [30, 40, 30, 25, 0]])
        # Baseline pass (quality) rate per factory/product.
        self.base_quality = np.array([[0.9, 0.85, 0.8, 0.9, 0.85],
                                      [0.9, 0.75, 0.7, 0.6, 0.7],
                                      [0.85, 0.8, 0.75, 0.6, 0]])
        # Unit sale price per product.
        self.prices = np.array([100, 120, 130, 150, 180])
        # Scales the extra cost incurred when the pass rate is adjusted.
        self.cost_multiplier = 1.2
        self.action_space = self.create_action_space()
        # Monthly profit floor; any profit below it yields the -1000 penalty.
        self.min_profit = min_profit

    def create_action_space(self):
        """Enumerate the discrete actions: four moves per factory/product
        pair (raise/lower quantity, raise/lower pass rate) plus a no-op."""
        moves = ('increase_x', 'decrease_x', 'increase_y', 'decrease_y')
        actions = [f'{move}_{factory}_{product}'
                   for factory in range(self.num_factories)
                   for product in range(self.num_products)
                   for move in moves]
        actions.append('no_change')
        return actions

    def reset(self):
        """Zero every production quantity and return the fresh state array."""
        self.state = np.zeros_like(self.state)
        return self.state

    def step(self, month, action):
        """Apply ``action`` for ``month``.

        Returns (full state array, reward, (month, factory, product) tuple).
        Quality/cost adjustments are transient: they affect only this
        step's reward and are recomputed from the base tables next call.
        """
        if action == 'no_change':
            f_idx = p_idx = 0
        else:
            tokens = action.split('_')
            f_idx, p_idx = int(tokens[2]), int(tokens[3])

        quality = self.base_quality[f_idx].copy()
        cost = self.base_cost[f_idx].copy()

        if action.startswith('increase_x'):
            # Add 10 units unless the quantity already reached the ceiling.
            if self.state[month, f_idx, p_idx] < self.production_limits[f_idx, p_idx]:
                self.state[month, f_idx, p_idx] += 10
        elif action.startswith('decrease_x'):
            lowered = self.state[month, f_idx, p_idx] - 10
            self.state[month, f_idx, p_idx] = max(0, lowered)
        elif action.startswith('increase_y'):
            # Raise the pass rate (capped at 0.95) and pay a quadratic cost.
            raised = min(0.95, quality[p_idx] + 0.1)
            cost[p_idx] += self.cost_multiplier * (raised - self.base_quality[f_idx][p_idx]) ** 2
            quality[p_idx] = raised
        elif action.startswith('decrease_y'):
            # Lower the pass rate (floored at 0.7) and recoup a quadratic cost.
            lowered_q = max(0.7, quality[p_idx] - 0.1)
            cost[p_idx] -= self.cost_multiplier * (self.base_quality[f_idx][p_idx] - lowered_q) ** 2
            quality[p_idx] = lowered_q

        reward = self.calculate_profit(quality, cost, month, f_idx)
        return self.state, reward, (month, f_idx, p_idx)

    def calculate_profit(self, quality, cost, month, factory_index):
        """Profit of one factory in one month: sum over products of
        (price - cost) * pass_rate * quantity, or -1000 below the floor."""
        margins = self.prices - cost
        total_profit = np.sum(margins * quality * self.state[month, factory_index])
        if total_profit < self.min_profit:
            # Constraint violated: fixed penalty instead of the raw profit.
            return -1000
        return total_profit


class IRLAgent:
    """Toy inverse-RL agent: derives a reward signal from a simulated expert.

    The "expert" is a linear program that allocates a fixed monthly quota
    across products so as to maximize revenue at the environment's prices.
    """

    def __init__(self, env):
        self.env = env  # only env.prices and env.num_products are used

    def simulate_expert_behavior(self):
        """Solve the expert LP: maximize prices·x s.t. sum(x) == 100, x >= 0.

        Returns:
            The optimal allocation as a numpy array, or None when the
            solver fails.
        """
        c = -self.env.prices  # linprog minimizes, so negate to maximize revenue
        A_eq = np.ones((1, self.env.num_products))
        b_eq = [100]  # total monthly production quota
        # TODO: tighten bounds to env.production_limits per factory; the
        # current formulation only enforces non-negativity.
        bounds = [(0, None)] * len(c)

        result = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method='highs')

        if result.success:
            print("基线解:", result.x)
            return result.x
        print("线性规划失败")
        return None

    def learn_reward(self):
        """Learn a scalar reward from the expert allocation.

        Returns:
            The summed expert allocation, or 0.0 when the LP failed.
            (The original code passed None straight into np.sum and raised.)
        """
        expert_action = self.simulate_expert_behavior()
        if expert_action is None:
            return 0.0
        return float(np.sum(expert_action))


class RLAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration and a
    replay buffer, indexed by (month, factory, product) state tuples."""

    def __init__(self, env, learning_rate=0.001, gamma=0.95, epsilon=1.0, epsilon_decay=0.995, min_epsilon=0.01,
                 replay_size=2000, batch_size=32):
        self.env = env
        self.learning_rate = learning_rate
        self.gamma = gamma  # discount factor for future rewards
        self.epsilon = epsilon  # exploration rate, decayed per episode
        self.epsilon_decay = epsilon_decay
        self.min_epsilon = min_epsilon
        self.replay_memory = deque(maxlen=replay_size)
        self.batch_size = batch_size
        # Random initial Q-values: one row of action values per state index.
        self.q_table = np.random.rand(env.num_months, env.num_factories, env.num_products, len(env.action_space))

    def choose_action(self, state):
        """Epsilon-greedy selection; ``state`` is a (month, factory, product)
        index tuple into the Q-table."""
        if np.random.rand() <= self.epsilon:
            return random.choice(self.env.action_space)
        else:
            month, factory, product = state
            return self.env.action_space[np.argmax(self.q_table[month, factory, product])]

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.replay_memory.append((state, action, reward, next_state, done))

    def replay(self):
        """Sample a minibatch and apply one-step Q-learning updates."""
        if len(self.replay_memory) < self.batch_size:
            return

        batch = random.sample(self.replay_memory, self.batch_size)
        for state, action, reward, next_state, done in batch:
            month, factory, product = state  # unpack current state index

            action_index = self.env.action_space.index(action)
            if done:
                target = reward  # terminal state: no bootstrap
            else:
                next_month, next_factory, next_product = next_state
                target = reward + self.gamma * np.max(self.q_table[next_month, next_factory, next_product])

            # Move the Q-value toward the TD target.
            self.q_table[month, factory, product, action_index] += self.learning_rate * (
                        target - self.q_table[month, factory, product, action_index])

    def train(self, episodes):
        """Run ``episodes`` training episodes of ``env.num_months`` steps each."""
        for episode in range(episodes):
            self.env.reset()
            # Track position as a (month, factory, product) index tuple: the
            # raw ndarray returned by reset() cannot index the Q-table and
            # crashed choose_action's greedy branch in the original code.
            state = (0, 0, 0)
            total_reward = 0

            for month in range(self.env.num_months):
                action = self.choose_action(state)
                # env.step returns (state array, reward, index tuple); the
                # original unpacked only two values and raised ValueError.
                _, reward, next_state = self.env.step(month, action)
                # Flag the final month as terminal so replay() can use the
                # non-bootstrapped target (done was never set True before).
                done = month == self.env.num_months - 1

                self.remember(state, action, reward, next_state, done)
                self.replay()

                total_reward += reward
                state = next_state

            if self.epsilon > self.min_epsilon:
                self.epsilon *= self.epsilon_decay

            print(f"Episode {episode + 1}/{episodes}, Total Reward: {total_reward}, Epsilon: {self.epsilon}")

def main():
    """Demo driver: learn a reward from the simulated expert, then train
    the Q-learning agent on the production environment."""
    environment = ProductionEnv(min_profit=1000)
    irl_agent = IRLAgent(environment)

    # Reward learning from the expert's linear-programming solution.
    learned_reward = irl_agent.learn_reward()
    print("学习到的奖励:", learned_reward)

    # Reinforcement-learning phase on the same environment.
    q_agent = RLAgent(environment)
    q_agent.train(episodes=500)

# Standard entry-point guard: run the demo only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
