# coding=utf-8

import numpy as np
import random
from scipy.optimize import linprog


# 生产环境类
class ProductionEnv:
    def __init__(self, min_profit=1000):
        self.num_months = 12
        self.num_factories = 3  # A, B, C工厂
        self.num_products = 5  # g1, g2, g3, g4, g5
        self.factories = ['A', 'B', 'C']
        self.products = ['g1', 'g2', 'g3', 'g4', 'g5']
        self.production_limits = np.random.randint(50, 150, (3, 5))
        self.state = np.zeros((self.num_months, self.num_factories, self.num_products)) # 生产量
        self.base_quality = np.random.uniform(0.7, 0.95, (3, 5))
        self.base_cost = np.random.uniform(10, 50, (3, 5))
        self.base_price = np.random.uniform(100, 200, (3,5))
        # self.prices = np.random.uniform(100, 200, 5)
        self.cost_multiplier = 1.2
        self.action_space = self.create_action_space()
        self.min_profit = min_profit

    def create_action_space(self):
        actions = []
        for factory in range(self.num_factories):
            for product in range(self.num_products):
                actions.append(f'increase_x_{factory}_{product}')  # 增加生产量
                actions.append(f'decrease_x_{factory}_{product}')  # 减少生产量
                actions.append(f'increase_y_{factory}_{product}')  # 提高合格率
                actions.append(f'decrease_y_{factory}_{product}')  # 降低合格率
                actions.append(f"increase_p_{factory}_{product}")  # 增加价格
                actions.append(f"decrease_p_{factory}_{product}")  # 降低价格
        actions.append('no_change')  # 新增不改变动作
        return actions

    def reset(self):
        self.state = np.zeros((self.num_months, self.num_factories, self.num_products))
        return self.state


    def step(self, month, action):
        factory_index, product_index = map(int, action.split('_')[1:3]) if action != 'no_change' else (0, 0)
        current_quality = self.base_quality[factory_index].copy()
        current_cost = self.base_cost[factory_index].copy()
        current_price = self.base_price[factory_index].copy()

        # 根据动作调整生产量
        if action.startswith('increase_x'):
            if self.state[month, factory_index, product_index] < self.production_limits[factory_index, product_index]:
                self.state[month, factory_index, product_index] += 10
        elif action.startswith('decrease_x'):
            self.state[month, factory_index, product_index] = max(0, self.state[month, factory_index, product_index] - 10)
        elif action.startswith('increase_y'):
            new_quality = min(0.95, current_quality[product_index] + 0.1)
            current_cost[product_index] += self.cost_multiplier * (
                        new_quality - self.base_quality[factory_index][product_index]) ** 2
            current_quality[product_index] = new_quality
        elif action.startswith('decrease_y'):
            new_quality = max(0.7, current_quality[product_index] - 0.1)
            current_cost[product_index] -= self.cost_multiplier * (
                        self.base_quality[factory_index][product_index] - new_quality) ** 2
            current_quality[product_index] = new_quality
        elif action.startswith('increase_p'):
            new_price = min(20, current_price[product_index] + 1)
            current_price[product_index] = new_price
        elif action.startswith('increase_p'):
            new_price = max(2, current_price[product_index] - 1)
            current_price[product_index] = new_price
        elif action.startswith('no_change'):
            pass

        done = month == self.num_months - 1

        reward = self.calculate_profit(current_quality, current_cost, month, factory_index)
        return self.state, reward, done


        # factory_index, product_index = action  # 这里 action 应该是 (factory_index, product_index)
        # self.state[month, factory_index, product_index] += 10
        # self.state[month, factory_index, product_index] = min(self.state[month, factory_index, product_index], self.production_limits[factory_index, product_index])
        #
        # quality = self.base_quality[factory_index][product_index]
        # cost = self.base_cost[factory_index][product_index]
        # reward = self.calculate_profit(quality, cost, month, factory_index)
        #
        # done = month == self.num_months - 1
        # next_state = (month + 1, factory_index, product_index)
        # return next_state, reward, done

    def calculate_profit(self, price, quality, cost, month, factory_index):
        # profit = (quality * self.prices.sum() - cost * self.state[month, factory_index].sum())
        # return profit
        profits = (price - cost) * quality * self.state[month, factory_index]
        total_profit = np.sum(profits)

        if total_profit < self.min_profit:
            return -1000  # 违反约束，给予惩罚

        # TODO:考虑限制条件的惩罚

        return total_profit


# Reinforcement-learning agent class
class RLAgent:
    """Tabular Q-learning agent over the environment's string action space.

    The Q-table is indexed by (month, action_index): one row per month,
    one column per action string in ``env.action_space``. This fixes the
    original code, where choose_action returned (factory, product) tuples
    that ``env.step`` (which expects an action string) could not consume,
    and where the Q-table's action axis (size 7) did not match the
    action space (size 91).
    """

    def __init__(self, env):
        self.env = env
        # One Q-value per (month, action).
        self.q_table = np.zeros((env.num_months, len(env.action_space)))
        self.learning_rate = 0.1
        self.gamma = 0.95
        self.epsilon = 1.0          # exploration rate, decayed per episode
        self.epsilon_decay = 0.99
        self.epsilon_min = 0.1
        self.replay_memory = []
        self.batch_size = 32

    def choose_action(self, state, month=0):
        """Epsilon-greedy selection; returns an action string from env.action_space."""
        if np.random.rand() < self.epsilon:
            return random.choice(self.env.action_space)
        best_index = int(np.argmax(self.q_table[month]))
        return self.env.action_space[best_index]

    def remember(self, state, action, reward, next_state, done, month=0):
        """Store one transition; `month` is kept so replay can index the Q-table."""
        self.replay_memory.append((state, action, reward, next_state, done, month))

    def replay(self):
        """Sample a minibatch and apply one Q-learning update per transition."""
        if len(self.replay_memory) < self.batch_size:
            return

        batch = random.sample(self.replay_memory, self.batch_size)
        for state, action, reward, next_state, done, month in batch:
            action_index = self.env.action_space.index(action)
            if done:
                target = reward
            else:
                # The successor "state" for bootstrapping is the next month.
                next_month = min(month + 1, self.env.num_months - 1)
                target = reward + self.gamma * np.max(self.q_table[next_month])

            self.q_table[month, action_index] += self.learning_rate * (
                target - self.q_table[month, action_index])

    def train(self, episodes):
        """Run `episodes` episodes of one action per month, learning via replay."""
        for episode in range(episodes):
            state = self.env.reset()
            total_reward = 0

            for month in range(self.env.num_months):
                action = self.choose_action(state, month)
                # BUG FIX: env.step returns (state, reward, done); the original
                # unpacked only two values and never refreshed `done`.
                next_state, reward, done = self.env.step(month, action)

                self.remember(state, action, reward, next_state, done, month)
                self.replay()

                total_reward += reward
                state = next_state

            if self.epsilon > self.epsilon_min:
                self.epsilon *= self.epsilon_decay

            print(f"Episode {episode + 1}/{episodes}, Total Reward: {total_reward}, Epsilon: {self.epsilon}")

# Class simulating expert behaviour via linear programming
class Expert:
    """Expert baseline that solves a 12-month production LP with linprog.

    NOTE(review): the price/cost/quality constraints are expressed over the
    *same* decision variables as production volume, and min_price/min_quality
    are applied as UPPER bounds (A_ub @ x <= b). If they are meant as
    minimums, those rows (and bounds) should be negated — but doing so with
    main()'s parameters (min_price=10, max_cost=5) makes the LP infeasible,
    so the original direction is preserved pending confirmation.
    """

    def __init__(self, env, factories, products, max_input, min_price, max_cost, min_quality):
        self.env = env
        self.factories = factories
        self.products = products
        self.max_input = max_input
        self.min_price = min_price
        self.max_cost = max_cost
        self.min_quality = min_quality
        # BUG FIX: initialised so callers reading them after a failed
        # simulate() get None instead of AttributeError.
        self.production_plan = None
        self.profit = None

    def _var_index(self, factory, product, month):
        """Flat index of decision variable x[factory, product, month]."""
        return factory * self.products * 12 + product * 12 + month

    def _product_rows(self, bound):
        """Per-product averaged constraint rows (coefficient 1/12), each with `bound`."""
        num_vars = self.factories * self.products * 12
        rows, bounds = [], []
        for j in range(self.products):
            row = np.zeros(num_vars)
            for i in range(self.factories):
                for k in range(12):
                    row[self._var_index(i, j, k)] = 1
            rows.append(row / 12)  # averaged over the 12 months
            bounds.append(bound)
        return rows, bounds

    def get_constraints(self):
        """Assemble (A, b) for linprog's A_ub @ x <= b_ub form."""
        num_vars = self.factories * self.products * 12  # 12 months

        A = []
        b = []

        # Total input per factory (all products, all months) <= max_input.
        for i in range(self.factories):
            row = np.zeros(num_vars)
            for j in range(self.products):
                for k in range(12):
                    row[self._var_index(i, j, k)] = 1
            A.append(row)
            b.append(self.max_input)

        # Averaged per-product constraints: price, cost, quality (in that order,
        # matching the original row layout). See class NOTE about directions.
        for bound in (self.min_price, self.max_cost, self.min_quality):
            rows, bs = self._product_rows(bound)
            A.extend(rows)
            b.extend(bs)

        return np.array(A), np.array(b)

    def simulate(self):
        """Solve the LP (maximize total activity); store plan and profit.

        Returns True on success, False on failure (plan/profit untouched).
        """
        num_vars = self.factories * self.products * 12
        c = -np.ones(num_vars)  # linprog minimizes, so negate to maximize

        A, b = self.get_constraints()

        result = linprog(c, A_ub=A, b_ub=b, method='highs')

        if not result.success:
            print("Optimization failed:", result.message)
            return False

        self.production_plan = result.x.reshape((self.factories, self.products, 12))
        self.profit = -result.fun
        return True


def main():
    """Build the environment, train the RL agent, then run the LP expert."""
    num_factories, num_products = 3, 5
    max_input, min_price = 10000, 10
    max_cost, min_quality = 5, 0.8

    environment = ProductionEnv()
    learner = RLAgent(environment)
    planner = Expert(environment, num_factories, num_products,
                     max_input, min_price, max_cost, min_quality)

    learner.train(episodes=1000)
    planner.simulate()
    print(planner.production_plan)
    print(planner.profit)




# Script entry point: run training and the LP expert demo.
if __name__ == "__main__":
    main()
