# -*- coding: utf-8 -*-

import gym
from gym import spaces
import numpy as np


class ProductionEnv(gym.Env):
    """Gym environment modelling a three-factory production planning problem.

    Observation (26 dims): per-factory raw-material inputs, pass (qualified)
    rates and prices for factories A, B and C, plus two aggregate features.
    Action (24 dims, MultiDiscrete): discrete levels controlling each
    factory's material inputs, pass rates and prices.
    An episode ends after ``max_steps`` steps or when profit turns negative.
    """

    def __init__(self, max_steps=12):
        super(ProductionEnv, self).__init__()

        # Episode length limit and step counter.
        self.max_steps = max_steps
        self.current_step = 0

        # Observation space: 26 non-negative features covering production
        # inputs, pass rates, costs and prices across the three factories.
        self.observation_space = spaces.Box(low=0, high=np.inf, shape=(26,), dtype=np.float32)

        # Action space: 24 discrete dimensions controlling inputs, pass
        # rates and prices.
        self.action_space = spaces.MultiDiscrete([
            4, 4, 4, 4, 4, 4, 3, 3, 3,  # factory A: materials, pass rates, prices
            4, 4, 4, 4, 4, 4, 3, 3, 3,  # factory B: materials, pass rates, prices
            4, 4, 4, 4, 3, 3,           # factory C: materials, pass rates, prices
        ])

        # Initialise the state via reset() so __init__ and reset agree.
        self.state = self.reset()

    def reset(self):
        """Reset the episode and return a fresh random initial state.

        Returns:
            np.ndarray: shape (26,), float32, values in [0, 100).
        """
        self.current_step = 0
        # astype(np.float32) so the state matches the declared
        # observation_space dtype (previously float64).
        self.state = (np.random.rand(26) * 100).astype(np.float32)
        return self.state

    def step(self, action):
        """Apply one action and advance the environment by one step.

        Args:
            action: sequence of 24 discrete levels (see action_space).

        Returns:
            tuple: (state, reward, done, info) per the classic Gym API.
        """
        # Write the action's effects into the state vector.
        self.update_state(action)

        # Profit-based reward for this step.
        reward = self.calculate_reward(action)

        # BUGFIX: advance the clock BEFORE the termination check so the
        # episode ends after exactly max_steps steps (it previously ran
        # one step too long).
        self.current_step += 1

        # Reuse the reward just computed instead of recomputing it.
        done = self.check_done(reward)

        return self.state, reward, done, {}

    def update_state(self, action):
        """Map the 24-dim discrete action onto the 26-dim state vector.

        Each discrete level is scaled to its physical quantity:
        material inputs x10, pass rates /3 (into [0, 1]), prices x100.
        """
        # Factory A: raw-material inputs for goods g1, g2, g4.
        self.state[0] = action[0] * 10
        self.state[1] = action[1] * 10
        self.state[2] = action[2] * 10

        # Factory A: pass rates for g1, g2, g4 (level / 3 -> [0, 1]).
        self.state[3] = action[3] / 3.0
        self.state[4] = action[4] / 3.0
        self.state[5] = action[5] / 3.0

        # Factory A: prices for g1, g2, g4.
        self.state[6] = action[6] * 100
        self.state[7] = action[7] * 100
        self.state[8] = action[8] * 100

        # Factory B: raw-material inputs for g1, g3, g5.
        self.state[9] = action[9] * 10
        self.state[10] = action[10] * 10
        self.state[11] = action[11] * 10

        # Factory B: pass rates for g1, g3, g5.
        self.state[12] = action[12] / 3.0
        self.state[13] = action[13] / 3.0
        self.state[14] = action[14] / 3.0

        # Factory B: prices for g1, g3, g5.
        self.state[15] = action[15] * 100
        self.state[16] = action[16] * 100
        self.state[17] = action[17] * 100

        # Factory C: raw-material inputs for g3, g4.
        self.state[18] = action[18] * 10
        self.state[19] = action[19] * 10

        # Factory C: pass rates for g3, g4.
        self.state[20] = action[20] / 3.0
        self.state[21] = action[21] / 3.0

        # Factory C: prices for g3, g4.
        self.state[22] = action[22] * 100
        self.state[23] = action[23] * 100

        # Aggregate features: sums over the first/second half of the
        # raw action vector (total inputs; total rate/price adjustments).
        self.state[24] = np.sum(action[:12])
        self.state[25] = np.sum(action[12:24])

    def calculate_reward(self, action):
        """Return profit (revenue - cost) with a low-quality penalty.

        Note: ``action`` is unused; the reward is derived entirely from
        the current state. The parameter is kept for interface stability.
        """
        # Revenue = price x pass rate, per product per factory.
        revenue_A = (self.state[6] * self.state[3] +   # factory A, g1
                     self.state[7] * self.state[4] +   # factory A, g2
                     self.state[8] * self.state[5])    # factory A, g4

        revenue_B = (self.state[15] * self.state[12] +  # factory B, g1
                     self.state[16] * self.state[13] +  # factory B, g3
                     self.state[17] * self.state[14])   # factory B, g5

        revenue_C = (self.state[22] * self.state[20] +  # factory C, g3
                     self.state[23] * self.state[21])   # factory C, g4

        total_revenue = revenue_A + revenue_B + revenue_C

        # Material cost = input quantity x unit price (assumed constant 10).
        material_cost_A = (self.state[0] + self.state[1] + self.state[2]) * 10
        material_cost_B = (self.state[9] + self.state[10] + self.state[11]) * 10
        material_cost_C = (self.state[18] + self.state[19]) * 10

        total_cost = material_cost_A + material_cost_B + material_cost_C

        # Base reward: profit.
        reward = total_revenue - total_cost

        # Penalty: -50 for every pass rate below the quality threshold.
        min_quality_threshold = 0.5
        quality_indices = (3, 4, 5, 12, 13, 14, 20, 21)
        quality_penalty = -50 * sum(
            1 for i in quality_indices if self.state[i] < min_quality_threshold
        )

        return reward + quality_penalty

    def check_done(self, reward=None):
        """Return True when the episode should terminate.

        Args:
            reward: profit already computed for this step, if available;
                when None the profit is recomputed (backward compatible
                with the old zero-argument call).
        """
        # Condition 1: step budget exhausted.
        if self.current_step >= self.max_steps:
            return True

        # Condition 2: profit dropped below zero.
        current_profit = reward if reward is not None else self.calculate_reward(None)
        return bool(current_profit < 0)

    def render(self, mode='human'):
        # Minimal text rendering of the current step and state.
        print(f"Step: {self.current_step}, State: {self.state}")

if __name__ == '__main__':
    env = ProductionEnv(max_steps=12)

    # Begin a fresh episode.
    obs = env.reset()
    print("Initial State:", obs)

    # Roll out up to five random actions, stopping early on termination.
    remaining = 5
    done = False
    while remaining > 0 and not done:
        sampled_action = env.action_space.sample()
        obs, reward, done, info = env.step(sampled_action)
        print(f"New State: {obs}, Reward: {reward}, Done: {done}")
        remaining -= 1

    # Confirm the environment resets cleanly after an episode.
    obs = env.reset()
    print("After Reset State:", obs)
