# coding:utf-8

import gym
from gym import spaces
import numpy as np
import random


class ProductionEnv(gym.Env):
    """Toy production-scheduling environment.

    Observation (26 float32 values): for each of 8 product slots, grouped
    as 3 + 3 + 2, a raw-material input, a pass rate and a price, plus two
    aggregates -- total raw-material input (index 24) and total qualified
    output (index 25).

    Action: ``MultiDiscrete([4]*12 + [3]*6 + [4]*6)``, 24 components.

    NOTE(review): the action-space comment declares the layout as
    "12 inputs, 6 pass rates, 6 prices", but ``update_state`` consumes the
    vector interleaved per product group (input/rate/price blocks of 3, 3
    and 2 = 8 of each).  The two layouts disagree; confirm the intended
    one with the author.  This revision preserves ``update_state``'s
    interleaved reading.
    """

    def __init__(self, max_steps=12):
        """Create the environment.

        :param max_steps: episode length cap (``done`` after this many steps).
        """
        super(ProductionEnv, self).__init__()

        self.max_steps = max_steps
        self.current_step = 0

        # State space: 26 dimensions (see class docstring).
        self.observation_space = spaces.Box(low=0, high=np.inf, shape=(26,), dtype=np.float32)

        # Action space: 24 dimensions (see class docstring for the layout caveat).
        self.action_space = spaces.MultiDiscrete([4] * 12 + [3] * 6 + [4] * 6)

        self.state = np.zeros(26, dtype=np.float32)  # placeholder; reset() overwrites
        self.reset()

    def reset(self):
        """Start a new episode and return the initial observation."""
        self.current_step = 0
        # Fix: cast to float32 so the observation matches the dtype declared
        # by observation_space (the original returned float64).
        self.state = (np.random.rand(26) * 100).astype(np.float32)
        return self.state

    def step(self, action):
        """Apply ``action``; return ``(obs, reward, done, info)`` (gym<=0.25 API)."""
        self.update_state(action)
        reward = self.calculate_reward(action)
        done = self.check_done()
        self.current_step += 1
        return self.state, reward, done, {}

    def update_state(self, action):
        """Write the decoded action into the state vector and refresh aggregates."""
        # Group A (g1, g2, g4): raw-material inputs, pass rates, prices.
        self.state[0:3] = action[0:3] * 10
        self.state[3:6] = action[3:6] / 3.0
        self.state[6:9] = action[6:9] * 100

        # Group B (g1, g3, g5).
        self.state[9:12] = action[9:12] * 10
        self.state[12:15] = action[12:15] / 3.0
        self.state[15:18] = action[15:18] * 100

        # Group C (g3, g4).
        self.state[18:20] = action[18:20] * 10
        self.state[20:22] = action[20:22] / 3.0
        self.state[22:24] = action[22:24] * 100

        # Fix: total raw-material input must sum only the input slots.
        # The original summed state[0:12], which also included group A's
        # pass rates (3:6) and prices (6:9).
        self.state[24] = (np.sum(self.state[0:3])
                          + np.sum(self.state[9:12])
                          + np.sum(self.state[18:20]))
        # Total qualified output: sum of all pass-rate slots.
        self.state[25] = np.sum(self.state[3:6]) + np.sum(self.state[12:15]) + np.sum(self.state[20:22])

    def calculate_reward(self, action):
        """Profit = revenue (price * pass rate per slot) - material cost.

        ``action`` is unused; ``check_done`` deliberately calls this with
        ``None``.  A penalty of -50 applies per pass-rate slot below 0.5.
        """
        revenue_A = (self.state[6] * self.state[3] +
                     self.state[7] * self.state[4] +
                     self.state[8] * self.state[5])

        revenue_B = (self.state[15] * self.state[12] +
                     self.state[16] * self.state[13] +
                     self.state[17] * self.state[14])

        revenue_C = (self.state[22] * self.state[20] +
                     self.state[23] * self.state[21])

        total_revenue = revenue_A + revenue_B + revenue_C

        # Material cost: 10 currency units per unit of raw input.
        material_cost_A = (self.state[0] + self.state[1] + self.state[2]) * 10
        material_cost_B = (self.state[9] + self.state[10] + self.state[11]) * 10
        material_cost_C = (self.state[18] + self.state[19]) * 10

        total_cost = material_cost_A + material_cost_B + material_cost_C

        reward = total_revenue - total_cost

        # Penalize every pass-rate slot that falls below the threshold.
        min_quality_threshold = 0.5
        quality_penalty = 0
        for i in [3, 4, 5, 12, 13, 14, 20, 21]:
            if self.state[i] < min_quality_threshold:
                quality_penalty -= 50

        reward += quality_penalty
        return reward

    def check_done(self):
        """Episode ends when the step cap is hit or current profit is negative."""
        if self.current_step >= self.max_steps:
            return True
        # calculate_reward ignores its action argument, so None is safe here.
        current_profit = self.calculate_reward(None)
        if current_profit < 0:
            return True
        return False

    def render(self, mode='human'):
        """Print the current step and raw state vector."""
        print(f"Step: {self.current_step}, State: {self.state}")


class QLearningAgent:
    """Tabular Q-learning agent for a MultiDiscrete action space.

    The Q-table is factored per action dimension: ``q_table[s, d, v]`` is
    the estimated value of choosing discrete value ``v`` for action
    dimension ``d`` in discretized state ``s``.  All dimensions share the
    scalar environment reward.

    Fixes vs. the original: the exploitation branch of ``choose_action``
    returned a single scalar argmax, which is not a valid MultiDiscrete
    action and crashed the environment once exploration decayed; the old
    (1000, 24) table could not hold per-value estimates; a try/except with
    an identical body in ``update_q_table`` was dead weight.
    """

    N_STATES = 1000        # number of discretized states
    N_ACTION_DIMS = 24     # components in the MultiDiscrete action
    MAX_ACTION_VALUES = 4  # largest value count of any dimension ([4]/[3]/[4])

    def __init__(self, env, learning_rate=0.1, discount_factor=0.95,
                 exploration_prob=1.0, exploration_decay=0.99):
        """Store hyperparameters and allocate the factored Q-table.

        :param env: environment exposing ``reset``, ``step`` and
            ``action_space`` (with ``sample()`` and ``nvec``).
        :param learning_rate: TD step size (alpha).
        :param discount_factor: future-reward discount (gamma).
        :param exploration_prob: initial epsilon for epsilon-greedy.
        :param exploration_decay: multiplicative epsilon decay per episode.
        """
        self.env = env
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_prob = exploration_prob
        self.exploration_decay = exploration_decay
        # One row of value estimates per (state, action-dimension) pair.
        self.q_table = np.zeros(
            (self.N_STATES, self.N_ACTION_DIMS, self.MAX_ACTION_VALUES))

    def discretize_state(self, state):
        """Map a continuous state vector to an index in [0, N_STATES).

        Only the first two components are bucketed (coarse example scheme,
        as in the original); the index is clamped so out-of-range states
        can never overflow the table.
        """
        index = int(state[0] // 10) + int(state[1] // 10) * 10
        return min(max(index, 0), self.N_STATES - 1)

    def choose_action(self, state):
        """Epsilon-greedy selection; always returns a full action vector."""
        if random.uniform(0, 1) < self.exploration_prob:
            return self.env.action_space.sample()  # explore
        state_index = self.discretize_state(state)
        # Greedy value for each action dimension independently.
        greedy = np.argmax(self.q_table[state_index], axis=1)
        # Clamp each component to its dimension's valid range, since the
        # table is padded to MAX_ACTION_VALUES for every dimension.
        nvec = np.asarray(self.env.action_space.nvec)
        return np.minimum(greedy, nvec - 1)

    def update_q_table(self, state, action, reward, next_state):
        """One TD(0) update per action dimension, sharing the scalar reward."""
        state_index = self.discretize_state(state)
        next_index = self.discretize_state(next_state)
        for dim, value in enumerate(np.asarray(action, dtype=int)):
            best_next = np.max(self.q_table[next_index, dim])
            td_target = reward + self.discount_factor * best_next
            td_error = td_target - self.q_table[state_index, dim, value]
            self.q_table[state_index, dim, value] += self.learning_rate * td_error

    def train(self, episodes):
        """Run ``episodes`` full episodes, decaying epsilon after each one."""
        for episode in range(episodes):
            state = self.env.reset()
            done = False

            while not done:
                action = self.choose_action(state)
                next_state, reward, done, _ = self.env.step(action)
                self.update_q_table(state, action, reward, next_state)
                state = next_state

            self.exploration_prob *= self.exploration_decay  # decay exploration

if __name__ == '__main__':

    # Build the environment and its Q-learning agent.
    env = ProductionEnv(max_steps=12)
    agent = QLearningAgent(env)

    # Train the agent.
    agent.train(episodes=1000)

    # Evaluate: run twelve one-month episodes and record each month's profit.
    monthly_profits = []
    for month in range(12):
        obs = env.reset()
        finished = False
        profit_this_month = 0

        while not finished:
            chosen = agent.choose_action(obs)
            obs, step_reward, finished, _ = env.step(chosen)
            profit_this_month += step_reward

        monthly_profits.append(profit_this_month)

    # Annual total is just the sum of the monthly figures.
    total_profit = sum(monthly_profits)

    print("一年总利润:", total_profit)
    print("每个月利润:", monthly_profits)
