# -*- coding:utf-8 -*-

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import seaborn as sns


class FactoryEnvDynamicK:
    """Toy factory environment with a dynamic per-period limit ``K[t]`` on
    how many product types may be produced simultaneously.

    State layout (73 variables per period) — inferred from the indexing in
    ``step``/``calculate_profit``; TODO confirm against the data spec:
        [0:max_products]    raw-material usage, one per product (assumed)
        [19:19+max_products] unit prices, one per product
        [34:34+max_products] unit costs, one per product
        [55:55+max_products] quality (pass) rates, one per product
    """

    def __init__(self, max_products=5, history_length=12):
        # Number of product types.
        self.max_products = max_products
        # Length of the state-history window (number of periods).
        self.history_length = history_length
        # Number of state variables per period.
        self.single_state_dim = 73
        # Per-period limit on simultaneously produced product types
        # (re-randomized by reset(); initialized here so the attribute
        # always exists).
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)
        # Rolling window of the last `history_length` states.
        self.state_history = np.zeros((self.history_length, self.single_state_dim), dtype=np.float32)
        self.reset()

    def reset(self):
        """Randomize the state history and the per-period limits K.

        Returns the (history_length, single_state_dim) state history.
        """
        initial_state = np.random.rand(self.single_state_dim)
        self.state_history = np.tile(initial_state, (self.history_length, 1))
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)
        return self.state_history

    def step(self, action):
        """Apply one action and advance the environment one period.

        Parameters
        ----------
        action : dict with keys
            "matrix"  : (history_length, max_products) 0/1 production plan;
                        modified in place when a period exceeds its limit.
            "control" : (single_state_dim,) additive adjustment to the state.

        Returns
        -------
        (state_history, reward, done, info) in the gym-style convention.
        """
        production_matrix = action["matrix"]
        control_action = action["control"]

        # Most recent state; copied so the history is not mutated in place.
        current_state = self.state_history[-1].copy()
        n = self.max_products

        # Enforce the per-period limit: if period t plans more product types
        # than K[t] allows, keep only the K[t] most profitable ones.
        for t in range(self.history_length):
            if np.sum(production_matrix[t]) > self.K[t]:
                # Expected margin per product = price * pass rate.
                # BUGFIX: both slices are now `max_products` wide — the
                # original multiplied a 5-wide slice by a 10-wide slice,
                # which raises a broadcast error.
                profits = current_state[19:19 + n] * current_state[55:55 + n]
                selected_indices = np.argsort(profits)[-self.K[t]:]
                production_matrix[t] = 0  # clear the period's plan
                production_matrix[t, selected_indices] = 1  # keep top K[t]

        # Zero out price/cost/quality for products not produced in the
        # final (current) period.
        for i in range(n):
            if production_matrix[-1, i] == 0:  # product i not produced
                current_state[19 + i] = 0  # price
                current_state[34 + i] = 0  # cost
                current_state[55 + i] = 0  # quality rate

        # Apply the control adjustment and keep the state inside [0, 1].
        current_state += control_action
        current_state = np.clip(current_state, 0, 1)

        # Shift the rolling window and append the new state.
        self.state_history = np.roll(self.state_history, shift=-1, axis=0)
        self.state_history[-1] = current_state

        # Reward is the profit of the newly reached state.
        reward = self.calculate_profit(current_state, production_matrix[-1])

        done = self.check_termination()

        return self.state_history, reward, done, {}

    def calculate_profit(self, state, production_vector):
        """Profit = sum(price * quality) - sum(cost * raw-material usage).

        BUGFIX: every per-product slice is now `max_products` wide. The
        original used 10-wide cost/quality slices and an 8-wide raw-material
        slice against a 5-wide production vector, which raises shape errors.
        """
        n = self.max_products
        prices = state[19:19 + n] * production_vector
        costs = state[34:34 + n] * production_vector
        quality_rates = state[55:55 + n] * production_vector
        # Assumes the first `n` state variables are per-product raw-material
        # usage — TODO confirm against the state spec.
        raw_materials = state[:n]
        profit = np.sum(prices * quality_rates) - np.sum(costs * raw_materials)
        return profit

    def check_termination(self):
        """Example termination rule: cumulative window profit beats a target."""
        recent_profits = [
            self.calculate_profit(s, np.ones(self.max_products))
            for s in self.state_history
        ]
        # bool(...) so callers get a plain Python bool, not numpy.bool_.
        return bool(sum(recent_profits) > 10000)  # target profit


class LSTMModel(nn.Module):
    """Single-layer LSTM regressor.

    Maps a (batch, seq_len, input_dim) sequence to a (batch, output_dim)
    prediction computed from the hidden state of the final time step.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Run the whole sequence through the LSTM; ignore (h_n, c_n).
        sequence_features, _ = self.lstm(x)
        # Project only the last time step's features to the output.
        last_step = sequence_features[:, -1, :]
        return self.fc(last_step)

if __name__ == '__main__':
    # Build the environment.
    env = FactoryEnvDynamicK()

    # Hyperparameters.
    input_dim = 73
    hidden_dim = 128
    # Action layout: 5 production on/off logits plus one control offset per
    # state variable. BUGFIX: the original used 5 + 33, but the environment
    # adds the control vector to the full 73-dim state, so the control part
    # must be 73 wide or step() raises a broadcast error.
    output_dim = 5 + 73
    learning_rate = 0.001
    num_epochs = 1000
    batch_size = 4  # NOTE(review): currently unused — one rollout per epoch

    # Model, loss, and optimizer.
    model = LSTMModel(input_dim, hidden_dim, output_dim)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop.
    for epoch in range(num_epochs):
        # (1, history_length, state_dim) batch for the LSTM.
        state = torch.tensor(env.reset(), dtype=torch.float32).unsqueeze(0)
        action_logits = model(state)

        # Threshold the first 5 outputs into a binary production decision.
        # BUGFIX: the env expects a (history_length, max_products) matrix
        # (it indexes matrix[t] and matrix[-1, i]); the original passed a
        # squeezed 1-D tensor. Tile the decision across all periods and
        # hand the env numpy arrays, which is what it manipulates.
        production_vector = (action_logits[:, :5] > 0.5).float().squeeze(0)
        production_matrix = np.tile(production_vector.detach().numpy(),
                                    (env.history_length, 1))
        control_action = action_logits[:, 5:].squeeze(0).detach().numpy()

        action = {
            "matrix": production_matrix,
            "control": control_action,
        }

        next_state, reward, done, _ = env.step(action)

        # Regress every logit toward the achieved reward, as the original
        # intended. BUGFIX: shape-matched target instead of broadcasting a
        # scalar (1,) target against (1, output_dim) predictions.
        target = torch.full_like(action_logits, float(reward))
        loss = criterion(action_logits, target)

        # Update the model.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item():.4f}")

    # Visualize the dynamic per-period product-type limit.
    K = env.K
    plt.figure(figsize=(8, 4))
    # BUGFIX: derive the x-axis from the env instead of hard-coding 12.
    plt.plot(range(1, env.history_length + 1), K, marker='o',
             label="Allowed Product Types")
    plt.xlabel("Period")
    plt.ylabel("Max Product Types")
    plt.title("Dynamic Product Types Allowed per Period")
    plt.legend()
    plt.grid()
    plt.show()
