# -*- coding:utf-8 -*-

"""
Mask
"""

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import seaborn as sns

class FactoryEnvDynamicK:
    """Factory environment with a per-period cap K[t] on how many product
    types may be produced.

    State layout (73 variables per period, inferred from the slicing in this
    class — TODO confirm against the data specification):
      [0:8]   raw-material levels
      [19:24] product prices      (5 products)
      [34:39] product costs       (5 products)
      [55:60] product pass rates  (5 products)
    """

    def __init__(self, max_products=5, history_length=12):
        # Number of product types.
        self.max_products = max_products

        # Length of the observation history window.
        self.history_length = history_length

        # Number of state variables per period (73).
        self.single_state_dim = 73

        # Dynamic cap on product types: one random limit per period.
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)

        # Rolling window of the most recent `history_length` states.
        self.state_history = np.zeros((self.history_length, self.single_state_dim), dtype=np.float32)

        self.reset()

    def reset(self):
        """Re-seed the state history and re-draw the per-period caps.

        :return: the (history_length, single_state_dim) state history.
        """
        initial_state = np.random.rand(self.single_state_dim)
        self.state_history = np.tile(initial_state, (self.history_length, 1))
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)
        return self.state_history

    def step(self, action):
        """Apply one action and advance the environment.

        :param action: dict with "matrix" — a (history_length, max_products)
            binary production matrix — and "control" — a
            (single_state_dim,) additive adjustment to the state.
        :return: (state_history, reward, done, info) tuple.
        """
        production_matrix = action["matrix"]
        control_action = action["control"]

        # Work on a copy of the most recent state.
        current_state = self.state_history[-1].copy()

        # Per-product profitability = price * pass rate.
        # BUG FIX: the quality slice was state[55:65] (10 wide) which could
        # not broadcast against the 5-wide price slice; both now cover the
        # same 5 products.  Hoisted out of the loop — it does not depend on t.
        profits = current_state[19:24] * current_state[55:60]

        # Enforce the per-period cap: if a row enables more than K[t]
        # products, keep only the K[t] most profitable ones.
        for t in range(self.history_length):
            if np.sum(production_matrix[t]) > self.K[t]:
                selected_indices = np.argsort(profits)[-self.K[t]:]
                production_matrix[t] = 0
                production_matrix[t, selected_indices] = 1

        # Zero out price/cost/pass-rate entries of products not produced in
        # the most recent period.
        for i in range(self.max_products):
            if production_matrix[-1, i] == 0:
                current_state[19 + i] = 0   # price
                current_state[34 + i] = 0   # cost
                current_state[55 + i] = 0   # pass rate

        # Apply the control adjustment and clamp into [0, 1].
        current_state += control_action
        current_state = np.clip(current_state, 0, 1)

        # Slide the history window and append the new state.
        self.state_history = np.roll(self.state_history, shift=-1, axis=0)
        self.state_history[-1] = current_state

        # Reward: profit of the latest period.
        reward = self.calculate_profit(current_state, production_matrix[-1])

        done = self.check_termination()

        return self.state_history, reward, done, {}

    def calculate_profit(self, state, production_vector):
        """Profit of one period for the given binary production vector.

        BUG FIX: the cost and quality slices were 10 wide ([34:44], [55:65])
        and raised broadcast errors against the 5-wide production vector;
        every per-product slice now covers the same 5 products.
        """
        prices = state[19:24] * production_vector
        costs = state[34:39] * production_vector
        quality_rates = state[55:60] * production_vector
        # Material usage per product — assumes product i consumes material i.
        # TODO confirm the product -> raw-material mapping (the original
        # multiplied an 8-wide material slice by a 10-wide cost slice, which
        # could never broadcast).
        raw_materials = state[:5]
        profit = np.sum(prices * quality_rates) - np.sum(costs * raw_materials)
        return profit

    def check_termination(self):
        """Example termination rule: stop once cumulative profit over the
        history window (with every product enabled) exceeds a target."""
        recent_profits = [self.calculate_profit(state, np.ones(self.max_products)) for state in self.state_history]
        if sum(recent_profits) > 10000:  # profit target
            return True
        return False

    def enforce_realistic_constraints(self, state, production_vector):
        """Clamp state fields into plausible ranges and binarize the
        production vector.  Mutates `state` in place and returns both."""
        # Raw-material supply cap (assumed 1.0 per material).
        max_raw_materials = np.array([1.0] * 8)
        state[:8] = np.minimum(state[:8], max_raw_materials)

        # Price bounds (from historical data).
        min_price, max_price = 0.5, 1.5
        state[19:24] = np.clip(state[19:24], min_price, max_price)

        # Cost bounds.  BUG FIX: slice narrowed from [34:44] to [34:39] to
        # match the 5 cost slots actually used by step().
        min_cost, max_cost = 0.1, 1.0
        state[34:39] = np.clip(state[34:39], min_cost, max_cost)

        # Pass-rate bounds.  BUG FIX: slice narrowed from [55:65] to [55:60]
        # to match the 5 pass-rate slots actually used by step().
        min_quality, max_quality = 0.7, 1.0
        state[55:60] = np.clip(state[55:60], min_quality, max_quality)

        # Binarize the production vector (0 or 1).
        production_vector = (production_vector > 0.5).astype(float)
        return state, production_vector

class LSTMModel(nn.Module):
    """LSTM policy head producing a concatenated action vector:
    `production_dim` sigmoid-squashed production logits followed by
    tanh-squashed control adjustments scaled into a small range.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, production_dim=5, control_scale=0.1):
        """
        :param input_dim: per-step feature size of the input sequence
        :param hidden_dim: LSTM hidden size
        :param output_dim: total action size (production + control)
        :param production_dim: leading outputs treated as production logits
            (default 5, matching the original hard-coded split)
        :param control_scale: max magnitude of each control adjustment
            (default 0.1, matching the original hard-coded factor)
        """
        super(LSTMModel, self).__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.production_dim = production_dim
        self.control_scale = control_scale

    def forward(self, x):
        """Map a (batch, seq, input_dim) sequence to a (batch, output_dim)
        action vector using the last LSTM time step."""
        lstm_out, _ = self.lstm(x)
        output = self.fc(lstm_out[:, -1, :])
        # Squash each head into its actual range.
        production_logits = torch.sigmoid(output[:, :self.production_dim])  # in [0, 1]
        control_logits = torch.tanh(output[:, self.production_dim:]) * self.control_scale  # bounded adjustments
        return torch.cat([production_logits, control_logits], dim=1)

if __name__ == '__main__':
    # Build the dynamic-K factory environment.
    env = FactoryEnvDynamicK()

    # Hyperparameters.
    input_dim = 73
    hidden_dim = 128
    # Action layout: 5 production on/off logits + one control delta per state
    # variable.  BUG FIX: FactoryEnvDynamicK.step adds the control vector
    # element-wise to the 73-dim state, so it needs 73 entries (was 33, which
    # raised a broadcast error).
    output_dim = 5 + input_dim
    learning_rate = 0.001
    num_epochs = 1000
    batch_size = 4  # NOTE(review): unused — the loop below runs one rollout per epoch

    # Model, loss and optimizer.
    model = LSTMModel(input_dim, hidden_dim, output_dim)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop.
    for epoch in range(num_epochs):
        state = torch.tensor(env.reset(), dtype=torch.float32).unsqueeze(0)
        action_logits = model(state)

        # Threshold production logits into a binary on/off decision.
        production_vector = (action_logits[:, :5] > 0.5).float()
        control_action = action_logits[:, 5:]

        # BUG FIX: env.step indexes the matrix as (history_length,
        # max_products); tile the single per-product decision across all
        # periods (the original passed a bare 5-vector, which broke the
        # 2-D indexing inside step()).
        decision = production_vector.squeeze(0).detach().numpy()
        action = {
            "matrix": np.tile(decision, (env.history_length, 1)),
            "control": control_action.squeeze(0).detach().numpy()
        }

        # Clamp the latest state into realistic ranges and binarize the
        # production decision before stepping.
        current_state = env.state_history[-1]
        adjusted_state, adjusted_vector = env.enforce_realistic_constraints(current_state, decision)
        action["matrix"][-1] = adjusted_vector

        next_state, reward, done, _ = env.step(action)

        # Regress every logit toward the scalar reward (same objective as the
        # original code, but with an explicit target shape so MSELoss does
        # not warn about broadcasting).  NOTE(review): this is not a
        # principled RL objective.
        target = torch.full_like(action_logits, float(reward))
        loss = criterion(action_logits, target)

        # Update the model.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item():.4f}")

    # Visualize the per-period cap on product types.
    K = env.K
    plt.figure(figsize=(8, 4))
    plt.plot(range(1, 13), K, marker='o', label="Allowed Product Types")
    plt.xlabel("Period")
    plt.ylabel("Max Product Types")
    plt.title("Dynamic Product Types Allowed per Period")
    plt.legend()
    plt.grid()
    plt.show()

    # Example production-matrix heatmap.
    production_matrix = np.random.randint(0, 2, (12, 5))
    for t in range(12):
        # BUG FIX: keep the first K[t] active products.  The old code zeroed
        # the trailing columns from index sum-K[t] on, which could leave a
        # row over budget (e.g. a row of five 1s with K=1 kept four of them).
        active = np.flatnonzero(production_matrix[t])
        if active.size > K[t]:
            production_matrix[t, active[K[t]:]] = 0

    plt.figure(figsize=(8, 6))
    sns.heatmap(production_matrix, annot=True, cmap="coolwarm", cbar=False,
                xticklabels=["g1", "g2", "g3", "g4", "g5"],
                yticklabels=[f"Period {i+1}" for i in range(12)])
    plt.title("Production Matrix with Dynamic Constraints")
    plt.xlabel("Products")
    plt.ylabel("Periods")
    plt.show()

from torch.distributions import Normal

class FactoryEnv:
    """Toy factory environment with a randomly drawn number of products."""

    def __init__(self, max_products=5, state_dim=4):
        """
        Initialize the environment.
        :param max_products: maximum number of products
        :param state_dim: per-product state size (output, yield, price, cost)
        """
        self.max_products = max_products
        self.state_dim = state_dim
        # Per-product action: output, yield adj., price adj., cost adj.
        self.action_dim = 4
        self.reset()

    def reset(self):
        """Draw a fresh product count K and a random (K, state_dim) state."""
        self.K = np.random.randint(1, self.max_products + 1)
        self.state = np.random.rand(self.K, self.state_dim)
        return self.state

    def step(self, action, mask):
        """
        Advance one step.
        :param action: (K, action_dim) action array
        :param mask: mask suppressing invalid actions
        :return: (state, reward, done, info)
        """
        expected_shape = (self.K, self.action_dim)
        assert action.shape == expected_shape, "动作维度与产品数量不匹配"
        # Reward is the masked total output (column 0) — a placeholder metric.
        masked_output = action[:, 0] * mask
        reward = np.sum(masked_output)

        # Small in-place Gaussian drift on the state.
        self.state += np.random.normal(0, 0.01, size=self.state.shape)

        # This toy environment never terminates on its own.
        done = False
        return self.state, reward, done, {}

class LSTM_A2C(nn.Module):
    """Actor-critic network with a shared LSTM encoder.

    The actor head emits ``action_dim * 2`` values (distribution mean and
    spread parameters); the critic head emits a scalar state value.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=128):
        """
        :param state_dim: per-step state feature size
        :param action_dim: action size
        :param hidden_dim: LSTM hidden size
        """
        super(LSTM_A2C, self).__init__()
        self.lstm = nn.LSTM(state_dim, hidden_dim, batch_first=True)
        self.actor_fc = nn.Linear(hidden_dim, action_dim * 2)  # distribution params (mean, spread)
        self.critic_fc = nn.Linear(hidden_dim, 1)              # state-value head

    def forward(self, x, mask):
        """
        Forward pass.
        :param x: (batch, seq, state_dim) input sequence
        :param mask: multiplicative mask applied to the actor output
        :return: (masked actor logits, critic value)
        """
        encoded, _ = self.lstm(x)
        last_step = encoded[:, -1, :]          # keep only the final time step
        value = self.critic_fc(last_step)      # state value
        masked_logits = self.actor_fc(last_step) * mask  # mask the raw actor output
        return masked_logits, value

class A2CTrainer:
    """Advantage Actor-Critic trainer for FactoryEnv + LSTM_A2C.

    NOTE(review): as written, `train` builds a (1, K, action_dim) mask while
    the actor head emits (batch, 2 * action_dim) values; the element-wise
    products in the model's forward and in `select_action` will not broadcast
    for most K — confirm the intended mask shape before relying on this class.
    """

    def __init__(self, env, model, optimizer, gamma=0.99):
        """
        A2C trainer.
        :param env: reinforcement-learning environment
        :param model: A2C model
        :param optimizer: optimizer
        :param gamma: discount factor
        """
        self.env = env
        self.model = model
        self.optimizer = optimizer
        self.gamma = gamma

    def select_action(self, actor_logits, mask):
        """
        Sample an action from a Normal distribution parameterized by the
        masked actor output.
        :param actor_logits: action-distribution parameters
        :param mask: mask suppressing invalid actions
        :return: (action, log-probability) pair
        """
        actor_logits = actor_logits * mask
        # First half of the last dim is the mean, second half the log-std.
        mean, std = actor_logits.split(actor_logits.size(-1) // 2, dim=-1)
        std = torch.clamp(std.exp(), 1e-3, 2.0)
        dist = Normal(mean, std)
        action = dist.sample()
        action = action * mask
        # NOTE(review): log_prob is evaluated on the *masked* action rather
        # than the raw sample, so masked-out entries score an action of 0.
        log_prob = dist.log_prob(action)
        return action, log_prob

    def train(self, episodes=100, max_steps=50):
        """
        Run the A2C training loop.
        :param episodes: total number of training episodes
        :param max_steps: maximum steps per episode
        """
        for episode in range(episodes):
            state = self.env.reset()
            state_history = [state]
            mask = torch.ones((1, self.env.K, self.env.action_dim))
            rewards, log_probs, values = [], [], []

            for t in range(max_steps):
                # NOTE(review): shape is (T, K, state_dim), so the LSTM sees
                # the rollout length T as the batch dimension — confirm this
                # is intended.  Building a tensor from a list of ndarrays is
                # also slow; np.stack first.
                state_tensor = torch.tensor(state_history, dtype=torch.float32)
                actor_logits, value = self.model(state_tensor, mask)
                action, log_prob = self.select_action(actor_logits, mask)
                # NOTE(review): FactoryEnv.step asserts action.shape ==
                # (K, action_dim) and uses NumPy ops; a torch tensor of a
                # different shape is passed here — confirm the conversion.
                next_state, reward, done, _ = self.env.step(action.detach(), mask)

                state_history.append(next_state)
                rewards.append(reward)
                log_probs.append(log_prob)
                values.append(value)

                if done:
                    break

            self._update(rewards, log_probs, values)

    def _update(self, rewards, log_probs, values):
        """
        Update the model from one episode's rollout.
        :param rewards: reward sequence
        :param log_probs: log-probabilities of the taken actions
        :param values: critic value estimates
        """
        # Discounted returns, accumulated back-to-front.
        returns = []
        G = 0
        for r in reversed(rewards):
            G = r + self.gamma * G
            returns.insert(0, G)
        returns = torch.tensor(returns, dtype=torch.float32)
        values = torch.cat(values)
        log_probs = torch.cat(log_probs)

        # NOTE(review): returns is (T,) while values is (T, 1); this
        # subtraction broadcasts to (T, T) — values likely needs a squeeze.
        advantage = returns - values
        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()

        self.optimizer.zero_grad()
        loss = actor_loss + critic_loss
        loss.backward()
        self.optimizer.step()

# Instantiate the environment, model and trainer, and start training.
# BUG FIX: this previously ran at import time as a module-level side effect;
# it is now guarded the same way as the first experiment above so that
# importing this module does not kick off training.
if __name__ == "__main__":
    env = FactoryEnv(max_products=5, state_dim=4)
    model = LSTM_A2C(state_dim=4, action_dim=4, hidden_dim=128)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    trainer = A2CTrainer(env, model, optimizer)

    # Start training.
    # NOTE(review): the trainer feeds a (1, K, action_dim) mask into a
    # (1, 2*action_dim) actor output, which fails to broadcast for most K —
    # confirm the intended shapes before relying on this script.
    trainer.train(episodes=100)
