# -*- coding:utf-8 -*-

"""
回放机制
"""

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import seaborn as sns
from collections import deque

class FactoryEnvDynamicK:
    """Factory production environment with a dynamic per-period limit K[t]
    on how many product types may be produced simultaneously.

    Single-period state layout (73 features, kept in [0, 1] by ``step``):
      [0:8]                       raw-material levels
      [19:19+max_products]        product prices
      [34:34+max_products]        product unit costs
      [55:55+max_products]        product quality (pass) rates
    (Other feature positions are not interpreted by this class.)

    The observation is a rolling window of the last ``history_length``
    single-period states, shape ``(history_length, single_state_dim)``.
    """

    def __init__(self, max_products=5, history_length=12):
        # Number of distinct product types.
        self.max_products = max_products
        # Length of the rolling state window.
        self.history_length = history_length
        # Number of features in a single-period state vector.
        self.single_state_dim = 73
        # Per-period limit on simultaneously produced product types,
        # redrawn on every reset (each entry in 1..max_products).
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)
        # Rolling window of past states, newest last.
        # NOTE(review): reset() replaces this with a float64 array from
        # np.tile; the float32 here only matters before the first reset.
        self.state_history = np.zeros((self.history_length, self.single_state_dim), dtype=np.float32)
        self.reset()

    def reset(self):
        """Re-initialise the state window and redraw K.

        Returns:
            The full state history, shape (history_length, single_state_dim).
        """
        initial_state = np.random.rand(self.single_state_dim)
        # Fill the whole window with the same initial state.
        self.state_history = np.tile(initial_state, (self.history_length, 1))
        self.K = np.random.randint(1, self.max_products + 1, size=self.history_length)
        return self.state_history

    def step(self, action):
        """Apply one action and advance the environment by one period.

        Args:
            action: dict with
                "matrix": 2-D array (periods, max_products) of 0/1 production
                    decisions; modified in place to respect K[t].
                "control": array broadcastable to the 73-dim state; added
                    element-wise to the current state.

        Returns:
            (state_history, reward, done, info) in the Gym convention.
        """
        production_matrix = action["matrix"]
        control_action = action["control"]

        # Work on a copy of the newest state.
        current_state = self.state_history[-1].copy()

        # Enforce the per-period product-count limit without going out of
        # bounds when the matrix has fewer rows than the history window.
        valid_length = min(self.history_length, len(production_matrix))
        n = self.max_products
        for t in range(valid_length):
            if np.sum(production_matrix[t]) > self.K[t]:  # limit exceeded
                # Keep only the K[t] products with the highest price * quality.
                # BUG FIX: the quality slice must be the same width as the
                # price slice (max_products entries); the original used a
                # 10-wide slice against a 5-wide one, which raises ValueError.
                profits = current_state[19:19 + n] * current_state[55:55 + n]
                selected_indices = np.argsort(profits)[-self.K[t]:]
                production_matrix[t] = 0  # clear the row
                production_matrix[t, selected_indices] = 1  # keep the best K[t]

        # Zero the features of products not produced in the latest period.
        for i in range(n):
            if production_matrix[-1, i] == 0:  # not produced
                current_state[19 + i] = 0  # clear price
                current_state[34 + i] = 0  # clear cost
                current_state[55 + i] = 0  # clear quality rate

        # Apply the continuous control adjustment and keep the state in [0, 1].
        current_state += control_action
        current_state = np.clip(current_state, 0, 1)

        # Shift the window left and append the new state.
        self.state_history = np.roll(self.state_history, shift=-1, axis=0)
        self.state_history[-1] = current_state

        reward = self.calculate_profit(current_state, production_matrix[-1])
        done = self.check_termination()

        return self.state_history, reward, done, {}

    def calculate_profit(self, state, production_vector):
        """Return the profit of one period for the given production vector.

        BUG FIX: the original sliced costs/quality 10 wide and multiplied
        them by the 5-wide production vector, and multiplied the 5-wide cost
        vector by the 8-wide raw-material vector — both raise ValueError.
        All per-product slices now use max_products entries, and the material
        term scales total cost by the mean raw-material level (the original
        intent is ambiguous — TODO confirm the cost model with the author).
        """
        n = self.max_products
        prices = state[19:19 + n] * production_vector
        costs = state[34:34 + n] * production_vector
        quality_rates = state[55:55 + n] * production_vector
        raw_materials = state[:8]
        revenue = np.sum(prices * quality_rates)
        material_cost = np.sum(costs) * np.mean(raw_materials)
        return revenue - material_cost

    def check_termination(self):
        """Example termination rule: stop once cumulative window profit
        (assuming all products produced) exceeds a target value."""
        recent_profits = [self.calculate_profit(state, np.ones(self.max_products))
                          for state in self.state_history]
        if sum(recent_profits) > 10000:  # target profit
            return True
        return False

    def enforce_realistic_constraints(self, state, production_vector):
        """Clamp state features to realistic ranges and binarize the
        production vector. Mutates ``state`` in place and returns both.
        """
        n = self.max_products
        # Raw-material supply cap (assumed 1.0 per material).
        max_raw_materials = np.array([1.0] * 8)
        state[:8] = np.minimum(state[:8], max_raw_materials)

        # Price range (from historical data).
        min_price, max_price = 0.5, 1.5
        state[19:19 + n] = np.clip(state[19:19 + n], min_price, max_price)

        # Cost range. BUG FIX: slice width matches max_products (was 10 wide).
        min_cost, max_cost = 0.1, 1.0
        state[34:34 + n] = np.clip(state[34:34 + n], min_cost, max_cost)

        # Quality-rate range. BUG FIX: slice width matches max_products.
        min_quality, max_quality = 0.7, 1.0
        state[55:55 + n] = np.clip(state[55:55 + n], min_quality, max_quality)

        # Force the production vector to binary (0 or 1).
        production_vector = (production_vector > 0.5).astype(float)
        return state, production_vector

class ReplayBuffer:
    """Fixed-capacity FIFO buffer of transitions for experience replay.

    Oldest transitions are evicted automatically once ``capacity`` is
    reached (backed by a bounded deque).
    """

    def __init__(self, capacity):
        # Bounded deque: appends beyond capacity drop the oldest entry.
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw ``batch_size`` distinct transitions uniformly at random.

        Returns five stacked arrays: states, actions, rewards,
        next_states, dones.
        """
        picks = np.random.choice(len(self.buffer), batch_size, replace=False)
        batch = [self.buffer[i] for i in picks]
        states, actions, rewards, next_states, dones = map(np.array, zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

class LSTMModel(nn.Module):
    """LSTM policy: encodes a state-history window and emits one action
    vector per sequence.

    Output layout along dim 1:
      [:num_products]  production part, squashed to (0, 1) via sigmoid
      [num_products:]  control part, squashed to [-0.1, 0.1] via scaled tanh

    Args:
        input_dim: number of features per time step.
        hidden_dim: LSTM hidden size.
        output_dim: total action dimension (num_products + control dims).
        num_products: width of the production part of the output.
            Defaults to 5, matching the previously hard-coded split, so
            existing callers are unaffected.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_products=5):
        super().__init__()
        # Generalization: the production/control split point is now a
        # parameter instead of a hard-coded 5.
        self.num_products = num_products
        self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map (batch, seq_len, input_dim) -> (batch, output_dim)."""
        lstm_out, _ = self.lstm(x)
        # Use only the hidden state of the final time step.
        output = self.fc(lstm_out[:, -1, :])
        k = self.num_products
        production_logits = torch.sigmoid(output[:, :k])  # production matrix, in (0, 1)
        control_logits = torch.tanh(output[:, k:]) * 0.1  # bounded adjustment magnitude
        return torch.cat([production_logits, control_logits], dim=1)

if __name__ == '__main__':
    # Build the environment and the experience replay buffer.
    env = FactoryEnvDynamicK()
    replay_buffer = ReplayBuffer(capacity=10000)

    # Hyper-parameters.
    input_dim = 73
    hidden_dim = 128
    num_products = 5
    # Action layout: production vector (num_products) + one control
    # adjustment per state feature.  BUG FIX: the control part was 33 wide,
    # but FactoryEnvDynamicK.step adds it element-wise to the 73-dim state,
    # which raised a shape error.
    output_dim = num_products + input_dim
    learning_rate = 0.001
    num_epochs = 1000
    batch_size = 32

    # Model, loss, and optimizer.
    model = LSTMModel(input_dim, hidden_dim, output_dim)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # BUG FIX: defined before the loop so the periodic log below cannot hit
    # a NameError when the buffer has not filled past batch_size yet.
    loss = None

    # Training loop.
    for epoch in range(num_epochs):
        state = torch.tensor(env.reset(), dtype=torch.float32).unsqueeze(0)
        action_logits = model(state)

        # Threshold the production head into a binary on/off decision.
        production_matrix = (action_logits[:, :num_products] > 0.5).float()
        control_action = action_logits[:, num_products:]

        # BUG FIX: keep the matrix 2-D (1 row per decided period).  The
        # original squeezed it to 1-D, and env.step's 2-D indexing
        # (production_matrix[-1, i]) then raised an IndexError.
        production_np = production_matrix.detach().numpy().reshape(1, -1)

        action = {
            "matrix": production_np,
            "control": control_action.squeeze(0).detach().numpy()
        }

        # Clamp the latest state to realistic ranges and binarize the
        # production vector before stepping.
        current_state = env.state_history[-1]
        adjusted_state, adjusted_matrix = env.enforce_realistic_constraints(
            current_state, production_np)
        action["matrix"] = adjusted_matrix

        next_state, reward, done, _ = env.step(action)

        # Store the transition.  BUG FIX: squeeze the batch dimension so the
        # stored action matches model output shape in the loss below (the
        # original kept a stray (1, D) dim that silently broadcast in MSE).
        replay_buffer.add(state.squeeze(0).numpy(),
                          action_logits.squeeze(0).detach().numpy(),
                          reward, next_state, done)

        # Learn from a random mini-batch once enough experience is stored.
        if len(replay_buffer) > batch_size:
            states, actions, rewards, next_states, dones = replay_buffer.sample(batch_size)
            states = torch.tensor(states, dtype=torch.float32)
            actions = torch.tensor(actions, dtype=torch.float32)
            rewards = torch.tensor(rewards, dtype=torch.float32)

            # Regress current outputs toward the stored action logits.
            predicted = model(states)
            loss = criterion(predicted, actions)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % 100 == 0 and loss is not None:
            print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item():.4f}")

    # Visualise the dynamic per-period product-type limit.
    periods = env.history_length  # was hard-coded 12
    K = env.K
    plt.figure(figsize=(8, 4))
    plt.plot(range(1, periods + 1), K, marker='o', label="Allowed Product Types")
    plt.xlabel("Period")
    plt.ylabel("Max Product Types")
    plt.title("Dynamic Product Types Allowed per Period")
    plt.legend()
    plt.grid()
    plt.show()

    # Example production-matrix heatmap under the dynamic constraints.
    production_matrix = np.random.randint(0, 2, (periods, num_products))
    for t in range(periods):
        # BUG FIX: the original zeroed a trailing slice, which could still
        # leave more than K[t] active products; drop the surplus active
        # entries explicitly instead.
        active = np.flatnonzero(production_matrix[t])
        if active.size > K[t]:
            production_matrix[t, active[K[t]:]] = 0

    plt.figure(figsize=(8, 6))
    sns.heatmap(production_matrix, annot=True, cmap="coolwarm", cbar=False,
                xticklabels=[f"g{i+1}" for i in range(num_products)],
                yticklabels=[f"Period {i+1}" for i in range(periods)])
    plt.title("Production Matrix with Dynamic Constraints")
    plt.xlabel("Products")
    plt.ylabel("Periods")
    plt.show()
