import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence
import json
import time
import os

from deadlock_env import DeadlockEnv
from net.numerical_coat_net import NumericalCoATNet
from util.state_processor import process_observation_to_sequence
# Make sure the generate_symbolic_trace function in expert_trace_generator.py has been updated
from util.expert_trace_generator import generate_symbolic_trace
from util.symbolic_manager import SymbolicManager

# --- Configuration: only environment and model hyper-parameters live here ---
# (VOCAB_SIZE is injected at runtime from the SymbolicManager instance.)
CONFIG = {
    "NUM_PROCESSES": 5,
    "NUM_RESOURCES": 3,
    "D_MODEL": 128,
    "N_HEAD": 8,
    "NUM_ENCODER_LAYERS": 3,
    "NUM_DECODER_LAYERS": 3,
    "NUM_TRAJECTORIES": 10000,
    "EPOCHS": 300,
    "BATCH_SIZE": 256,
    "LEARNING_RATE": 5e-5,
}


class ExpertTraceDataset(Dataset):
    """Map-style Dataset over pre-generated (state_sequence, symbolic_trace) pairs.

    Items are returned exactly as stored; tensorisation and padding are the
    job of the DataLoader's collate_fn.
    """

    def __init__(self, data):
        # Keep a direct reference to the expert pairs; no copying needed.
        self.data = data

    def __len__(self):
        # Dataset size == number of collected (state, trace) pairs.
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


def generate_expert_data(num_trajectories, num_processes, num_resources, symbol_manager):
    """Generate (state_sequence, symbolic_trace) expert pairs.

    A fresh DeadlockEnv is sampled for every trajectory, and the shared
    symbol_manager instance is threaded through so every trace is encoded
    against one unified vocabulary.
    """
    print(f"Generating {num_trajectories} expert symbolic traces...")
    pairs = []
    for idx in range(num_trajectories):
        # Lightweight progress indicator for long generation runs.
        if (idx + 1) % 100 == 0:
            print(f"  Generating trace {idx + 1}/{num_trajectories}...")

        env = DeadlockEnv(num_processes=num_processes, num_resources=num_resources)
        observation, _ = env.reset()

        # Encode the raw observation, then derive its expert symbolic trace
        # with the shared manager so token ids stay consistent across samples.
        encoded_state = process_observation_to_sequence(observation)
        trace = generate_symbolic_trace(observation, num_processes, num_resources, symbol_manager)
        pairs.append((encoded_state, trace))

    print(f"Data generation complete! Collected {len(pairs)} (state, trace) pairs.")
    return pairs


if __name__ == '__main__':
    # 1. Build the one authoritative SymbolicManager instance — the single
    #    source of truth for all vocabulary information in this pipeline.
    manager_kwargs = {
        'num_processes': CONFIG['NUM_PROCESSES'],
        'num_resources': CONFIG['NUM_RESOURCES'],
    }
    symbol_manager = SymbolicManager(**manager_kwargs)

    # 2. Inject VOCAB_SIZE into CONFIG at runtime. Hard-coding the vocabulary
    #    size is a common source of mismatch bugs between SFT and RL pipelines.
    CONFIG['VOCAB_SIZE'] = symbol_manager.get_vocab_size()
    print(f"Dynamically generated vocabulary size: {CONFIG['VOCAB_SIZE']}")

    def custom_collate(batch):
        """Collate a batch of (state, trace) pairs for the DataLoader.

        States are stacked into a single float32 tensor; the variable-length
        traces are right-padded with the manager's PAD id so the loss
        function can ignore padded positions.
        """
        states, traces = zip(*batch)

        # Fixed-size states: convert each to float32 and stack along dim 0.
        state_batch = torch.stack(
            [torch.tensor(state, dtype=torch.float32) for state in states]
        )

        # Variable-length traces: convert to long tensors, then pad with the
        # manager-defined PAD id (must match CrossEntropyLoss.ignore_index).
        trace_tensors = [torch.tensor(trace, dtype=torch.long) for trace in traces]
        trace_batch = pad_sequence(
            trace_tensors,
            batch_first=True,
            padding_value=symbol_manager.PAD,
        )
        return state_batch, trace_batch


    # 4. Initialise the model with the dynamically derived vocabulary size.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = NumericalCoATNet(
        num_resources=CONFIG['NUM_RESOURCES'],
        vocab_size=CONFIG['VOCAB_SIZE'],  # key: pass the correct vocabulary size
        d_model=CONFIG['D_MODEL'],
        nhead=CONFIG['N_HEAD'],
        num_encoder_layers=CONFIG['NUM_ENCODER_LAYERS'],
        num_decoder_layers=CONFIG['NUM_DECODER_LAYERS'],
    ).to(device)

    # 5. Generate the expert data, then build the Dataset and DataLoader.
    expert_traces = generate_expert_data(
        CONFIG['NUM_TRAJECTORIES'],
        CONFIG['NUM_PROCESSES'],
        CONFIG['NUM_RESOURCES'],
        symbol_manager  # pass the shared manager instance
    )
    dataset = ExpertTraceDataset(expert_traces)
    dataloader = DataLoader(dataset, batch_size=CONFIG['BATCH_SIZE'], shuffle=True, collate_fn=custom_collate)

    # 6. Set up the loss function and optimiser.
    # Key: tell the loss function to ignore padded positions when computing loss
    # (this PAD id must match the padding_value used in custom_collate).
    criterion = torch.nn.CrossEntropyLoss(ignore_index=symbol_manager.PAD)
    optimizer = optim.AdamW(model.parameters(), lr=CONFIG['LEARNING_RATE'])

    print("\n--- Starting Supervised Fine-Tuning (SFT for Seq2Seq) ---")
    model.train()
    for epoch in range(CONFIG['EPOCHS']):
        total_loss = 0
        for batch_states, batch_traces in dataloader:
            batch_states = batch_states.to(device)
            batch_traces = batch_traces.to(device)

            # Standard teacher forcing for seq2seq:
            #   decoder input = target sequence without its final token,
            #   target output = target sequence without its first token.
            decoder_input = batch_traces[:, :-1]
            target_output = batch_traces[:, 1:]

            # Forward pass. The model needs the pad id internally to build the
            # correct attention masks.
            output_logits = model(batch_states, decoder_input, pad_id=symbol_manager.PAD)

            # CrossEntropyLoss expects logits of shape (N, C) and targets (N,).
            # Use .reshape() instead of .view(): the logits tensor is not
            # guaranteed to be contiguous (the target slice already needed
            # .reshape for the same reason), and .view() raises in that case.
            loss = criterion(
                output_logits.reshape(-1, CONFIG['VOCAB_SIZE']),
                target_output.reshape(-1),
            )

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        avg_loss = total_loss / len(dataloader)
        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch + 1}/{CONFIG['EPOCHS']}, Average Loss: {avg_loss:.4f}")

    print("\n--- Training complete ---")

    # 7. Persist the trained weights together with the configuration (which
    #    now carries the dynamically derived vocabulary size).
    experiment_name = "sft_symbolic"
    run_stamp = time.strftime('%Y%m%d_%H%M%S')
    save_dir = os.path.join('models', f'{experiment_name}_{run_stamp}')
    os.makedirs(save_dir, exist_ok=True)
    print(f"\nAll artifacts will be saved to: {save_dir}")

    model_path = os.path.join(save_dir, "model.pth")
    torch.save(model.state_dict(), model_path)
    print(f"Model saved to: {model_path}")

    # CONFIG already contains the correct vocab_size, so it can be dumped as-is.
    config_path = os.path.join(save_dir, "config.json")
    with open(config_path, 'w') as f:
        json.dump(CONFIG, f, indent=4)
    print(f"Configuration saved to: {config_path}")
