import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence
import json
import time
import os
import numpy as np

# Import the environment plus the constants needed for trace generation
from deadlock_env import DeadlockEnv, KAPPA_ROLLBACK, KAPPA_PREEMPT, STATE_BLOCKED
from net.numerical_coat_net import NumericalCoATNet
from rl_train import make_env
from util.state_processor import process_observation_to_sequence
from util.symbolic_manager import SymbolicManager

# --- CONFIG now holds only environment and model hyperparameters ---
# VOCAB_SIZE is added dynamically in __main__ from the SymbolicManager.
CONFIG = {
    "NUM_PROCESSES": 5, "NUM_RESOURCES": 3, "D_MODEL": 128, "N_HEAD": 8,
    "NUM_ENCODER_LAYERS": 3, "NUM_DECODER_LAYERS": 3,
    "NUM_TRAJECTORIES": 10000, "EPOCHS": 1000, "BATCH_SIZE": 256,
    "LEARNING_RATE": 5e-5,
}


class ExpertTraceDataset(Dataset):
    """Thin Dataset wrapper over a pre-built list of (state, trace) pairs."""

    def __init__(self, data):
        """Keep a reference to the already-generated expert samples."""
        self.data = data

    def __len__(self):
        """Number of expert samples available."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the sample at position *idx* unchanged."""
        return self.data[idx]


def find_cycle_dfs(node, graph, path, visited):
    """Depth-first search for a cycle in a wait-for graph.

    Args:
        node: Process id to start (or continue) the search from.
        graph: Mapping ``waiter -> holder``; each node has at most one
            outgoing edge, so a plain dict suffices.
        path: Current DFS path (mutated in place; pass a fresh list per root).
        visited: Set of already-explored nodes (may be shared across roots).

    Returns:
        The list of node ids forming the cycle, in path order, or None if
        no cycle is reachable from ``node``.
    """
    path.append(node)
    visited.add(node)

    next_node = graph.get(node)
    if next_node in path:
        # Membership above guarantees index() succeeds, so the original
        # try/except ValueError around it was dead code.
        return path[path.index(next_node):]

    # BUG FIX: the original guard was `if next_node and ...`, which is
    # falsy for process id 0 — edges pointing at P0 were silently never
    # followed, so cycles through P0 could be missed.
    if next_node is not None and next_node not in visited:
        cycle = find_cycle_dfs(next_node, graph, path, visited)
        if cycle:
            return cycle

    path.pop()
    return None


def generate_symbolic_trace(obs, num_processes, num_resources, symbol_manager: SymbolicManager):
    """Analyze a deadlock state and build an expert symbolic reasoning trace.

    The trace encodes, as SymbolicManager-compatible token ids: the
    wait-for relations, any cycle found, the cost evaluation of each
    candidate victim, and the final decision.

    Args:
        obs: Observation dict; reads 'allocation', 'need',
            'processes_status' (numpy arrays) and 'process_properties'.
        num_processes: Number of processes in the environment.
        num_resources: Number of resource types in the environment.
        symbol_manager: Authoritative token <-> id vocabulary.

    Returns:
        list[int]: Token ids, bracketed by START and END.
    """
    trace_ids = [symbol_manager.START]

    def add_tokens_to_trace(tokens):
        """Convert a list of token strings to ids and append them to the trace."""
        trace_ids.extend([symbol_manager.token_to_idx[t] for t in tokens])

    # 1. Build the wait-for graph.
    waits_for = {}
    resource_holder = {}
    for p_id in range(num_processes):
        for r_id in range(num_resources):
            if obs['allocation'][p_id, r_id] > 0:
                resource_holder[r_id] = p_id

    blocked_pids = np.where(obs['processes_status'] == STATE_BLOCKED)[0]

    # Trace the wait relation of each blocked process.
    for p_id in blocked_pids:
        needed_r_ids = np.where(obs['need'][p_id] > 0)[0]
        # BUG FIX: the original test was `if not needed_r_ids.any()`, but
        # needed_r_ids holds *indices* — `array([0]).any()` is False, so a
        # process whose only needed resource is R0 was wrongly skipped.
        # Test for emptiness explicitly instead.
        if needed_r_ids.size == 0:
            continue
        # Heuristic: attribute the wait to the first needed resource.
        needed_r_id = needed_r_ids[0]

        p_token = f'P{p_id}'
        r_token = f'R{needed_r_id}'

        # Trace: P(x) [WAIT_FOR] R(y)
        add_tokens_to_trace([p_token, '[WAIT_FOR]', r_token])

        if needed_r_id in resource_holder:
            holder_p_id = resource_holder[needed_r_id]
            waits_for[p_id] = holder_p_id
            holder_p_token = f'P{holder_p_id}'
            # Trace: R(y) [HELD_BY] P(z)
            add_tokens_to_trace([r_token, '[HELD_BY]', holder_p_token])

    # 2. Search the wait-for graph for cycles.
    visited = set()
    cycles = []
    for p_id in waits_for:
        if p_id not in visited:
            # BUG FIX: the original reused a single `path` list across all
            # roots; when a cycle was found the path was never unwound, so
            # later searches could report false cycles from stale entries.
            # Use a fresh path per root (`visited` stays shared).
            cycle = find_cycle_dfs(p_id, waits_for, [], visited)
            if cycle:
                cycles.append(cycle)
                cycle_p_tokens = [f'P{pid}' for pid in cycle]
                # Trace: [CYCLE_FOUND] P(x) P(y)... [END_CYCLE]
                add_tokens_to_trace(['[CYCLE_FOUND]'] + cycle_p_tokens + ['[END_CYCLE]'])

    # 3. Decide a victim based on the first cycle.
    if cycles:
        target_cycle = cycles[0]
        victim_pid = -1
        min_cost = float('inf')
        victim_action_token = ''

        for p_id in target_cycle:
            p_token = f'P{p_id}'
            is_realtime = obs['process_properties'][p_id]['is_realtime']

            # Evaluate the rollback cost.
            add_tokens_to_trace(['[EVAL_COST]', p_token, '[ROLLBACK]'])
            if KAPPA_ROLLBACK < min_cost:
                min_cost = KAPPA_ROLLBACK
                victim_pid = p_id
                victim_action_token = '[ROLLBACK]'

            # Realtime processes also get a preemption cost evaluation.
            if is_realtime:
                add_tokens_to_trace(['[EVAL_COST]', p_token, '[PREEMPT]'])
                if KAPPA_PREEMPT < min_cost:
                    min_cost = KAPPA_PREEMPT
                    victim_pid = p_id
                    victim_action_token = '[PREEMPT]'

        if victim_pid != -1:
            victim_p_token = f'P{victim_pid}'
            # Trace: [DECIDE] [ACTION] P(victim)
            add_tokens_to_trace(['[DECIDE]', victim_action_token, victim_p_token])

    trace_ids.append(symbol_manager.END)
    return trace_ids


def generate_expert_data(num_trajectories, num_processes, num_resources, symbol_manager):
    """Generate expert (state_sequence, symbolic_trace) training pairs.

    Each sample comes from a freshly-reset DeadlockEnv; the trace is
    produced by generate_symbolic_trace using the shared symbol_manager.
    """
    print(f"Generating {num_trajectories} expert symbolic traces...")
    expert_data = []
    for count in range(1, num_trajectories + 1):
        if count % 100 == 0:
            print(f"  Generating trace {count}/{num_trajectories}...")
        env = DeadlockEnv(num_processes=num_processes, num_resources=num_resources)
        obs, _ = env.reset()
        expert_data.append((
            process_observation_to_sequence(obs),
            # Note: this is the trace builder defined in this file.
            generate_symbolic_trace(obs, num_processes, num_resources, symbol_manager),
        ))
    print(f"Data generation complete! Collected {len(expert_data)} (state, trace) pairs.")
    return expert_data


if __name__ == '__main__':
    # 1. Create the single authoritative SymbolicManager instance.
    symbol_manager = SymbolicManager(
        num_processes=CONFIG['NUM_PROCESSES'],
        num_resources=CONFIG['NUM_RESOURCES']
    )

    # 2. Update CONFIG dynamically so VOCAB_SIZE is always accurate.
    CONFIG['VOCAB_SIZE'] = symbol_manager.get_vocab_size()
    print(f"Dynamically generated vocabulary size: {CONFIG['VOCAB_SIZE']}")


    # 3. Batch assembly for the DataLoader.
    def custom_collate(batch):
        """Stack the fixed-size state tensors and PAD-pad the variable-length traces."""
        states, traces = zip(*batch)
        states_tensor = torch.stack([torch.tensor(s, dtype=torch.float32) for s in states])
        traces_padded = pad_sequence(
            [torch.tensor(t, dtype=torch.long) for t in traces],
            batch_first=True,
            padding_value=symbol_manager.PAD
        )
        return states_tensor, traces_padded


    # 4. Initialize the model with the dynamic vocabulary size.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    temp_env = make_env()
    temp_obs, _ = temp_env.reset()
    sample_seq = process_observation_to_sequence(temp_obs)
    feature_dim = sample_seq.shape[1]  # per-position feature width of the encoder input
    model = NumericalCoATNet(
        feature_dim=feature_dim,
        vocab_size=CONFIG['VOCAB_SIZE'],
        d_model=CONFIG['D_MODEL'],
        nhead=CONFIG['N_HEAD'],
        num_encoder_layers=CONFIG['NUM_ENCODER_LAYERS'],
        num_decoder_layers=CONFIG['NUM_DECODER_LAYERS'],
    ).to(device)

    # 5. Generate data, build the Dataset and DataLoader.
    expert_traces = generate_expert_data(
        CONFIG['NUM_TRAJECTORIES'],
        CONFIG['NUM_PROCESSES'],
        CONFIG['NUM_RESOURCES'],
        symbol_manager
    )
    dataset = ExpertTraceDataset(expert_traces)
    dataloader = DataLoader(dataset, batch_size=CONFIG['BATCH_SIZE'], shuffle=True, collate_fn=custom_collate)

    # 6. Loss (ignoring PAD positions) and optimizer.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=symbol_manager.PAD)
    optimizer = optim.AdamW(model.parameters(), lr=CONFIG['LEARNING_RATE'])

    print("\n--- Starting Supervised Fine-Tuning (SFT for Seq2Seq) ---")
    model.train()
    for epoch in range(CONFIG['EPOCHS']):
        total_loss = 0
        for batch_states, batch_traces in dataloader:
            batch_states = batch_states.to(device)
            batch_traces = batch_traces.to(device)

            # Teacher forcing: feed tokens [0..T-1], predict tokens [1..T].
            decoder_input = batch_traces[:, :-1]
            target_output = batch_traces[:, 1:]

            output_logits = model(batch_states, decoder_input, pad_id=symbol_manager.PAD)

            # FIX: use .reshape for the logits too (original mixed .view and
            # .reshape) — .view raises on non-contiguous tensors, while
            # .reshape handles both layouts; behavior is otherwise identical.
            loss = criterion(output_logits.reshape(-1, CONFIG['VOCAB_SIZE']), target_output.reshape(-1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        avg_loss = total_loss / len(dataloader)
        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch + 1}/{CONFIG['EPOCHS']}, Average Loss: {avg_loss:.4f}")

    print("\n--- Training complete ---")

    # 7. Save the model and the config (which includes the dynamic vocab size).
    experiment_name = "sft_symbolic"
    timestamp = time.strftime('%Y%m%d_%H%M%S')
    save_dir = os.path.join('models', f'{experiment_name}_{timestamp}')
    os.makedirs(save_dir, exist_ok=True)
    print(f"\nAll artifacts will be saved to: {save_dir}")

    model_path = os.path.join(save_dir, "model.pth")
    config_path = os.path.join(save_dir, "config.json")

    torch.save(model.state_dict(), model_path)
    print(f"Model saved to: {model_path}")

    with open(config_path, 'w') as f:
        json.dump(CONFIG, f, indent=4)
    print(f"Configuration saved to: {config_path}")
