import os
import torch
import re
from madt.sc2.models.gpt_model import GPT, GPTConfig
from madt.sc2.framework.dim_infer import infer_dims_from_file


def extract_number(filename):
    """Return the first run of digits in *filename* as an int.

    Raises:
        IndexError: If *filename* contains no digits.
    """
    digit_runs = re.findall(r'\d+', filename)
    first_run = digit_runs[0]
    return int(first_run)
def load_actor_model(model_path, device='cpu', verbose=False):
    """Load the most recently modified GPT actor checkpoint from *model_path*.

    Model dimensions are inferred from JSON data assumed to live at
    ``offline_data/{map_name}/good/*.json``, where ``map_name`` is the name
    of the parent directory of *model_path*.

    Args:
        model_path: Directory containing ``.pkl`` / ``.pt`` weight files.
        device: Torch device spec used for ``map_location`` and model placement.
        verbose: If True, print the discovered checkpoint files and the
            selected one.

    Returns:
        The loaded ``GPT`` actor model, set to eval mode.

    Raises:
        FileNotFoundError: If *model_path* is not a directory or contains no
            loadable weight files.
    """
    if not os.path.isdir(model_path):
        raise FileNotFoundError(f"模型路径不存在: {model_path}")

    # Sort candidates by modification time so the newest checkpoint is last.
    model_files = sorted(
        [f for f in os.listdir(model_path) if f.endswith((".pkl", ".pt"))],
        key=lambda f: os.path.getmtime(os.path.join(model_path, f))
    )
    if not model_files:
        raise FileNotFoundError(f"模型路径中无可加载的权重: {model_path}")

    latest_file = model_files[-1]
    model_file_path = os.path.join(model_path, latest_file)

    if verbose:
        print(f"Found {len(model_files)} model files:")
        for f in model_files:
            print(f" - {f}")
        print(f"\nSelecting latest model: {latest_file}")

    # Infer model dimensions from the offline JSON data.
    # Assumed layout: offline_data/{map_name}/good/*.json, where map_name is
    # the parent directory of model_path — TODO confirm against callers.
    map_name = os.path.basename(os.path.dirname(model_path))
    data_path = os.path.join("offline_data", map_name, "good", "*.json")
    global_dim, local_dim, action_dim = infer_dims_from_file(data_path)

    # Architecture hyperparameters are fixed here; they must match the values
    # used when the checkpoint was trained, or load_state_dict will fail.
    config = GPTConfig(
        input_dim=local_dim,
        vocab_size=action_dim,
        block_size=3,
        n_layer=2,
        n_head=2,
        n_embd=32,
        state_size=local_dim,
        model_type="state_only"
    )
    model = GPT(config, model_type="actor").to(device)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources. Consider weights_only=True (torch>=2.0)
    # if these checkpoints are plain state_dicts.
    state_dict = torch.load(model_file_path, map_location=device)
    model.load_state_dict(state_dict)
    model.eval()

    return model

