import os
import torch
import torch.nn as nn


class NoOpBufferGate(nn.Module):
    """Gate stub that unconditionally accepts every sample."""

    def __init__(self):
        super().__init__()

    @torch.no_grad()
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Return a score of 1.0 (accept) for each row of *features*.

        Output is a float32 tensor of shape (N,) on the same device as
        the input, where N is the batch dimension.
        """
        batch = features.shape[0]
        return torch.full((batch,), 1.0, dtype=torch.float32, device=features.device)


class BufferGateMLP(nn.Module):
    """MLP mapping feature vectors to a scalar keep-score in [0, 1].

    Architecture: Linear+ReLU for each width in *hidden*, then a final
    Linear(…, 1) followed by Sigmoid. An empty *hidden* yields a single
    Linear+Sigmoid head.
    """

    def __init__(self, in_dim: int, hidden):
        super().__init__()
        widths = [in_dim, *hidden]
        modules = []
        # Hidden stack: one Linear+ReLU per consecutive width pair.
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            modules.append(nn.Linear(fan_in, fan_out))
            modules.append(nn.ReLU(inplace=True))
        # Scoring head squashes to (0, 1).
        modules.append(nn.Linear(widths[-1], 1))
        modules.append(nn.Sigmoid())
        self.net = nn.Sequential(*modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return one sigmoid score per input row, shape (N,)."""
        return self.net(x).view(-1)


def build_buffer_gate_from_config(config: dict):
    """Build the buffer-gate model described by *config*, or None when disabled.

    Config keys:
        USE_BUFFER_GATE (bool): feature switch; falsy -> return None.
        BUFFER_GATE_MODEL_PATH (str): checkpoint path, required when enabled.

    The checkpoint may be either a pickled ``nn.Module`` (returned as-is in
    eval mode) or a dict — ``{"in_dim", "hidden", "state_dict"}`` or a bare
    state_dict — from which a :class:`BufferGateMLP` is rebuilt.

    Returns:
        nn.Module | None: the gate model in eval mode, or None when disabled.

    Raises:
        FileNotFoundError: enabled but the path is missing or does not exist.
    """
    if not config.get("USE_BUFFER_GATE", False):
        return None
    path = config.get("BUFFER_GATE_MODEL_PATH", None)
    if not path:
        raise FileNotFoundError("USE_BUFFER_GATE=True 但未提供 BUFFER_GATE_MODEL_PATH。")
    if not os.path.exists(path):
        raise FileNotFoundError(f"Buffer gate model 路径不存在: {path}")
    # SECURITY: torch.load unpickles arbitrary objects — only load trusted
    # checkpoints. weights_only=False is required here because the payload may
    # be a full nn.Module (torch>=2.6 defaults to weights_only=True, which
    # rejects module pickles and would break that path).
    payload = torch.load(path, map_location="cpu", weights_only=False)
    if isinstance(payload, nn.Module):
        payload.eval()
        return payload
    # Dict payload: read architecture hints, falling back to the historical
    # defaults (12-dim input, [64, 16] hidden) used by older checkpoints.
    in_dim = payload.get("in_dim", 12)
    hidden = payload.get("hidden", [64, 16])
    state_dict = payload.get("state_dict", payload)
    model = BufferGateMLP(in_dim=in_dim, hidden=hidden)
    # BufferGateMLP registers its layers under "net."; checkpoints saved from
    # the bare Sequential lack that prefix, so add it when absent.
    has_net_prefix = any(k.startswith("net.") for k in state_dict)
    if not has_net_prefix:
        state_dict = {f"net.{k}": v for k, v in state_dict.items()}
    # NOTE(review): strict=False silently tolerates missing/unexpected keys;
    # consider strict=True if checkpoints are guaranteed to match the arch.
    model.load_state_dict(state_dict, strict=False)
    model.eval()
    return model
