import os
import torch
import torch.nn as nn


class NoOpLWG(torch.nn.Module):
    """Pass-through learned-write-gate: emits 1.0 ("always write") for every row.

    Used so the gating code path can stay enabled without changing behavior
    when no trained gate model is available.
    """

    @torch.no_grad()
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Return a float32 vector of ones, one per row of *features*, on the same device."""
        batch = features.shape[0]
        # new_ones inherits the input's device; dtype pinned to float32 to match
        # what a real gate model would emit.
        return features.new_ones(batch, dtype=torch.float32)


def build_lwg_from_config(config: dict):
    """
    Build the learned write-gate (LWG) model described by ``config``.

    Args:
        config: dict with keys ``USE_LWG`` (bool) and ``LWG_MODEL_PATH`` (str).

    Returns:
        None when ``USE_LWG`` is falsy/absent; otherwise an eval-mode
        ``nn.Module`` loaded from ``LWG_MODEL_PATH``.

    Raises:
        FileNotFoundError: LWG is enabled but the path is unset, empty, or
            does not exist on disk.
        RuntimeError: the file exists but could not be loaded as a model
            (original error chained as ``__cause__``).
    """
    if not config.get("USE_LWG", False):
        return None
    path = config.get("LWG_MODEL_PATH", None)
    # An unset/empty/nonexistent path is a hard configuration error when the
    # gate is explicitly enabled — fail loudly rather than silently degrade.
    if not path or not os.path.exists(path):
        raise FileNotFoundError("LWG enabled but LWG_MODEL_PATH is missing or not found.")
    try:
        # NOTE(review): torch.load unpickles arbitrary objects; only load
        # checkpoints from trusted sources. weights_only=True is not usable
        # here because full nn.Module payloads are an accepted format.
        payload = torch.load(path, map_location="cpu")
        if isinstance(payload, torch.nn.Module):
            # Whole-module checkpoint: use it as-is.
            payload.eval()
            return payload
        # Otherwise the payload is a dict: {'state_dict': ..., 'in_dim': ..., 'hidden': ...}
        in_dim = payload.get('in_dim', 5)
        hidden = payload.get('hidden', [64, 16])
        state_dict = payload['state_dict']
        # Training may have saved keys with or without the 'net.' namespace;
        # normalize to the prefixed form that GateMLP expects.
        if not any(k.startswith('net.') for k in state_dict):
            state_dict = {f"net.{k}": v for k, v in state_dict.items()}
        model = _build_mlp(in_dim=in_dim, hidden=hidden, with_prefix=True)
        # NOTE(review): strict=False silently ignores missing/unexpected keys;
        # consider strict=True once checkpoint formats are stable.
        model.load_state_dict(state_dict, strict=False)
        model.eval()
        return model
    except Exception as e:
        # Chain the original exception so load failures keep their traceback.
        raise RuntimeError(f"Failed to load LWG model from '{path}': {e}") from e

class GateMLP(nn.Module):
    """Small MLP gate head: stacked Linear+ReLU layers ending in Linear(·,1)+Sigmoid.

    The stack lives under ``self.net`` so state_dict keys carry the ``net.``
    prefix expected by the loading code.
    """

    def __init__(self, in_dim: int, hidden):
        super().__init__()
        blocks = []
        prev = in_dim
        for width in hidden:
            blocks.append(nn.Linear(prev, width))
            blocks.append(nn.ReLU(inplace=True))
            prev = width
        blocks.append(nn.Linear(prev, 1))
        blocks.append(nn.Sigmoid())
        self.net = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the stack and flatten the (N, 1) output to a (N,) probability vector."""
        scores = self.net(x)
        return scores.view(-1)


def _build_mlp(in_dim=5, hidden=(64, 16), with_prefix: bool = True) -> nn.Module:
    # with_prefix True returns GateMLP (keys prefixed by 'net.'), to match training script
    if with_prefix:
        return GateMLP(in_dim=in_dim, hidden=hidden)
    # fallback plain sequential
    layers = []
    last = in_dim
    for h in hidden:
        layers += [nn.Linear(last, h), nn.ReLU(inplace=True)]
        last = h
    layers += [nn.Linear(last, 1), nn.Sigmoid()]
    return nn.Sequential(*layers)


