import torch
import torch.nn as nn
from typing import Optional

# Soft dependency: transformers is only required when the classifier is
# actually constructed; __init__ raises a clear ImportError otherwise.
try:
    from transformers import VideoMAEForVideoClassification, AutoConfig
except Exception:  # ImportError or any transitive import failure
    VideoMAEForVideoClassification = None
    AutoConfig = None


class VideoMAEClassifier(nn.Module):
    """Video classifier built on a pretrained VideoMAE backbone.

    Wraps ``transformers.VideoMAEForVideoClassification`` and normalizes
    incoming clips to the ``(batch, frames, 3, height, width)`` layout before
    delegating to the underlying model.
    """

    def __init__(
        self,
        pretrained_model: str = "MCG-NJU/videomae-base",
        num_labels: int = 4,
        finetune_last_n_layers: int = 4,
        pretrained: bool = True,
    ) -> None:
        """Build the model and apply partial fine-tuning.

        Args:
            pretrained_model: HuggingFace model id or local checkpoint path.
            num_labels: Size of the classification head.
            finetune_last_n_layers: Number of trailing encoder blocks left
                trainable; negative or >= total unfreezes the whole backbone.
            pretrained: If True, load pretrained weights; otherwise build the
                architecture from its config with random initialization.

        Raises:
            ImportError: If ``transformers`` is not installed.
        """
        super().__init__()
        if VideoMAEForVideoClassification is None:
            raise ImportError("需要安装 transformers>=4.30：pip install transformers")
        if pretrained:
            self.model = VideoMAEForVideoClassification.from_pretrained(
                pretrained_model,
                num_labels=num_labels,
                # Checkpoint head size may differ from num_labels.
                ignore_mismatched_sizes=True,
            )
        else:
            if AutoConfig is None:
                raise ImportError("需要安装 transformers")
            config = AutoConfig.from_pretrained(pretrained_model)
            config.num_labels = num_labels
            self.model = VideoMAEForVideoClassification(config)
        # Pin the config to RGB input; forward() converts grayscale or
        # extra-channel clips to 3 channels to match. (The redundant
        # try/except around this simple attribute set was removed.)
        if hasattr(self.model, 'config'):
            self.model.config.num_channels = 3
        self._freeze_layers(finetune_last_n_layers)

    def _freeze_layers(self, finetune_last_n_layers: int = 0) -> None:
        """Freeze all but the last ``finetune_last_n_layers`` encoder blocks.

        Negative values, or values >= the number of encoder blocks, unfreeze
        the entire backbone instead. The classification head is never touched
        here and stays trainable. Best-effort: unrecognized model layouts are
        left unchanged rather than raising.
        """
        try:
            backbone = getattr(self.model, 'videomae', None)
            if backbone is None:
                return
            encoder = getattr(backbone, 'encoder', None)
            # HF encoders expose the block list as either `layer` or `layers`.
            if hasattr(encoder, 'layer'):
                blocks = encoder.layer
            elif hasattr(encoder, 'layers'):
                blocks = encoder.layers
            else:
                return
            total = len(blocks)
            if finetune_last_n_layers < 0 or finetune_last_n_layers >= total:
                # Out-of-range request: make the whole backbone trainable.
                for param in backbone.parameters():
                    param.requires_grad = True
                return
            # Patch/positional embeddings stay frozen during partial tuning.
            if hasattr(backbone, 'embeddings'):
                for param in backbone.embeddings.parameters():
                    param.requires_grad = False
            first_trainable = total - finetune_last_n_layers
            for idx, block in enumerate(blocks):
                trainable = idx >= first_trainable
                for param in block.parameters():
                    param.requires_grad = trainable
        except Exception:
            # Deliberate safety net: freezing must never break training on an
            # unexpected model layout.
            pass

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Run a clip batch through the model and return classification logits.

        Accepts either ``(B, T, C, H, W)`` or ``(B, C, T, H, W)`` (detected
        heuristically); grayscale input is tiled to RGB and channels beyond 3
        are dropped.

        Raises:
            ValueError: If ``pixel_values`` is not a 5-D tensor.
        """
        # BUG FIX: validation/normalization previously sat inside a broad
        # try/except that swallowed its own ValueError, making the 5-D check
        # dead code. Validate and normalize outside any exception guard.
        if pixel_values.dim() != 5:
            raise ValueError("pixel_values must be 5D")
        # Heuristic layout fix: if dim 1 looks like a channel axis and dim 2
        # does not, assume channels-first and move channels to dim 2.
        if pixel_values.shape[1] in (1, 3) and pixel_values.shape[2] not in (1, 3):
            pixel_values = pixel_values.permute(0, 2, 1, 3, 4).contiguous()
        channels = pixel_values.shape[2]
        if channels == 1:
            # Tile grayscale to RGB.
            pixel_values = pixel_values.repeat(1, 1, 3, 1, 1)
        elif channels > 3:
            # Drop extra channels (e.g. alpha).
            pixel_values = pixel_values[:, :, :3, ...]
        if not hasattr(self, '_dbg_once'):
            # One-shot debug print of the normalized input shape; best-effort
            # only, so keep the narrow guard around the config lookup.
            self._dbg_once = True
            try:
                nc = getattr(self.model.config, 'num_channels', 'n/a') if hasattr(self.model, 'config') else 'n/a'
            except Exception:
                nc = 'n/a'
            print(f"[VideoMAEClassifier] input={tuple(pixel_values.shape)} config.num_channels={nc}", flush=True)
        out = self.model(pixel_values=pixel_values)
        return out.logits
