import pytorch_lightning as pl
import torch
from torch import nn
from typing import Tuple, List
import enum
from torchmetrics import Accuracy


class PatchEmbed(nn.Module):
    def __init__(
        self, img_size: Tuple[int] | int, patch_size: Tuple[int] | int,
        in_channels: int, embed_dim: int, tokenlize: bool=False
    ):
        """分块Embedding
        
        利用不重叠计算的二维卷积将图片分块进行embed
        
        Input: (N, C, H, W) or (C, H, W)
        Output: (N, embed_dim, num_patch_h, num_patch_w) or (embed_dim, num_patch_h, num_patch_w)；
        若 `tokenlize=True` 则 (N, embed_dim, num_patch_h·num_patch_w) or (embed_dim, num_patch_h·num_patch_w)
        
        num_patch_h = (H // patch_size[0])
        num_patch_w = (W // patch_size[1])

        Args:
            img_size (Tuple[int] | int): 输入图片的尺寸
            patch_size (Tuple[int] | int): 希望使用的patch尺寸
            in_channels (int): 输入图片的维度数
            embed_dim (int): embedding的维数
            tokenlize (bool): 是否将patch扁平化
        """
        super().__init__()
        self.img_size = (img_size[0], img_size[1]) if isinstance(img_size, Tuple) else (img_size, img_size)
        self.patch_size = (patch_size[0], patch_size[1]) if isinstance(patch_size, Tuple) else (patch_size, patch_size)
        self.tokenize = tokenlize
        
        self.proj = nn.Conv2d(
            in_channels=in_channels,
            out_channels=embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size
        )
    
    @property
    def num_patch(self):
        """获取输出的patch图尺寸

        Returns:
            Tuple: (num_patch_h, num_patch_w)
        """
        return (
            self.img_size[0] // self.patch_size[0],
            self.img_size[1] // self.patch_size[1]
        )
        
    def forward(self, x: torch.Tensor):
        # x: (N, C, H, W) or (C, H, W)
        C, H, W = x.shape[-3:]
        assert H == self.img_size[0] and W == self.img_size[1], "PatchEmbed: 实际输入图片大小与预设不符"
        assert C == self.proj.in_channels, "PatchEmbed: 实际输入图片的通道数与预设不符"
        
        # (N, embed_dim, num_patch_h, num_patch_w) or (embed_dim, num_patch_h, num_patch_w)
        x = self.proj(x)
        
        if self.tokenize:
            x = x.flatten(start_dim=-2)

        return x


class HierarchicalTemporalAttention(nn.Module):
    def __init__(
        self, dim: int, num_heads: int, qkv_bias: bool=False, qk_scale: float=None,
        attn_drop: float=0.0, proj_drop: float=0.0, seg_length: List[int]=None
    ):
        """Hierarchical Temporal Attention (HTA) module.

        Attention is computed over nested temporal segments, each a suffix of
        the sequence. Every segment attends within itself; results are fused
        from the shortest segment upwards by averaging (0.5/0.5) the shorter
        segment into the overlapping suffix of the next longer one.

        Input: (..., T, C) or (T, C)
        Output: (..., T, C) or (T, C)

        The largest entry of ``seg_length`` must equal T (enforced in forward).

        Args:
            dim (int): channel dimension of the input sequence
            num_heads (int): number of attention heads
            qkv_bias (bool, optional): whether the qkv projection has a bias
            qk_scale (float, optional): override for the q·k scaling factor
            attn_drop (float, optional): dropout rate on attention weights
            proj_drop (float, optional): dropout rate on the final output
            seg_length (List[int], optional): length of each temporal segment
        """
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.qk_scale = qk_scale or head_dim**-0.5
        # Segments are kept sorted short -> long (rear suffix first).
        self.seg_length = sorted(seg_length)

        # One qkv/proj pair per segment, ordered short -> long.
        self.qkv = nn.ModuleList([
            nn.Linear(dim, 3*dim, bias=qkv_bias) for _ in range(len(seg_length))
        ])
        self.proj = nn.ModuleList([
            nn.Linear(dim, dim) for _ in range(len(seg_length))
        ])
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: torch.Tensor):
        dimension = x.shape
        T, C = dimension[-2:]
        assert T == self.seg_length[-1], "HierarchicalTemporalAttention: 时间维长度与预设值不同"
        assert C == self.dim, "HierarchicalTemporalAttention: 输入通道维度与预设不符"

        # Flatten all leading dims into one batch dim: (N, T, C)
        x = x.view(-1, T, C)
        N = x.shape[0]

        # Per-segment attention outputs, shortest segment first.
        seg_x = []
        for pt, qkv in zip(self.seg_length, self.qkv):
            # Segment = the last `pt` time steps: (N, pt, C)
            sub_x = x[:, -pt:, :]
            # (N, pt, 3·C)
            sub_x = qkv(sub_x)
            # (N, pt, 3, num_heads, head_dim)
            sub_x = sub_x.view(N, pt, 3, self.num_heads, -1).contiguous()
            # (3, N, num_heads, pt, head_dim)
            sub_x = sub_x.permute(2, 0, 3, 1, 4).contiguous()

            # (N, num_heads, pt, head_dim) each
            q, k, v = sub_x[0], sub_x[1], sub_x[2]
            # (N, num_heads, pt, pt)
            attn = q @ k.transpose(-2, -1) * self.qk_scale
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            # (N, num_heads, pt, head_dim)
            sub_x = attn @ v
            # (N, pt, num_heads·head_dim) i.e. (N, pt, C)
            sub_x = sub_x.permute(0, 2, 1, 3).contiguous().view(N, pt, -1).contiguous()
            seg_x.append(sub_x)

        # Fuse segments bottom-up and project. The fusion is done out-of-place:
        # the original in-place slice `*=`/`+=` mutated autograd-tracked
        # tensors, which is fragile (version-counter errors if upstream ops
        # ever need the pre-mutation values).
        seg_x[0] = self.proj[0](seg_x[0])
        for seg_idx in range(1, len(seg_x)):
            # Previous entry is the shorter (already fused+projected) segment.
            shorter = seg_x[seg_idx - 1]
            longer = seg_x[seg_idx]
            t_short = shorter.shape[1]
            fused_tail = 0.5 * longer[:, -t_short:, :] + 0.5 * shorter
            longer = torch.cat([longer[:, :-t_short, :], fused_tail], dim=1)
            seg_x[seg_idx] = self.proj[seg_idx](longer)
        # (N, T, C)
        output = self.proj_drop(seg_x[-1])

        return output.view(dimension).contiguous()


class SpatialAttention(nn.Module):
    def __init__(
        self, dim: int, num_heads: int, qkv_bias: bool=False,
        qk_scale: float=None, attn_drop: float=0.0, proj_drop: float=0.0
    ):
        """Plain multi-head self-attention over the spatial token axis.

        Input: (..., S, C) or (S, C)
        Output: (..., S, C) or (S, C)

        Args:
            dim (int): channel dimension of the input sequence
            num_heads (int): number of attention heads
            qkv_bias (bool, optional): whether the qkv projection has a bias
            qk_scale (float, optional): override for the q·k scaling factor
            attn_drop (float, optional): dropout rate on attention weights
            proj_drop (float, optional): dropout rate on the final output
        """
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.qk_scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: torch.Tensor):
        orig_shape = x.shape
        S, C = orig_shape[-2:]

        # Collapse all leading dims into one batch dim: (N, S, C)
        tokens = x.view(-1, S, C)
        N = tokens.shape[0]

        # Project to q/k/v and split heads:
        # (N, S, 3C) -> (3, N, num_heads, S, head_dim)
        projected = self.qkv(tokens).view(N, S, 3, self.num_heads, -1)
        projected = projected.permute(2, 0, 3, 1, 4).contiguous()
        # Each: (N, num_heads, S, head_dim)
        q, k, v = projected.unbind(0)

        # Scaled dot-product attention: (N, num_heads, S, S)
        scores = (q @ k.transpose(-2, -1)) * self.qk_scale
        weights = self.attn_drop(scores.softmax(dim=-1))

        # Weighted sum of values, then merge heads back: (N, S, C)
        mixed = weights @ v
        mixed = mixed.transpose(1, 2).contiguous().view(N, S, -1).contiguous()
        out = self.proj_drop(self.proj(mixed))

        # Restore the caller's original leading dims
        return out.view(orig_shape).contiguous()


class DropPath(nn.Module):
    """Stochastic depth: drops entire samples (dim 0) with probability
    ``drop_rate`` during training, rescaling survivors by 1/keep_rate.
    Identity in eval mode or when the rate is zero."""

    def __init__(self, drop_rate: float=0.0):
        super().__init__()
        self.drop_rate = drop_rate

    def forward(self, x: torch.Tensor):
        # Fast path: nothing to drop outside training or at rate 0.
        if not self.training or self.drop_rate == 0.0:
            return x
        survival = 1.0 - self.drop_rate
        # One Bernoulli draw per sample, broadcast over remaining dims.
        mask_shape = [x.shape[0]] + [1] * (x.ndim - 1)
        mask = x.new_empty(mask_shape).bernoulli_(survival)
        if survival > 0.0:
            # Rescale so the expected activation magnitude is unchanged.
            mask.div_(survival)
        return x * mask


class LabelSmoothingCrossEntropy(nn.Module):
    """Label-smoothing cross entropy.

    ``target`` holds class indices (not one-hot vectors).

    Input: x(..., C), target(...)
    Output: a single scalar loss

    Note: the original used hard-coded ``dim=1`` for gather/unsqueeze/squeeze,
    which breaks the documented (..., C) contract for inputs with more than
    two dims; all class-axis ops now use ``dim=-1``.
    """
    def __init__(self, smoothing=0.1):
        super().__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        # Weight placed on the true class; the rest is spread uniformly.
        self.confidence = 1. - smoothing

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        logprobs = nn.functional.log_softmax(x, dim=-1)
        # Negative log-likelihood of the true class, per element.
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(-1))
        nll_loss = nll_loss.squeeze(-1)
        # Uniform-smoothing term: mean NLL over all classes.
        smooth_loss = -logprobs.mean(dim=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
        return loss.mean()


class SurgFormerBlock(nn.Module):
    class AggregationStrategy(enum.Enum):
        """Aggregation strategies for the CLS token."""
        MA = "mean"
        TFA = "target frame-centric"

    def __init__(
        self, dim: int, num_heads: int, seg_length: List[int], qkv_bias: bool=False,
        qk_scale: float=None, attn_drop: float=0.0, proj_drop: float=0.0, path_drop: float=0.3, mlp_drop: float=0.0,
        act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_ratio: float=4.0,
        aggregation_strategy: AggregationStrategy=AggregationStrategy.MA
    ):
        """Main building block of SurgFormer.

        Input: x(B, S, T, C), cls_token(B, T, C)
        Output: (B, S, T, C), (B, T, C)

        Args:
            dim (int): channel dimension of the input sequence
            num_heads (int): number of attention heads
            seg_length (List[int]): segment lengths used by HTA, ascending
                (i.e. rear suffixes first)
            qkv_bias (bool, optional): whether qkv projections have a bias
            qk_scale (float, optional): override for the q·k scaling factor
            attn_drop (float, optional): dropout rate on attention weights
            proj_drop (float, optional): dropout rate on attention outputs
            path_drop (float, optional): DropPath rate on the temporal/spatial
                attention branches and the MLP branch
            mlp_drop (float, optional): dropout rate inside the MLP
            act_layer (nn.Module, optional): activation layer type
            norm_layer (nn.Module, optional): normalization layer type
            mlp_ratio (float, optional): hidden/input width ratio of the MLP
            aggregation_strategy (AggregationStrategy, optional): aggregation
                strategy for the cls token
        """
        super().__init__()
        self.drop_path = DropPath(path_drop)
        self.dim = dim
        self.aggregation_strategy = aggregation_strategy
        self.temporal_norm = norm_layer(dim)
        self.temporal_attn = HierarchicalTemporalAttention(
            dim, num_heads, qkv_bias, qk_scale, attn_drop, proj_drop, seg_length
        )
        self.temporal_fc = nn.Linear(dim, dim)

        self.spatial_norm = norm_layer(dim)
        self.spatial_attn = SpatialAttention(
            dim, num_heads, qkv_bias, qk_scale, attn_drop, proj_drop
        )

        self.mlp_norm = norm_layer(dim)

        if aggregation_strategy == SurgFormerBlock.AggregationStrategy.TFA:
            self.cls_norm = norm_layer(dim)

        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_hidden_dim),
            act_layer(),
            nn.Dropout(mlp_drop),
            nn.Linear(mlp_hidden_dim, dim),
            nn.Dropout(mlp_drop)
        )

    def forward(self, x: torch.Tensor, cls_token: torch.Tensor):
        B, S, T, C = x.shape
        # (B·S, T, C)
        x = x.reshape(-1, T, C).contiguous()

        # Temporal self-attention over the input sequence
        # (B·S, T, C)
        temp_attn = self.temporal_attn(self.temporal_norm(x))
        temp_attn = self.drop_path(temp_attn)
        # (B, S·T, C)
        x = x.view(B, -1, C)
        # Residual connection around the temporal branch: (B, S·T, C)
        temp_out = self.temporal_fc(temp_attn.view(B, -1, C)) + x

        # Spatial self-attention over the input sequence
        # Keep the incoming cls token for the residual below: (B, 1, C)
        origin_cls_token = cls_token[:, 0, :].unsqueeze(1)
        # (B·T, 1, C)
        cls_token = cls_token.view(-1, C).contiguous().unsqueeze(1)
        # (B, T, S, C)
        x = temp_out.view(B, -1, T, C).permute(0, 2, 1, 3).contiguous()
        # (B·T, S, C)
        x = x.view(-1, S, C).contiguous()
        # (B·T, 1+S, C)
        x = torch.cat([cls_token, x], dim=1)
        # (B·T, 1+S, C)
        spatial_out = self.spatial_attn(self.spatial_norm(x))
        spatial_out = self.drop_path(spatial_out)
        # Split the cls tokens that went through spatial self-attention
        # (B, T, C)
        cls_token = spatial_out[:, 0, :].view(B, T, C).contiguous()
        # (B·T, S, C)
        spatial_out = spatial_out[:, 1:, :]

        # cls_token aggregation — two strategies
        if self.aggregation_strategy == SurgFormerBlock.AggregationStrategy.MA:
            # (B, 1, C)
            cls_token = torch.mean(cls_token, dim=1, keepdim=True)
        elif self.aggregation_strategy == SurgFormerBlock.AggregationStrategy.TFA:
            # (B, T, C)
            cls_token = self.cls_norm(cls_token)
            # Token of the current (latest) frame. The slice keeps the time
            # dim so the batched matmul yields (B, 1, T); the original
            # `[:, -1, :]` dropped it and broke batching for B > 1 (bug fix).
            # (B, 1, C)
            target_token = cls_token[:, -1:, :]
            # (B, 1, T)
            attn = target_token @ cls_token.transpose(-2, -1)
            attn = attn.softmax(dim=-1)
            # (B, 1, C)
            cls_token = attn @ cls_token
        # (B, S, T, C)
        spatial_out = spatial_out.view(B, -1, S, C).transpose(1, 2).contiguous()
        # (B, S·T, C)
        spatial_out = spatial_out.view(B, -1, C)
        # Sum of temporal and spatial branches, cls token prepended: (B, 1+S·T, C)
        x = torch.cat([origin_cls_token, temp_out], dim=1) + \
            torch.cat([cls_token, spatial_out], dim=1)
        # MLP branch with DropPath: (B, 1+S·T, C)
        x = self.drop_path(self.mlp(self.mlp_norm(x)))

        # Broadcast the aggregated cls token back to every frame: (B, T, C)
        cls_token = x[:, 0, :].unsqueeze(1).repeat((1, T, 1))
        # (B, S, T, C)
        x = x[:, 1:, :].view(B, S, T, C)

        return x, cls_token


class LitSurgFormer(pl.LightningModule):
    def __init__(
        self, img_size: Tuple[int] | int, num_heads: int,
        seg_length: List[int]=None, num_frames: int=16, qkv_bias: bool=False,
        qk_scale: float=None, attn_drop: float=0.0, proj_drop: float=0.0,
        path_drop: float=0.3, mlp_drop: float=0.0, token_drop: float=0.0, other_drop: float=0.0,
        act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_ratio: float=4.0,
        aggregation_strategy: SurgFormerBlock.AggregationStrategy=SurgFormerBlock.AggregationStrategy.MA,
        patch_size: Tuple[int] | int=16, in_channels: int=3, embed_dim: int=768,
        tokenlize: bool=True, depth: int=12, num_classes: int=7, lr: float=5e-4,
        weight_decay: float=1e-5, smoothing: float=0.1
    ):
        """Complete SurgFormer model.

        During training, ground-truth labels are expected with shape (B,).

        Input: (B, C, T, H, W)
        Output: (B, num_classes)

        Args:
            img_size (Tuple[int] | int): original size of the input images
            num_heads (int): number of heads used by each SurgFormerBlock
            seg_length (List[int], optional): segment lengths used by HTA,
                ascending (rear suffixes first); defaults to [4, 8, 16]
            num_frames (int, optional): number of frames per input clip
            qkv_bias (bool, optional): whether qkv projections have a bias
            qk_scale (float, optional): override for the q·k scaling factor
            attn_drop (float, optional): dropout rate on attention weights
            proj_drop (float, optional): dropout rate on attention outputs
            path_drop (float, optional): DropPath rate on the temporal/spatial
                attention branches and the MLP branch
            mlp_drop (float, optional): dropout rate inside the MLP
            token_drop (float, optional): dropout rate on the final cls token
            other_drop (float, optional): dropout rate used elsewhere
                (position / time embeddings)
            act_layer (nn.Module, optional): activation layer type
            norm_layer (nn.Module, optional): normalization layer type
            mlp_ratio (float, optional): hidden/input width ratio of the MLP
            aggregation_strategy (SurgFormerBlock.AggregationStrategy, optional):
                aggregation strategy for the cls token
            patch_size (Tuple[int] | int): desired patch size
            in_channels (int): number of input image channels
            embed_dim (int): embedding dimension
            tokenlize (bool): whether PatchEmbed flattens the patch grid
            depth (int, optional): model depth, i.e. number of blocks
            num_classes (int, optional): number of output classes
            lr (float, optional): training learning rate
            weight_decay (float, optional): weight decay
            smoothing (float, optional): label-smoothing rate for the loss
        """
        super().__init__()
        # None sentinel instead of a shared mutable default argument.
        if seg_length is None:
            seg_length = [4, 8, 16]

        self.patch_embed = PatchEmbed(img_size, patch_size, in_channels, embed_dim, tokenlize)
        self.depth = depth
        self.num_frames = num_frames
        self.lr = lr
        self.weight_decay = weight_decay
        self.smoothing = smoothing

        num_patch_h, num_patch_w = self.patch_embed.num_patch
        # Number of patches per frame
        num_patch = num_patch_h * num_patch_w
        # (1, 1+K, C)
        self.pos_embedding = nn.Parameter(torch.zeros(
            1, 1 + num_patch, embed_dim
        ))
        # (1, T, C)
        # NOTE(review): time_embedding stays zero-initialized while
        # pos_embedding/cls_token get trunc_normal_ — confirm this is intended.
        self.time_embedding = nn.Parameter(torch.zeros(
            1, num_frames, embed_dim
        ))
        # (1, 1, C)
        self.cls_token = nn.Parameter(torch.zeros(
            1, 1, embed_dim
        ))
        nn.init.trunc_normal_(self.pos_embedding, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)

        self.pos_drop = nn.Dropout(other_drop)
        self.time_drop = nn.Dropout(other_drop)

        self.blocks = nn.ModuleList([
            SurgFormerBlock(
                embed_dim, num_heads, seg_length, qkv_bias, qk_scale,
                attn_drop, proj_drop, path_drop, mlp_drop, act_layer, norm_layer,
                mlp_ratio, aggregation_strategy
            ) for _ in range(depth)
        ])

        self.out_norm = norm_layer(embed_dim)

        self.token_drop = nn.Dropout(token_drop)
        self.head = nn.Linear(embed_dim, num_classes)

        self.apply(self._init_weights)

        # Zero-init temporal_fc of every block except the first, so the
        # temporal branch starts out as an identity residual. This must run
        # AFTER self.apply(_init_weights): the original ran it before, and
        # _init_weights immediately overwrote the zeros with trunc_normal_
        # (bug fix).
        for cnt, mod in enumerate(self.blocks):
            if cnt == 0:
                continue
            nn.init.constant_(mod.temporal_fc.weight, 0)
            nn.init.constant_(mod.temporal_fc.bias, 0)

        # Training utilities
        if smoothing > 0.0:
            self.ce_loss = LabelSmoothingCrossEntropy(smoothing=smoothing)
        else:
            self.ce_loss = nn.CrossEntropyLoss()
        self.accuracy_metric = Accuracy("multiclass", num_classes=num_classes)
        # Running accumulators for per-epoch loss averaging
        self.total_loss = 0
        self.batch_cnt = 0

    def _init_weights(self, m):
        """Generic init: trunc-normal Linear weights, zero biases, unit LayerNorm."""
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x: torch.Tensor):
        # x: (B, C, T, H, W)
        B, C, T, H, W = x.shape
        # (B·T, C, H, W)
        x = x.transpose(1, 2).contiguous().view(-1, C, H, W).contiguous()
        # (B·T, embed_dim, num_patch), written as (B·T, C, K) from here on
        x = self.patch_embed(x)
        # (B·T, K, C)
        x = x.transpose(1, 2).contiguous()
        K, C = x.shape[-2:]

        # Prepend the cls token, then add the position embedding
        # (B·T, 1, C)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        # (B·T, 1+K, C)
        x = torch.cat([cls_token, x], dim=1)
        x = x + self.pos_embedding
        x = self.pos_drop(x)
        # Split the cls token back out
        # (B, T, C)
        cls_token = x[:, 0, :].view(B, T, -1).contiguous()
        # (B, K, T, C)
        x = x[:, 1:, :].view(B, T, K, -1).transpose(1, 2).contiguous()
        # (B·K, T, C)
        x = x.view(-1, T, C).contiguous()
        x = x + self.time_embedding
        x = self.time_drop(x)
        # (B, K, T, C)
        x = x.view(B, K, T, -1).contiguous()

        for blk in self.blocks:
            x, cls_token = blk(x, cls_token)

        # (B, K·T, C)
        x = x.view(B, -1, C).contiguous()
        # (B, 1, C) — blocks return the cls token repeated along T
        cls_token = cls_token[:, 0, :].unsqueeze(1)
        # (B, 1+K·T, C)
        output = self.out_norm(torch.cat([cls_token, x], dim=1))

        # Classification uses the CLS token only
        # (B, C)
        cls_token = self.token_drop(output[:, 0, :])

        # (B, num_classes)
        x = self.head(cls_token)

        return x

    def on_train_epoch_start(self):
        """Reset the per-epoch metric and loss accumulators."""
        self.accuracy_metric.reset()
        self.total_loss = 0
        self.batch_cnt = 0

    def training_step(self, batch, batch_idx):
        data, target = batch
        pred = self.forward(data)
        loss = self.ce_loss(pred, target)
        acc = self.accuracy_metric(pred, target)
        # detach: accumulating the live loss tensor would keep every batch's
        # autograd graph alive for the whole epoch (memory leak — bug fix)
        self.total_loss += loss.detach()
        self.batch_cnt += 1

        self.log("step_loss", loss, prog_bar=True, on_step=True, on_epoch=False)
        self.log("step_acc", acc, prog_bar=True, on_step=True, on_epoch=False)

        return loss

    def on_train_epoch_end(self):
        """Log the epoch-level accuracy and average loss."""
        self.log("epoch_avg_acc", self.accuracy_metric.compute(), on_epoch=True, logger=True)
        self.log("epoch_avg_loss", self.total_loss / self.batch_cnt, on_epoch=True, logger=True)

    def configure_optimizers(self):
        """AdamW with weight decay disabled for biases, 1-D params and embeddings."""
        no_decay, decay = [], []
        # Parameters that should be excluded from weight decay
        skip_weight_decay = ["pos_embedding", "cls_token", "time_embedding"]
        for name, param in self.named_parameters():
            # 1-D params covers LayerNorm weights/biases as well
            if name.endswith(".bias") or name in skip_weight_decay or len(param.shape) == 1:
                no_decay.append(param)
            else:
                decay.append(param)

        optimizer = torch.optim.AdamW([
            {"params": no_decay, "weight_decay": 0.0},
            {"params": decay, "weight_decay": self.weight_decay}
        ], lr=self.lr)

        return optimizer

    def on_validation_epoch_start(self):
        """Reset the per-epoch metric and loss accumulators."""
        self.accuracy_metric.reset()
        self.total_loss = 0
        self.batch_cnt = 0

    def validation_step(self, batch, batch_idx):
        data, target = batch
        pred = self.forward(data)
        loss = self.ce_loss(pred, target)
        acc = self.accuracy_metric(pred, target)
        # detach for consistency with training_step (no graph retention)
        self.total_loss += loss.detach()
        self.batch_cnt += 1

        self.log("step_loss", loss, prog_bar=True, on_step=True, on_epoch=False)
        self.log("step_acc", acc, prog_bar=True, on_step=True, on_epoch=False)

        return loss

    def on_validation_epoch_end(self):
        """Log the validation epoch-level accuracy and average loss."""
        avg_loss = self.total_loss / self.batch_cnt
        self.log("val_avg_loss", avg_loss, prog_bar=False, on_epoch=True, logger=True)
        self.log("val_avg_acc", self.accuracy_metric.compute(), prog_bar=False, on_epoch=True, logger=True)


if __name__ == "__main__":
    # Smoke test: one forward pass on random data (requires CUDA).
    batch, frames, height, width, channels = 8, 16, 96, 96, 3
    net = LitSurgFormer(96, 6, depth=4).cuda()
    clip = torch.randn((batch, channels, frames, height, width)).cuda()
    labels = torch.randint(0, 7, (batch,)).cuda()
    out = net(clip)
    print(out)
