"""
Point Transformer V2 Mode 2 (recommended)

Disable Grouped Linear

Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""

from copy import deepcopy
import math
import os
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from torch_geometric.nn.pool import voxel_grid
from torch_scatter import segment_csr

import einops
from timm.models.layers import DropPath
import pointops

from pointcept.models.builder import MODELS
from pointcept.models.utils import offset2batch, batch2offset
import pointcept.utils.comm as comm

from .point_super_token_attention import PointSuperTokenAttention
from .sparse_attention import SparseAttention
from .super_token_fuser import get_super_token_fuser
from .super_token_initializers import get_super_token_initializer
from .basic_blocks import SIRENEmbed

class PointBatchNorm(nn.Module):
    """
    Batch normalization over the channel dimension of point-cloud features.

    Accepts flattened features of shape [B*N, C] or grouped features of
    shape [B*N, L, C]; any other rank raises NotImplementedError.
    """

    def __init__(self, embed_channels):
        super().__init__()
        self.norm = nn.BatchNorm1d(embed_channels)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        ndim = input.dim()
        if ndim == 2:
            return self.norm(input)
        if ndim == 3:
            # BatchNorm1d expects channels at dim 1, so swap L and C
            # around the call and restore the layout afterwards.
            transposed = input.transpose(1, 2).contiguous()
            return self.norm(transposed).transpose(1, 2).contiguous()
        raise NotImplementedError


class GroupedVectorAttention(nn.Module):
    """Grouped vector attention over kNN neighbourhoods (PTv2 style).

    Attention weights are predicted per channel group rather than per
    channel; padded neighbours (reference_index == -1) are masked out of
    the aggregation.
    """

    def __init__(
        self,
        embed_channels,
        groups,
        attn_drop_rate=0.0,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
    ):
        super().__init__()
        self.embed_channels = embed_channels
        self.groups = groups
        assert embed_channels % groups == 0
        self.attn_drop_rate = attn_drop_rate
        self.qkv_bias = qkv_bias
        self.pe_multiplier = pe_multiplier
        self.pe_bias = pe_bias

        def _qk_proj():
            # Shared layout for the query/key projections.
            return nn.Sequential(
                nn.Linear(embed_channels, embed_channels, bias=qkv_bias),
                PointBatchNorm(embed_channels),
                nn.ReLU(inplace=True),
            )

        def _pe_mlp():
            # Positional-encoding MLP: xyz offset -> embed_channels.
            return nn.Sequential(
                nn.Linear(3, embed_channels),
                PointBatchNorm(embed_channels),
                nn.ReLU(inplace=True),
                nn.Linear(embed_channels, embed_channels),
            )

        self.linear_q = _qk_proj()
        self.linear_k = _qk_proj()
        self.linear_v = nn.Linear(embed_channels, embed_channels, bias=qkv_bias)
        if self.pe_multiplier:
            self.linear_p_multiplier = _pe_mlp()
        if self.pe_bias:
            self.linear_p_bias = _pe_mlp()
        self.weight_encoding = nn.Sequential(
            nn.Linear(embed_channels, groups),
            PointBatchNorm(groups),
            nn.ReLU(inplace=True),
            nn.Linear(groups, groups),
        )
        self.softmax = nn.Softmax(dim=1)
        self.attn_drop = nn.Dropout(attn_drop_rate)

    def forward(self, feat, coord, reference_index):
        query = self.linear_q(feat)
        key = self.linear_k(feat)
        value = self.linear_v(feat)
        # Gather neighbour keys (with xyz offsets prepended) and values.
        grouped_key = pointops.grouping(reference_index, key, coord, with_xyz=True)
        value = pointops.grouping(reference_index, value, coord, with_xyz=False)
        pos = grouped_key[:, :, 0:3]
        key = grouped_key[:, :, 3:]
        relation_qk = key - query.unsqueeze(1)
        if self.pe_multiplier:
            relation_qk = relation_qk * self.linear_p_multiplier(pos)
        if self.pe_bias:
            peb = self.linear_p_bias(pos)
            relation_qk = relation_qk + peb
            value = value + peb

        weight = self.attn_drop(self.softmax(self.weight_encoding(relation_qk)))

        # Padded neighbour slots carry index -1 -> sign(-1 + 1) == 0 mask.
        mask = torch.sign(reference_index + 1)
        weight = weight * mask.unsqueeze(-1)
        n, ns, _ = value.shape
        # Split channels into groups, weight each group, sum over neighbours.
        value = value.view(n, ns, self.groups, -1)
        feat = (value * weight.unsqueeze(-1)).sum(dim=1)
        return feat.reshape(n, -1)


class Block(nn.Module):
    """PTv2 block: grouped vector attention plus optional super-token attention.

    The base path is the PTv2 bottleneck (fc1 -> attn -> fc3 with a residual
    connection). When ``psta_cfg`` is given, the block additionally
    (1) gathers super tokens from point features via sparse cross-attention
    (``ca_down``), (2) fuses them (``st_fuser``), and (3) scatters them back
    onto the point features (``ca_up``).
    """

    def __init__(
        self,
        embed_channels,
        groups,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        psta_cfg=None,
        enable_checkpoint=False,
        block_idx=-1,
        layer_idx=-1,
    ):
        super(Block, self).__init__()
        self.attn = GroupedVectorAttention(
            embed_channels=embed_channels,
            groups=groups,
            qkv_bias=qkv_bias,
            attn_drop_rate=attn_drop_rate,
            pe_multiplier=pe_multiplier,
            pe_bias=pe_bias,
        )
        self.with_psta = psta_cfg is not None
        if self.with_psta:
            pe_cfg = psta_cfg["pe_cfg"]
            num_heads = psta_cfg.get("num_heads", 8)
            norm_loc = psta_cfg.get("norm_loc", "pre")
            block_pe = psta_cfg.get("block_pe", False)
            # Optional per-block positional embedding added to input features.
            self.block_pe = None
            if block_pe:
                block_pe_cfg = pe_cfg.copy()
                block_pe_cfg["n_dim"] = 3
                block_pe_cfg["d_embed"] = embed_channels
                block_pe_cfg["d_hidden"] = embed_channels
                self.block_pe = SIRENEmbed(**block_pe_cfg)
            # Super-token initializer. NOTE: the cfg sub-dicts are mutated in
            # place, so they are shared/overwritten across blocks — presumably
            # intentional since every block writes the same base values.
            init_cfg = psta_cfg["initializer_cfg"]
            init_cfg["d_embed"] = embed_channels
            init_cfg["n_dim"] = 3
            init_cfg["pe_cfg"] = pe_cfg
            init_cfg["block_idx"] = block_idx
            init_cfg["layer_idx"] = layer_idx
            self.st_initializer = get_super_token_initializer(init_cfg)
            # Super-token fuser (token-to-token mixing).
            fuser_cfg = psta_cfg["fuser_cfg"]
            fuser_cfg["d_embed"] = embed_channels
            fuser_cfg["n_dim"] = 3
            fuser_cfg["num_heads"] = num_heads
            fuser_cfg["dropout"] = attn_drop_rate
            fuser_cfg["norm_loc"] = norm_loc
            fuser_cfg["pe_cfg"] = pe_cfg
            self.st_fuser = get_super_token_fuser(fuser_cfg)
            # Sparse cross-attention: ca_down (points -> tokens) and
            # ca_up (tokens -> points).
            ca_cfg = psta_cfg["ca_cfg"]
            ca_cfg["norm_loc"] = norm_loc
            ca_cfg["enable_checkpoint"] = False # avoid error in sharing
            self.ca_down = SparseAttention(
                embed_channels, num_heads, attn_drop_rate, 0.0, 
                residual=True, **ca_cfg
            )
            self.ca_up = SparseAttention(
                embed_channels, num_heads, attn_drop_rate, drop_path_rate,
                residual=True, **ca_cfg
            )
        self.fc1 = nn.Linear(embed_channels, embed_channels, bias=False)
        self.fc3 = nn.Linear(embed_channels, embed_channels, bias=False)
        self.norm1 = PointBatchNorm(embed_channels)
        self.norm2 = PointBatchNorm(embed_channels)
        self.norm3 = PointBatchNorm(embed_channels)
        self.act = nn.ReLU(inplace=True)
        self.enable_checkpoint = enable_checkpoint
        self.drop_path = (
            DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        )
        # Debug capture toggled externally (see PointTransformerV2SST.forward).
        self.debug = False
        self.dbg_states = {}

    def forward(self, points, reference_index, info=None):
        """Run the block.

        Args:
            points: ``[coord, feat, offset]`` triple.
            reference_index: kNN neighbour indices for grouped vector attention.
            info: optional precomputed super-token state; when ``None`` it is
                produced by ``self.st_initializer``.

        Returns:
            ``[coord, feat, offset]`` with updated features.
        """
        if self.debug:
            self.dbg_states = { "inputs": [points, reference_index, info] }
        coord, feat, offset = points
        identity = feat
        if feat.isnan().any() or feat.isinf().any():
            print(f"[warning] nan or inf in input feat: nan={feat.isnan().any()} inf={feat.isinf().any()}")
            # In-place sanitation; note `identity` aliases the same tensor, so
            # the residual branch also sees the sanitized values.
            feat.nan_to_num_(0, 10, -10)
        # Keep st_info defined even without PSTA so the NaN debug dump below
        # never hits an unbound name (bug fix: it previously raised NameError
        # when with_psta was False and a NaN was detected).
        st_info = None
        if self.with_psta:
            if self.block_pe is not None:
                feat = feat + self.block_pe(coord)
            st_info = self.st_initializer(coord, feat, offset) if info is None else info
            asso = st_info["asso"]
            bias = st_info.get("rel_bias", None)
            # Points -> super tokens: token features query the point features.
            q = st_info["st_feat"]
            k = v = feat
            tokens, weights0 = self.ca_down(q, k, v, asso, bias)
            if self.debug:
                self.dbg_states["st_info"] = dict(st_info)
                self.dbg_states["tokens0"] = tokens
                self.dbg_states["weight0"] = weights0
        # Standard PTv2 bottleneck with residual connection.
        feat = self.act(self.norm1(self.fc1(feat)))
        feat = (
            self.attn(feat, coord, reference_index)
            if not self.enable_checkpoint
            else checkpoint(self.attn, feat, coord, reference_index)
        )
        feat = self.act(self.norm2(feat))
        feat = self.norm3(self.fc3(feat))
        feat = identity + self.drop_path(feat)
        feat = self.act(feat)
        if self.debug:
            self.dbg_states["feat0"] = feat
        if self.with_psta:
            tokens, fuser_info = self.st_fuser(tokens, st_info)
            self.st_initializer.store_glb_state("st_feat", tokens)
            # Super tokens -> points: flip association pairs to reverse the
            # attention direction, points now act as queries.
            q = feat
            k = v = tokens
            asso = asso[..., [1, 0]]
            feat, weights1 = self.ca_up(q, k, v, asso, bias)
            if self.debug:
                self.dbg_states["feat1"] = feat
                self.dbg_states["tokens1"] = tokens
                self.dbg_states["weight1"] = weights1
                self.dbg_states["fuser_info"] = fuser_info
        # Bug fix: the original condition parsed as
        #   feat_nan or ((with_psta and tokens_nan) and training)
        # due to `and` binding tighter than `or`, so a NaN in `feat` raised
        # even in eval mode. The training guard now covers the whole check.
        has_nan = feat.isnan().any() or (self.with_psta and tokens.isnan().any())
        if has_nan and self.training:
            dbg_states = { "module": self, "inputs": [points, reference_index, st_info] }
            torch.save(dbg_states, f"nan_dbg_states_r{comm.get_rank()}.pth")
            raise ValueError("nan in feat")
        return [coord, feat, offset]


class BlockSequence(nn.Module):
    """A stack of ``depth`` PTv2 blocks sharing one kNN reference index."""

    def __init__(
        self,
        depth,
        embed_channels,
        groups,
        neighbours=16,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        psta_cfg=None,
        enable_checkpoint=False,
        layer_idx=-1,
    ):
        super().__init__()

        # Per-block stochastic-depth rates: an explicit list must match
        # depth, a float is broadcast, anything else disables drop path.
        if isinstance(drop_path_rate, list):
            assert len(drop_path_rate) == depth
            dp_rates = list(drop_path_rate)
        elif isinstance(drop_path_rate, float):
            dp_rates = [drop_path_rate] * depth
        else:
            dp_rates = [0.0] * depth

        self.neighbours = neighbours
        self.blocks = nn.ModuleList(
            Block(
                embed_channels=embed_channels,
                groups=groups,
                qkv_bias=qkv_bias,
                pe_multiplier=pe_multiplier,
                pe_bias=pe_bias,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=dp_rates[i],
                psta_cfg=psta_cfg,
                enable_checkpoint=enable_checkpoint,
                block_idx=i,
                layer_idx=layer_idx,
            )
            for i in range(depth)
        )

    def forward(self, points):
        coord, _, offset = points
        # One neighbourhood query shared by every block in the sequence.
        # For windowed attention, swap out this reference-index query.
        reference_index, _ = pointops.knn_query(self.neighbours, coord, offset)
        for blk in self.blocks:
            points = blk(points, reference_index)
        return points


class GridPool(nn.Module):
    """
    Partition-based pooling (grid pooling): points falling into the same
    voxel of a regular grid are reduced to one point (mean coordinate,
    max feature).
    """

    def __init__(self, in_channels, out_channels, grid_size, bias=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.grid_size = grid_size
        self.fc = nn.Linear(in_channels, out_channels, bias=bias)
        self.norm = PointBatchNorm(out_channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, points, start=None):
        coord, feat, offset = points
        batch = offset2batch(offset)
        feat = self.act(self.norm(self.fc(feat)))
        if start is None:
            # Per-sample minimum coordinate, used as the grid origin.
            sample_ptr = torch.cat(
                [batch.new_zeros(1), torch.cumsum(batch.bincount(), dim=0)]
            )
            start = segment_csr(coord, sample_ptr, reduce="min")
        cluster = voxel_grid(
            pos=coord - start[batch], size=self.grid_size, batch=batch, start=0
        )
        # Relabel voxel ids to consecutive cluster indices.
        _, cluster, counts = torch.unique(
            cluster, sorted=True, return_inverse=True, return_counts=True
        )
        _, order = torch.sort(cluster)
        idx_ptr = torch.cat([counts.new_zeros(1), torch.cumsum(counts, dim=0)])
        # Segment-wise reduction over points grouped by cluster.
        coord = segment_csr(coord[order], idx_ptr, reduce="mean")
        feat = segment_csr(feat[order], idx_ptr, reduce="max")
        batch = batch[idx_ptr[:-1]]
        offset = batch2offset(batch)
        return [coord, feat, offset], cluster


class UnpoolWithSkip(nn.Module):
    """
    Unpooling back to the skip resolution, via cluster mapping ("map")
    or interpolation ("interp"), with an optional skip connection.
    """

    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        bias=True,
        skip=True,
        backend="map",
    ):
        super().__init__()
        self.in_channels = in_channels
        self.skip_channels = skip_channels
        self.out_channels = out_channels
        self.skip = skip
        self.backend = backend
        assert self.backend in ["map", "interp"]

        def _proj(d_in):
            # Linear + norm + ReLU projection onto out_channels.
            return nn.Sequential(
                nn.Linear(d_in, out_channels, bias=bias),
                PointBatchNorm(out_channels),
                nn.ReLU(inplace=True),
            )

        self.proj = _proj(in_channels)
        self.proj_skip = _proj(skip_channels)

    def forward(self, points, skip_points, cluster=None):
        coord, feat, offset = points
        skip_coord, skip_feat, skip_offset = skip_points
        projected = self.proj(feat)
        if self.backend == "map" and cluster is not None:
            # Broadcast each pooled feature back to its member points.
            feat = projected[cluster]
        else:
            feat = pointops.interpolation(
                coord, skip_coord, projected, offset, skip_offset
            )
        if self.skip:
            feat = feat + self.proj_skip(skip_feat)
        return [skip_coord, feat, skip_offset]


class Encoder(nn.Module):
    """Grid-pool downsampling followed by a sequence of PTv2 blocks."""

    def __init__(
        self,
        depth,
        in_channels,
        embed_channels,
        groups,
        grid_size=None,
        neighbours=16,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=None,
        drop_path_rate=None,
        psta_cfg=None,
        enable_checkpoint=False,
        layer_idx=-1,
    ):
        super().__init__()
        self.down = GridPool(
            in_channels=in_channels,
            out_channels=embed_channels,
            grid_size=grid_size,
        )
        self.blocks = BlockSequence(
            depth=depth,
            embed_channels=embed_channels,
            groups=groups,
            neighbours=neighbours,
            qkv_bias=qkv_bias,
            pe_multiplier=pe_multiplier,
            pe_bias=pe_bias,
            # None means "use the default of no dropout / no drop path".
            attn_drop_rate=0.0 if attn_drop_rate is None else attn_drop_rate,
            drop_path_rate=0.0 if drop_path_rate is None else drop_path_rate,
            psta_cfg=psta_cfg,
            enable_checkpoint=enable_checkpoint,
            layer_idx=layer_idx,
        )

    def forward(self, points):
        pooled, cluster = self.down(points)
        return self.blocks(pooled), cluster


class Decoder(nn.Module):
    """Unpool to the skip resolution, then refine with PTv2 blocks."""

    def __init__(
        self,
        in_channels,
        skip_channels,
        embed_channels,
        groups,
        depth,
        neighbours=16,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=None,
        drop_path_rate=None,
        psta_cfg=None,
        enable_checkpoint=False,
        unpool_backend="map",
        layer_idx=-1,
    ):
        super().__init__()
        self.up = UnpoolWithSkip(
            in_channels=in_channels,
            out_channels=embed_channels,
            skip_channels=skip_channels,
            backend=unpool_backend,
        )
        self.blocks = BlockSequence(
            depth=depth,
            embed_channels=embed_channels,
            groups=groups,
            neighbours=neighbours,
            qkv_bias=qkv_bias,
            pe_multiplier=pe_multiplier,
            pe_bias=pe_bias,
            # None means "use the default of no dropout / no drop path".
            attn_drop_rate=0.0 if attn_drop_rate is None else attn_drop_rate,
            drop_path_rate=0.0 if drop_path_rate is None else drop_path_rate,
            psta_cfg=psta_cfg,
            enable_checkpoint=enable_checkpoint,
            layer_idx=layer_idx,
        )

    def forward(self, points, skip_points, cluster):
        return self.blocks(self.up(points, skip_points, cluster))


class GVAPatchEmbed(nn.Module):
    """Initial point embedding: linear projection then attention blocks."""

    def __init__(
        self,
        depth,
        in_channels,
        embed_channels,
        groups,
        neighbours=16,
        qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        enable_checkpoint=False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.embed_channels = embed_channels
        # Project raw input features into the embedding space.
        self.proj = nn.Sequential(
            nn.Linear(in_channels, embed_channels, bias=False),
            PointBatchNorm(embed_channels),
            nn.ReLU(inplace=True),
        )
        self.blocks = BlockSequence(
            depth=depth,
            embed_channels=embed_channels,
            groups=groups,
            neighbours=neighbours,
            qkv_bias=qkv_bias,
            pe_multiplier=pe_multiplier,
            pe_bias=pe_bias,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            enable_checkpoint=enable_checkpoint,
        )

    def forward(self, points):
        coord, feat, offset = points
        return self.blocks([coord, self.proj(feat), offset])

# Dump per-block debug states every this many forward passes (rank 0 only).
REPORT_INTERVAL = 400
# Maximum number of debug dump files kept on disk; older dumps are deleted.
NUM_REPORTS_KEEP = 2

@MODELS.register_module("PT-v2m2-SST")
class PointTransformerV2SST(nn.Module):
    """Point Transformer V2 U-Net with super-token attention (SST).

    Encoder/decoder stages wrap PTv2 ``BlockSequence``s; ``psta_cfg_enc`` /
    ``psta_cfg_dec`` enable point super-token attention inside the blocks.
    ``block_share_cnt`` lets later blocks of each stage reuse (alias) the
    attention sub-modules of the first N blocks. Every ``REPORT_INTERVAL``
    forward passes (rank 0 only, training) the model saves per-block debug
    states to disk, keeping at most ``NUM_REPORTS_KEEP`` dump files.
    """

    def __init__(
        self,
        in_channels,
        num_classes,
        patch_embed_depth=1,
        patch_embed_channels=48,
        patch_embed_groups=6,
        patch_embed_neighbours=8,
        enc_depths=(2, 2, 6, 2),
        enc_channels=(96, 192, 384, 512),
        enc_groups=(12, 24, 48, 64),
        enc_neighbours=(16, 16, 16, 16),
        dec_depths=(1, 1, 1, 1),
        dec_channels=(48, 96, 192, 384),
        dec_groups=(6, 12, 24, 48),
        dec_neighbours=(16, 16, 16, 16),
        grid_sizes=(0.06, 0.12, 0.24, 0.48),
        attn_qkv_bias=True,
        pe_multiplier=False,
        pe_bias=True,
        attn_drop_rate=0.0,
        drop_path_rate=0,
        enable_checkpoint=False,
        unpool_backend="map",
        psta_cfg_enc=None,
        psta_cfg_dec=None,
        block_share_cnt=0, # 0 for no sharing, N to share the first N blocks' modules cyclically per layer
        shared_modules=("ca_down", "ca_up"), # names of sub-modules to be shared across blocks
    ):
        super(PointTransformerV2SST, self).__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.num_stages = len(enc_depths)
        # All per-stage configuration tuples must have the same length.
        assert self.num_stages == len(dec_depths)
        assert self.num_stages == len(enc_channels)
        assert self.num_stages == len(dec_channels)
        assert self.num_stages == len(enc_groups)
        assert self.num_stages == len(dec_groups)
        assert self.num_stages == len(enc_neighbours)
        assert self.num_stages == len(dec_neighbours)
        assert self.num_stages == len(grid_sizes)
        self.patch_embed = GVAPatchEmbed(
            in_channels=in_channels,
            embed_channels=patch_embed_channels,
            groups=patch_embed_groups,
            depth=patch_embed_depth,
            neighbours=patch_embed_neighbours,
            qkv_bias=attn_qkv_bias,
            pe_multiplier=pe_multiplier,
            pe_bias=pe_bias,
            attn_drop_rate=attn_drop_rate,
            enable_checkpoint=enable_checkpoint,
        )

        # Linearly increasing stochastic-depth rates across all blocks.
        enc_dp_rates = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(enc_depths))
        ]
        dec_dp_rates = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(dec_depths))
        ]
        # Prepend/append so channel lists line up between adjacent stages.
        enc_channels = [patch_embed_channels] + list(enc_channels)
        dec_channels = list(dec_channels) + [enc_channels[-1]]
        self.enc_stages = nn.ModuleList()
        self.dec_stages = nn.ModuleList()
        for i in range(self.num_stages):
            enc = Encoder(
                depth=enc_depths[i],
                in_channels=enc_channels[i],
                embed_channels=enc_channels[i + 1],
                groups=enc_groups[i],
                grid_size=grid_sizes[i],
                neighbours=enc_neighbours[i],
                qkv_bias=attn_qkv_bias,
                pe_multiplier=pe_multiplier,
                pe_bias=pe_bias,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=enc_dp_rates[
                    sum(enc_depths[:i]) : sum(enc_depths[: i + 1])
                ],
                enable_checkpoint=enable_checkpoint,
                psta_cfg=psta_cfg_enc,
                layer_idx=i,
            )
            self.enc_stages.append(enc)
        # NOTE: some modules require that decoders be constructed in run
        # (i.e. reversed) order, hence the separate loop over j.
        for i in range(self.num_stages):
            j = self.num_stages - i - 1 # reverse decoder layer index
            dec = Decoder(
                depth=dec_depths[j],
                in_channels=dec_channels[j + 1],
                skip_channels=enc_channels[j],
                embed_channels=dec_channels[j],
                groups=dec_groups[j],
                neighbours=dec_neighbours[j],
                qkv_bias=attn_qkv_bias,
                pe_multiplier=pe_multiplier,
                pe_bias=pe_bias,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=dec_dp_rates[
                    sum(dec_depths[:j]) : sum(dec_depths[: j + 1])
                ],
                enable_checkpoint=enable_checkpoint,
                unpool_backend=unpool_backend,
                psta_cfg=psta_cfg_dec,
                layer_idx=i+self.num_stages,
            )
            self.dec_stages.append(dec)
        self.seg_head = (
            nn.Sequential(
                nn.Linear(dec_channels[0], dec_channels[0]),
                PointBatchNorm(dec_channels[0]),
                nn.ReLU(inplace=True),
                nn.Linear(dec_channels[0], num_classes),
            )
            if num_classes > 0
            else nn.Identity()
        )
        # Forward-pass counter driving the periodic debug dumps.
        self.acc_iter = 0
        # Paths of debug dump files currently on disk (oldest first).
        self.reports = []

        # Merge shared layers: blocks beyond the first `block_share_cnt` in a
        # sequence alias (share parameters with) the module of the block at
        # index i % block_share_cnt.
        def redirect_shared_modules(block_seq):
            if block_share_cnt <= 0:
                return
            for i, block in enumerate(block_seq):
                block: Block
                if i >= block_share_cnt:
                    base = block_seq[i%block_share_cnt]
                    for m in shared_modules:
                        assert hasattr(block, m) and hasattr(base, m)
                        # add_module replaces the attribute, redirecting it to
                        # the base block's sub-module.
                        block.add_module(m, getattr(base, m))
        for enc, dec in zip(self.enc_stages, self.dec_stages):
            redirect_shared_modules(enc.blocks.blocks)
            redirect_shared_modules(dec.blocks.blocks)


    def forward(self, data_dict):
        """Segment a batch of point clouds.

        Args:
            data_dict: dict with "coord", "feat" and "offset" tensors
                (offset is the cumulative point count per sample).

        Returns:
            Per-point segmentation logits.
        """
        self.acc_iter += 1
        # Enable per-block debug capture periodically on rank 0 only.
        debug = self.acc_iter % REPORT_INTERVAL == 0 and comm.get_rank() == 0
        dbg_states = { "input": data_dict, "module": self }
        for i, enc in enumerate(self.enc_stages):
            enc: Encoder
            for j, b in enumerate(enc.blocks.blocks):
                b: Block
                b.debug = debug
        for i, dec in enumerate(self.dec_stages):
            dec: Decoder
            for j, b in enumerate(dec.blocks.blocks):
                b: Block
                b.debug = debug
        if debug:
            self.zero_grad(set_to_none=True)
        # if data_dict["coord"].isnan().any():
        #     raise ValueError("NaN in coord")

        coord = data_dict["coord"]
        feat = data_dict["feat"]
        offset = data_dict["offset"].int()

        # a batch of point cloud is a list of coord, feat and offset
        points = [coord, feat, offset]
        points = self.patch_embed(points)
        skips = [[points]]
        for i in range(self.num_stages):
            points, cluster = self.enc_stages[i](points)
            skips[-1].append(cluster)  # record grid cluster of pooling
            skips.append([points])  # record points info of current stage

        points = skips.pop(-1)[0]  # unpooling points info in the last enc stage
        for i in range(self.num_stages):
            skip_points, cluster = skips.pop(-1)
            points = self.dec_stages[i](points, skip_points, cluster)
        coord, feat, offset = points
        seg_logits = self.seg_head(feat)

        # Persist the collected per-block debug states, keeping only the
        # most recent NUM_REPORTS_KEEP dump files on disk.
        if debug and comm.get_rank() == 0 and self.training:
            for i, enc in enumerate(self.enc_stages):
                enc: Encoder
                for j, b in enumerate(enc.blocks.blocks):
                    b: Block
                    dbg_states[f"enc_l{i}_b{j}"] = b.dbg_states
            for i, dec in enumerate(self.dec_stages):
                dec: Decoder
                for j, b in enumerate(dec.blocks.blocks):
                    b: Block
                    dbg_states[f"dec_l{i}_b{j}"] = b.dbg_states
            path = f"debug_states_{self.acc_iter}iter.pth"
            torch.save(dbg_states, path)
            self.reports.append(path)
            if len(self.reports) > NUM_REPORTS_KEEP:
                rm_path = self.reports.pop(0)
                os.remove(rm_path)

        return seg_logits
