import torch
import torch.nn as nn 

from .segformer import SegFormer
from .segformer_prompt import SegFormerPrompt

from scipy.spatial.distance import cosine

from .cross_attn import CrossAttentionShort, CrossAttentionLong
import math 


class MirrorDecVideoPrompt(nn.Module):
    """Segment the middle frame of a 3-frame clip.

    The two neighbouring frames (time indices 0 and 2) are encoded once and
    their deepest features are handed to the prompt-enabled backbone as
    ``prompt_features`` when segmenting the middle frame.
    """

    def __init__(self, backbone) -> None:
        super().__init__()
        # `backbone` is currently ignored; a prompt-enabled SegFormer is
        # always built (keyword spelling matches its signature).
        self.backbone = SegFormerPrompt(pretrianed=True, scale_factor=1)

    def forward(self, x: torch.Tensor):
        """x: (B, T, C, W, H) clip with T == 3; returns logits for frame 1."""
        batch, _, chans, width, height = x.shape

        # Stack the two support frames and fold the frame axis into the
        # batch axis so the encoder processes both in a single pass.
        support = torch.stack([x[:, 0], x[:, 2]], dim=1)
        support = support.reshape(batch * 2, chans, width, height)

        encoder_feats = self.backbone.forward_features(support)

        # Deepest encoder stage, e.g. (B*2, 512, 16, 16).
        deep = encoder_feats[-1]
        feat_c = deep.shape[1]

        # Restore the frame axis: (B, 2, C', h, w).
        prompts = deep.reshape(batch, 2, feat_c, deep.shape[2], deep.shape[3])

        # Segment the target (middle) frame, prompted by the support features.
        target = x[:, 1]
        return self.backbone(target, prompt_features=prompts)



class MirrorDecAffinity(nn.Module):
    """Segment the middle frame of a clip using short-term cross attention.

    The deepest features of the two neighbouring frames are channel-
    concatenated and attended into the middle frame's features before the
    decode head runs.
    """

    def __init__(self, backbone) -> None:
        super().__init__()
        if backbone != "segformer":
            # Only the SegFormer backbone is wired up.
            raise ImportError
        self.backbone = SegFormer(pretrianed=True, scale_factor=1)

        self.cross_attn_short = CrossAttentionShort(512, 0.0, scale_factor=0.25)

        # NOTE(review): registered but never used in forward(); kept so that
        # existing checkpoints' state dicts still load.
        self.fuse_layer = nn.Conv2d(2 * 512, 512, kernel_size=1, stride=1, padding=0)

    def forward(self, x: torch.Tensor):
        """x: (B, T, C, W, H); returns segmentation logits for frame 1."""
        b, t, c, w, h = x.shape
        features = self.backbone.forward_features(x.reshape(b * t, c, w, h))

        # Keep only the middle frame's features at every scale for decoding.
        decoder_inputs = [
            f.reshape(b, t, f.shape[1], f.shape[2], f.shape[3])[:, 1]
            for f in features
        ]

        # Deepest stage: (B*T, 512, 16, 16) -> (B, T, 512, 16, 16).
        deep = features[-1]
        dc = deep.shape[1]
        deep = deep.reshape(b, t, dc, deep.shape[2], deep.shape[3])

        # Left and right neighbours, concatenated along the channel axis.
        neighbour_frames = torch.cat([deep[:, 0], deep[:, 2]], dim=1)
        # Middle frame(s), time axis kept as a singleton.
        target = deep[:, 1:-1]

        fused = self.cross_attn_short(target, neighbour_frames)

        # Swap the temporally fused features in for the deepest scale.
        decoder_inputs[-1] = fused
        return self.backbone.decode_head(decoder_inputs)

from .cross_attn import CrossAttentionShortEfficient

class MirrorDecShortEfficientAttn(nn.Module):
    """Middle-frame segmentation with efficient short-term cross attention.

    Every frame's deepest features serve as keys/values; the middle frame's
    features are the queries.
    """

    def __init__(self, backbone) -> None:
        super().__init__()
        if backbone != "segformer":
            # Only the SegFormer backbone is wired up.
            raise ImportError
        self.backbone = SegFormer(pretrianed=True, scale_factor=1)

        self.cross_attn_short = CrossAttentionShortEfficient(512, 0.0, sr_ratio=1)

    def forward(self, x: torch.Tensor):
        """x: (B, T, C, W, H); returns segmentation logits for frame 1."""
        b, t, c, w, h = x.shape
        feats = self.backbone.forward_features(x.reshape(b * t, c, w, h))

        # Middle-frame features at every scale, fed to the decode head.
        decoder_feats = [
            f.reshape(b, t, f.shape[1], f.shape[2], f.shape[3])[:, 1]
            for f in feats
        ]

        # Deepest stage: (B*T, 512, 16, 16) -> (B, T, 512, 16, 16).
        deep = feats[-1]
        ch = deep.shape[1]
        deep = deep.reshape(b, t, ch, deep.shape[2], deep.shape[3])

        # All frames are the context; the middle frame(s) are the target.
        fused = self.cross_attn_short(deep[:, 1:-1], deep)

        decoder_feats[-1] = fused
        return self.backbone.decode_head(decoder_feats)

class MirrorDecShort(nn.Module):
    """Segment the middle frame of a clip via short-term cross attention:
    the deepest features of all frames attend into the middle frame's
    features before decoding.
    """

    def __init__(self, backbone) -> None:
        super().__init__()
        if backbone == "segformer":
            self.backbone = SegFormer(pretrianed=True, scale_factor=1)
        else:
            # Only the SegFormer backbone is wired up.
            raise ImportError

        self.cross_attn_short = CrossAttentionShort(512, 0.0, scale_factor=1)

        # NOTE(review): unused in forward(); retained so existing
        # checkpoints' state dicts still load.
        self.fuse_layer = nn.Conv2d(2 * 512, 512, kernel_size=1, stride=1, padding=0)

    def forward(self, x: torch.Tensor):
        """
        Args:
            x: clip tensor of shape (B, T, C, W, H); the frame at time
               index 1 is the segmentation target.

        Returns:
            Segmentation logits for the middle frame.
        """
        B, T, C, W, H = x.shape
        x = x.reshape(B * T, C, W, H)

        all_features = self.backbone.forward_features(x)

        # Middle-frame features at every scale, fed to the decode head.
        decode_multi_scale_features = [
            fea.reshape(B, T, fea.shape[1], fea.shape[2], fea.shape[3])[:, 1]
            for fea in all_features
        ]

        # Deepest stage: (B*T, 512, 16, 16) -> (B, T, 512, 16, 16) so the
        # cross attention can see the time axis.
        deep_features = all_features[-1]
        C = deep_features.shape[1]
        deep_features = deep_features.reshape(
            B, T, C, deep_features.shape[2], deep_features.shape[3]
        )

        # All frames act as context; the middle frame(s) as the query.
        short_term_frames = deep_features
        target_frame = deep_features[:, 1:-1]

        fuse_short_term = self.cross_attn_short(target_frame, short_term_frames)

        # Replace the deepest scale with the temporally fused features.
        decode_multi_scale_features[-1] = fuse_short_term

        return self.backbone.decode_head(decode_multi_scale_features)

class MirrorDec(nn.Module):
    """Segment frame ``pred_id`` of a clip by fusing short-term (adjacent
    frames) and long-term (all remaining frames) cross attention over the
    deepest backbone features.
    """

    def __init__(self, backbone, max_id=9) -> None:
        super().__init__()
        if backbone == "segformer":
            self.backbone = SegFormer(pretrianed=True, scale_factor=1)
        else:
            # Only the SegFormer backbone is wired up.
            raise ImportError

        self.cross_attn_short = CrossAttentionShort(512, 0.0, scale_factor=0.25)
        self.cross_attn_long = CrossAttentionLong(512, 0.0)

        # 1x1 conv merging the short- and long-term fused features.
        self.fuse_layer = nn.Conv2d(2 * 512, 512, kernel_size=1, stride=1, padding=0)

        # Largest valid frame index (T - 1 for a T-frame clip).
        self.max_id = max_id

    def forward(self, x: torch.Tensor, pred_id=5):
        """
        Args:
            x: clip tensor of shape (B, T, 3, W, H), e.g. T == 10.
            pred_id: time index of the frame to segment.

        Returns:
            Segmentation logits for frame ``pred_id``.
        """
        # Boundary frames lack one temporal neighbour, so pad the clip
        # along the TIME axis (dim 1).  BUGFIX: the original sliced the
        # batch axis (x[0:1] / x[-2:-1]) before cat(dim=1), which raises
        # for B > 1 and, even for B == 1, concatenated a whole clip rather
        # than one frame.
        if pred_id == 0:
            # Prepend a copy of the first frame so it gains a left
            # neighbour; the target shifts to index 1.
            x = torch.cat([x[:, 0:1], x], dim=1)
            pred_id = 1

        if pred_id == self.max_id:
            # Give the last frame a right neighbour (mirror-pad with the
            # second-to-last frame, matching the original -2:-1 slice —
            # TODO confirm intended padding frame).
            x = torch.cat([x, x[:, -2:-1]], dim=1)

        B, T, C, W, H = x.shape
        x = x.reshape(B * T, C, W, H)

        all_features = self.backbone.forward_features(x)

        # Target-frame features at every scale, fed to the decode head.
        decode_multi_scale_features = [
            fea.reshape(B, T, fea.shape[1], fea.shape[2], fea.shape[3])[:, pred_id]
            for fea in all_features
        ]

        # Deepest stage: (B*T, 512, 16, 16) -> (B, T, 512, 16, 16) so the
        # cross attention can see the time axis.
        deep_features = all_features[-1]
        C = deep_features.shape[1]
        deep_features = deep_features.reshape(
            B, T, C, deep_features.shape[2], deep_features.shape[3]
        )

        # Immediate neighbours (pred_id-1 .. pred_id+1) vs. every other
        # frame of the clip.
        short_term_frames = deep_features[:, pred_id - 1:pred_id + 2]
        long_term_frames = torch.cat(
            [deep_features[:, :pred_id], deep_features[:, pred_id + 1:]], dim=1
        )

        target_frame = deep_features[:, pred_id:pred_id + 1]

        fuse_short_term = self.cross_attn_short(target_frame, short_term_frames)
        fuse_long_term = self.cross_attn_long(target_frame, long_term_frames)

        # Merge both temporal contexts, then swap the result in for the
        # deepest scale.
        fuse_output = self.fuse_layer(
            torch.cat([fuse_short_term, fuse_long_term], dim=1)
        )
        decode_multi_scale_features[-1] = fuse_output

        return self.backbone.decode_head(decode_multi_scale_features)


