"""
原版复现
"""

import torch
import torch.nn as nn

# from model.visualEncoder import visualFrontend, visualTCN, visualConv1D
from model.attentionLayer import attentionLayer
from torchvggish import vggish
from model.visualEncoder import visualFrontend, visualConv1D, visualTCN
import torch.distributed as dist
from model.avconv import CONV_ATTN


class locoencoder(nn.Module):
    """Audio-visual encoder ("reproduction of the original", per the module docstring).

    Pipeline as wired here:
      * visual branch: visualFrontend -> visualTCN -> visualConv1D, producing
        128-d per-frame features;
      * audio branch: pretrained VGGish (pre-/post-processing disabled) followed
        by adaptive average pooling, producing 128-d per-frame features;
      * fusion: audio<->visual cross attention, then two rounds of
        subject-axis ("S") and time-axis ("T") CONV_ATTN blocks with cross
        attention exchanged between the two paths.

    Shape comments below follow the existing [S, T, C] convention
    (S = speakers, T = time) — assumed from the call sites; TODO confirm.
    """

    def __init__(self):
        super(locoencoder, self).__init__()

        # Visual Temporal Encoder
        self.visualFrontend = visualFrontend()  # Visual Frontend
        self.visualTCN = visualTCN()  # Visual Temporal Network TCN
        self.visualConv1D = visualConv1D()  # Visual Temporal Network Conv1d

        # Pretrained VGGish checkpoint URL; VGGish is used purely as a feature
        # extractor (its own preprocessing/postprocessing are turned off).
        urls = {
            'vggish':
                "https://github.com/harritaylor/torchvggish/releases/download/v0.1/vggish-10086976.pth"
        }
        self.audioEncoder = vggish.VGGish(urls, preprocess=False, postprocess=False)
        # Pools the VGGish frequency axis down to a single value per time step.
        self.audio_pool = nn.AdaptiveAvgPool1d(1)

        # Audio-visual Cross Attention (128-d single-modality features)
        self.crossA2V = attentionLayer(d_model=128, nhead=8)
        self.crossV2A = attentionLayer(d_model=128, nhead=8)

        # Audio-visual Self Attention

        # NOTE: 3 speakers are hard-coded here, matching num_sub=3 below and
        # the torch.arange(3) in forward_audio_visual_backend.
        self.speaker_embed = nn.Embedding(3, 256)
        # Two rounds of subject-axis ("S") / time-axis ("T") conv-attention,
        # each round followed by cross attention between the two paths.
        self.conv_attn_sub1 = CONV_ATTN(num_blocks=1, embed_dim=256, num_sub=3, attn_type="S")
        self.conv_attn_time1 = CONV_ATTN(num_blocks=1, embed_dim=256, num_sub=3, attn_type="T")
        self.conv_attn_sub2 = CONV_ATTN(num_blocks=1, embed_dim=256, num_sub=3, attn_type="S")
        self.conv_attn_time2 = CONV_ATTN(num_blocks=1, embed_dim=256, num_sub=3, attn_type="T")
        self.crossT2S1 = attentionLayer(d_model=256, nhead=8)
        self.crossS2T1 = attentionLayer(d_model=256, nhead=8)
        self.crossT2S2 = attentionLayer(d_model=256, nhead=8)
        self.crossS2T2 = attentionLayer(d_model=256, nhead=8)

    def forward_visual_frontend(self, x):
        """Run raw face crops through the visual frontend + temporal convs.

        Returns per-frame 512-d frontend features reduced by the TCN/Conv1D
        stack to [B, T, C]; the channel width after visualConv1D is not
        visible here (presumably 128, to match the 128-d attention layers —
        TODO confirm against visualConv1D).
        """
        # x: [B*S, T, H, W] (after view in Loconet.forward)
        B, T, W, H = x.shape
        # Fold time into batch and add singleton channel/depth dims so each
        # frame goes through the (3D-conv) frontend independently.
        x = x.view(B * T, 1, 1, W, H)  # Reshape for 3D Conv input
        # Normalize uint8 pixel values; 0.4161/0.1688 are the dataset's
        # grayscale mean/std (presumably — TODO confirm provenance).
        x = (x / 255 - 0.4161) / 0.1688
        x = self.visualFrontend(x)  # Output of visualFrontend is [B*T, 512, 1, 1] or similar
        x = x.view(B, T, 512)  # Reshape back to [B, T, C]
        x = x.transpose(1, 2)  # Transpose to [B, C, T] for 1D Conv
        x = self.visualTCN(x)
        x = self.visualConv1D(x)

        x = x.transpose(1, 2)  # Transpose back to [B, T, C]
        return x

    def forward_audio_frontend(self, x):
        """Encode audio with VGGish and pool to per-video-frame features.

        numFrames = t // 4 assumes 4 audio rows per video frame — TODO
        confirm against the data loader. Returns [B, numFrames, C].
        """
        # x: [B, C, T] (audioFeature)
        t = x.shape[-2]  # Assuming shape is [B, channels, time_steps]
        numFrames = t // 4  # This seems to be a specific downsampling factor
        # NOTE(review): when t % 8 == 0 this gives pad == 8, i.e. one full
        # extra block of zeros; presumably harmless since the output is
        # truncated to numFrames below — confirm.
        pad = 8 - (t % 8)
        # A 4-tuple pad applies to the last two dims; (0, 0, 0, pad) appends
        # `pad` zero rows along dim -2 — the same axis `t` was read from.
        x = torch.nn.functional.pad(x, (0, 0, 0, pad), "constant")  # Pad to be divisible by 8
        x = self.audioEncoder(x)  # VGGish output is typically [B, C, T', F'] or [B, T', C]
        b, c, t2, freq = x.shape  # Assuming VGGish output is [B, C, T', F']
        x = x.view(b * c, t2, freq)  # Reshape for pooling
        x = self.audio_pool(x)  # AdaptiveAvgPool1d(1) will reduce last dim to 1
        x = x.view(b, c, t2)[:, :, :numFrames]  # Reshape and truncate
        x = x.permute(0, 2, 1)  # Permute to [B, T, C]
        return x

    def forward_cross_attention(self, x1, x2):
        """Cross-attend audio (x1) and visual (x2) features symmetrically.

        Returns the pair (audio attended by visual, visual attended by audio);
        the exact src/tar semantics live in attentionLayer — not visible here.
        """
        #  [S, T, 128] → [S, T, 128]
        x1_c = self.crossA2V(src=x1, tar=x2, adjust=0)
        #  [S, T, 128] → [S, T, 128]
        x2_c = self.crossV2A(src=x2, tar=x1, adjust=0)

        return x1_c, x2_c

    def forward_audio_visual_backend(self, x1, x2, b=1, s=1):
        """Fuse audio (x1) and visual (x2) features via the S/T attention stack.

        b and s are accepted but unused in this implementation (kept,
        presumably, for interface compatibility with callers). Returns
        (features flattened to [S*T, 256], -1); the -1 looks like a
        placeholder second return value — confirm what callers expect.
        """
        #  [S, T, 128] + [S, T, 128] → [S, T, 256]
        x_ori = torch.cat((x1, x2), 2)

        # Subject path: view as [T, S, 256] and add a learned per-speaker
        # embedding; broadcasting requires S == 3 (nn.Embedding(3, 256)).
        x1_sub = x_ori.permute(1, 0, 2)
        speaker_ids = torch.arange(3, device=x1_sub.device)
        speaker_embed = self.speaker_embed(speaker_ids)
        # NOTE(review): permute returns a view, so this in-place add also
        # writes the speaker embeddings into x_ori — the time path below
        # (x1_time = x_ori) therefore carries them too. Looks deliberate in
        # this "original reproduction", but confirm the aliasing is intended.
        x1_sub += speaker_embed
        x1_sub = x1_sub.permute(1, 0, 2)

        x1_sub = self.conv_attn_sub1(x1_sub)

        # Time path: same (already speaker-embedded, see note above) tensor.
        x1_time = x_ori
        x1_time = self.conv_attn_time1(x1_time)

        # Round 1: exchange information between the time and subject paths.
        x2_time = self.crossT2S1(src=x1_time, tar=x1_sub, adjust=0)
        x2_sub = self.crossS2T1(src=x1_sub, tar=x1_time, adjust=0)

        # Round 2: another subject/time conv-attention pass...
        x2_sub = self.conv_attn_sub2(x2_sub)
        x2_time = self.conv_attn_time2(x2_time)

        # ...followed by a second cross-path exchange.
        result_time = self.crossT2S2(src=x2_time, tar=x2_sub, adjust=0)
        result_sub = self.crossS2T2(src=x2_sub, tar=x2_time, adjust=0)
        
        # Sum the two paths and flatten to per-(speaker, frame) rows.
        result = result_time + result_sub
        result = torch.reshape(result, (-1, 256))
        
        return result, -1

    def forward_audio_backend(self, x):
        """Flatten per-frame audio features for the downstream classifier."""
        # [S, T, 128] → [S*T, 128]
        x = torch.reshape(x, (-1, 128))
        return x

    def forward_visual_backend(self, x):
        """Flatten per-frame visual features for the downstream classifier."""
        # [S, T, 128] → [S*T, 128]
        x = torch.reshape(x, (-1, 128))  # Flatten to [N, 128]
        return x
