"""
改位置编码，在阔模态之前加入
concat改为add，256->128
1层 三轨道
"""
import torch
import torch.nn as nn

# from model.visualEncoder import visualFrontend, visualTCN, visualConv1D
from model.attentionLayer import attentionLayer
from torchvggish import vggish
from model.visualEncoder import visualFrontend, visualConv1D, visualTCN
import torch.distributed as dist
from model.avconv import CONV_ATTN
from model.windowAtt import WindowAttention
import swanlab

class locoencoder(nn.Module):
    """Audio-visual encoder with speaker embeddings and a three-track backend.

    Visual and audio streams are encoded into 128-d per-frame features,
    conditioned with learned speaker embeddings, fused by cross-modal
    attention, then refined by three parallel interaction tracks
    (cross-speaker, cross-time, windowed) combined with learned gates.
    """

    def __init__(self):
        super(locoencoder, self).__init__()

        # Visual temporal encoder: 3D-conv frontend followed by TCN + Conv1d.
        self.visualFrontend = visualFrontend()  # Visual Frontend
        self.visualTCN = visualTCN()  # Visual Temporal Network TCN
        self.visualConv1D = visualConv1D()  # Visual Temporal Network Conv1d

        # Pretrained VGGish audio encoder (raw features in, raw features out).
        urls = {
            'vggish':
                "https://github.com/harritaylor/torchvggish/releases/download/v0.1/vggish-10086976.pth"
        }
        self.audioEncoder = vggish.VGGish(urls, preprocess=False, postprocess=False)
        self.audio_pool = nn.AdaptiveAvgPool1d(1)

        # Audio-visual cross attention (128-d, 8 heads).
        self.crossA2V = attentionLayer(d_model=128, nhead=8)
        self.crossV2A = attentionLayer(d_model=128, nhead=8)

        # Learned per-speaker identity embedding, added before cross-modal fusion.
        self.speaker_embed = nn.Embedding(3, 128)

        # Three interaction tracks ("S" = cross-speaker per frame,
        # "T" = cross-time per speaker, window = local multi-speaker multi-frame).
        self.conv_attn_sub1 = CONV_ATTN(num_blocks=1, embed_dim=128, num_sub=3, attn_type="S")
        self.conv_attn_time1 = CONV_ATTN(num_blocks=1, embed_dim=128, num_sub=3, attn_type="T")
        self.conv_attn_window1 = WindowAttention(dim=128, num_heads=8, window_size=7)

        # Cross attention between the time track and the subject track.
        self.crossT2S1 = attentionLayer(d_model=128, nhead=8)
        self.crossS2T1 = attentionLayer(d_model=128, nhead=8)

        # Gate for fusing the two cross-modal streams (x1 vs x2).
        self.gate_linear = nn.Linear(128 * 2, 128)
        # Gate for fusing result_time vs result_sub.
        self.result_gate = nn.Linear(128 * 2, 128)
        # Gate for fusing the window track with the time/sub fusion.
        self.window_gate = nn.Linear(128 * 2, 128)

    def forward_visual_frontend(self, x):
        """Encode face crops into per-frame 128-d visual features.

        x: [B*S, T, W, H] frames in 0..255 range (after view in Loconet.forward).
        Returns [B*S, T, C] temporal features.
        """
        B, T, W, H = x.shape
        x = x.view(B * T, 1, 1, W, H)  # reshape each frame for the 3D-conv input
        x = (x / 255 - 0.4161) / 0.1688  # normalize with dataset mean/std
        x = self.visualFrontend(x)  # output flattens to 512 features per frame
        x = x.view(B, T, 512)  # back to [B, T, C]
        x = x.transpose(1, 2)  # [B, C, T] for the 1D temporal convs
        x = self.visualTCN(x)
        x = self.visualConv1D(x)
        x = x.transpose(1, 2)  # back to [B, T, C]
        return x

    def forward_audio_frontend(self, x):
        """Encode audio features with VGGish into per-frame 128-d features.

        x: audio feature tensor with time on dim -2 — TODO confirm exact layout.
        Returns [B, numFrames, C] with numFrames = t // 4 (aligning audio frames
        to video frames).
        """
        t = x.shape[-2]
        numFrames = t // 4  # audio-to-video frame downsampling factor
        # BUGFIX: was `8 - (t % 8)`, which padded an extra 8 rows when t was
        # already a multiple of 8; now pads 0 in that case.
        pad = (8 - t % 8) % 8
        x = torch.nn.functional.pad(x, (0, 0, 0, pad), "constant")  # time dim -> multiple of 8
        x = self.audioEncoder(x)
        b, c, t2, freq = x.shape  # assumes VGGish returns [B, C, T', F'] — verify
        x = x.view(b * c, t2, freq)  # flatten for pooling over the last axis
        x = self.audio_pool(x)  # AdaptiveAvgPool1d(1) averages the freq axis
        x = x.view(b, c, t2)[:, :, :numFrames]  # truncate to the video length
        x = x.permute(0, 2, 1)  # [B, T, C]
        return x

    def add_speaker_embedding(self, x, speaker_ids=None):
        """Add a learned speaker-identity embedding to every frame of x.

        x: [S, T, C=128] per-speaker features.
        speaker_ids: optional [S] long tensor of speaker indices. Defaults to
            0..S-1, which reproduces the previous hard-coded [0, 1, 2] when
            S == 3. Note the embedding table has 3 entries, so S > 3 requires
            enlarging `self.speaker_embed`.
        Returns x + embedding, shape [S, T, 128].

        FIXME: padded/duplicated speakers share one identity — revisit. (xjh)
        """
        if speaker_ids is None:
            # Avoid a tensor default argument (evaluated once at import time)
            # and generalize beyond exactly 3 speakers.
            speaker_ids = torch.arange(x.size(0), device=x.device)
        else:
            speaker_ids = speaker_ids.to(x.device)
        spk_emb = self.speaker_embed(speaker_ids)  # [S, 128], learned at target dim
        spk_emb_expanded = spk_emb.unsqueeze(1).expand(-1, x.size(1), -1)  # [S, T, 128]
        return x + spk_emb_expanded

    def forward_cross_attention(self, x1, x2):
        """Cross-modal attention with speaker conditioning applied first.

        x1: visual features [S, T, 128]
        x2: audio features  [S, T, 128]
        Returns the cross-attended (visual, audio) pair, each [S, T, 128].
        """
        # Add speaker embeddings before the cross-modal fusion.
        x1 = self.add_speaker_embedding(x1)  # visual conditioned
        x2 = self.add_speaker_embedding(x2)  # audio conditioned

        # [S, T, 128] -> [S, T, 128]: visual attends to audio and vice versa.
        x1_c = self.crossA2V(src=x1, tar=x2, adjust=0)
        x2_c = self.crossV2A(src=x2, tar=x1, adjust=0)
        return x1_c, x2_c

    def forward_audio_visual_backend(self, x1, x2, b=1, s=1):
        """Fuse cross-modal features through the three-track backend.

        x1, x2: [S, T, 128] cross-attended visual/audio streams.
        b, s: kept for interface compatibility — unused in this body.
        Returns a 4-tuple: (features [S*T, 128], mean ori_gate, mean ts_gate,
        mean win_gate); the gate means are Python floats for swanlab logging.
        """
        # Gated add of the two cross-modal streams.
        ori_gate = torch.sigmoid(self.gate_linear(torch.cat((x1, x2), dim=2)))  # [S,T,128]
        x_ori = ori_gate * x1 + (1 - ori_gate) * x2  # [S,T,128]

        # Fan out to the parallel tracks.
        x1_time, x1_sub = x_ori, x_ori  # [S, T, 128]

        # ------------- 1 layer, 3 tracks -------------
        # Multi-speaker single-frame interaction track
        # (internally permutes [S, T, 128] -> [T, S, 128]).
        x1_sub = self.conv_attn_sub1(x1_sub)
        # Single-speaker full-sequence interaction track.
        x1_time = self.conv_attn_time1(x1_time)
        # Multi-speaker multi-frame (windowed) interaction track.
        x1_wd = self.conv_attn_window1(x_ori)

        # Cross attention between the two conv-attn tracks.
        x2_sub = self.crossS2T1(src=x1_sub, tar=x1_time, adjust=0)   # sub queries, time keys/values
        x2_time = self.crossT2S1(src=x1_time, tar=x1_sub, adjust=0)  # time queries, sub keys/values

        result_time, result_sub, result_wd = x2_time, x2_sub, x1_wd

        # Gated fusion of the time and subject tracks.
        ts_gate = torch.sigmoid(self.result_gate(torch.cat((result_time, result_sub), dim=2)))  # [S,T,128]
        result_time_sub = ts_gate * result_time + (1 - ts_gate) * result_sub  # [S,T,128]

        # Gated fusion of the window track with the time/sub result.
        win_gate = torch.sigmoid(self.window_gate(torch.cat((result_wd, result_time_sub), dim=2)))  # [S,T,128]
        result = win_gate * result_wd + (1 - win_gate) * result_time_sub  # [S,T,128]

        result = torch.reshape(result, (-1, 128))

        # Gate means are returned so the caller can monitor them with swanlab.
        return result, ori_gate.mean().item(), ts_gate.mean().item(), win_gate.mean().item()

    def forward_audio_backend(self, x):
        """Flatten audio features: [S, T, 128] -> [S*T, 128]."""
        x = torch.reshape(x, (-1, 128))
        return x

    def forward_visual_backend(self, x):
        """Flatten visual features: [S, T, 128] -> [S*T, 128]."""
        x = torch.reshape(x, (-1, 128))
        return x
