import os

import torch
import torch.nn.functional as F

from model.clip import CLIP
from model.clip.clip import tokenize
from model.esresnet import ESResNeXtFBSP
from model.vivit import ViViT, MuTT

from typing import List
from typing import Tuple
from typing import Union
from typing import Optional

import random
import math

import warnings

# Suppress all warnings globally (module-import side effect).
warnings.filterwarnings("ignore")


# Per-modality feature embeddings (None when the modality is absent).
ClipFeatures = Tuple[
    Optional[torch.Tensor],  # audio
    Optional[torch.Tensor],  # image
    Optional[torch.Tensor]   # text
]


# Pairwise similarity logits (None when either modality is absent).
ClipLogits = Tuple[
    Optional[torch.Tensor],  # audio x image
    Optional[torch.Tensor],  # audio x text
    Optional[torch.Tensor]   # image x text
]


# ((features, logits), loss) as produced by MusicVideoCLIP.forward().
# NOTE(review): forward() actually returns larger tuples than these aliases
# describe (7 features, 7 logit matrices, a 4-tuple of losses) — confirm
# before relying on these aliases for typing.
ClipOutput = Tuple[
    Tuple[ClipFeatures, ClipLogits],
    Optional[torch.Tensor]   # loss
]

class MusicVideoCLIP(CLIP):
    """CLIP extended to the music/video domain.

    On top of the base CLIP image & text towers this model adds:

    * ``self.audio`` — an ESResNeXtFBSP encoder for raw audio segments;
    * ``self.music`` — a MuTT temporal transformer that aggregates
      per-segment audio features into a clip-level music embedding;
    * ``self.video`` — a ViViT temporal transformer that aggregates
      per-frame image features into a clip-level video embedding;
    * one learnable, clamped logit scale (temperature) per modality pair.

    ``forward`` returns ``((features, logits), losses)`` where ``losses``
    is the 4-tuple ``(loss, align_loss, total_loss, segment_loss)``.
    """

    def __init__(self,
                 embed_dim: int = 1024,
                 # vision
                 image_resolution: int = 224,
                 vision_layers: Union[Tuple[int, int, int, int], int] = (3, 4, 6, 3),
                 vision_width: int = 64,
                 vision_patch_size: Optional[int] = None,
                 # video & music temporal aggregation: each 5 s clip
                 # (150 frames at 30 fps) is split into `num_segments`
                 # segments, each segment into `num_segments_inside`
                 # sub-segments from which one frame is sampled at random
                 # (see forward()).
                 num_segments_inside: int = 10,
                 num_segments: int = 5,
                 # text
                 context_length: int = 77,
                 vocab_size: int = 49408,
                 transformer_width: int = 512,
                 transformer_heads: int = 8,
                 transformer_layers: int = 12,
                 # audio (spectrogram front-end of ESResNeXtFBSP)
                 n_fft: int = 2048,
                 hop_length: Optional[int] = 561,
                 win_length: Optional[int] = 1654,
                 window: Optional[str] = 'blackmanharris',
                 normalized: bool = True,
                 onesided: bool = True,
                 spec_height: int = -1,
                 spec_width: int = -1,
                 apply_attention: bool = True,
                 multilabel: bool = True,
                 pretrained: Union[bool, str] = True):

        super(MusicVideoCLIP, self).__init__(
            embed_dim=embed_dim,
            image_resolution=image_resolution,
            vision_layers=vision_layers,
            vision_width=vision_width,
            vision_patch_size=vision_patch_size,
            context_length=context_length,
            vocab_size=vocab_size,
            transformer_width=transformer_width,
            transformer_heads=transformer_heads,
            transformer_layers=transformer_layers
        )

        # Segment-level audio encoder.  pretrained=False here: weights are
        # presumably loaded externally — TODO confirm against the trainer.
        self.audio = ESResNeXtFBSP(
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            normalized=normalized,
            onesided=onesided,
            spec_height=spec_height,
            spec_width=spec_width,
            num_classes=embed_dim,
            apply_attention=apply_attention,
            pretrained=False
        )

        # Temporal feature aggregators for video frames and music segments.
        self.video = ViViT(
            num_segments_inside=num_segments_inside,
            num_segments=num_segments
        )
        self.music = MuTT(
            num_segments=num_segments
        )

        self.multilabel = multilabel
        self.pretrained = pretrained

        # Learnable temperatures, stored as log-scale and initialised to
        # log(100); clamped to [1, 100] after exp() in forward().
        self.logit_scale_ai = torch.nn.Parameter(torch.log(torch.ones([]) * 100))
        self.logit_scale_at = torch.nn.Parameter(torch.log(torch.ones([]) * 100))
        # Scales for the added music/video/text modality pairs and the
        # clip-level music-video alignment head.
        self.logit_scale_mv = torch.nn.Parameter(torch.log(torch.ones([]) * 100))
        self.logit_scale_mt = torch.nn.Parameter(torch.log(torch.ones([]) * 100))
        self.logit_scale_vt = torch.nn.Parameter(torch.log(torch.ones([]) * 100))
        self.logit_scale_mv_align = torch.nn.Parameter(torch.log(torch.ones([]) * 100))

    @property
    def device(self) -> torch.device:
        """Device of the visual tower's first conv layer (read-only)."""
        return self.visual.conv1.weight.device

    def encode_audio(self, audio: torch.Tensor) -> torch.Tensor:
        """Encode raw audio segments with the ESResNeXtFBSP backbone."""
        return self.audio(audio.to(self.device))

    def encode_video(self, video: torch.Tensor) -> torch.Tensor:
        """Temporally aggregate per-frame features via ViViT."""
        return self.video(video.to(self.device))

    def encode_music(self, music: torch.Tensor) -> torch.Tensor:
        """Temporally aggregate per-segment audio features via MuTT."""
        return self.music(music.to(self.device))

    def encode_text(self,
                    text: List[List[str]],
                    base_str: str = '{}',
                    batch_indices: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Tokenize and encode entity lists with the CLIP text tower.

        Each inner list of entity strings is joined with ', ', formatted
        through ``base_str`` and tokenized.  ``batch_indices`` optionally
        selects a subset of ``text`` before encoding.
        """
        if batch_indices is not None:
            text = [text[idx] for idx in batch_indices]

        text_joined = [', '.join(entities) for entities in text]
        text_tokens = torch.cat([
            tokenize(base_str.format(entities)) for entities in text_joined
        ])
        text_tokens = text_tokens.to(self.device)

        return super(MusicVideoCLIP, self).encode_text(text_tokens)

    def forward(self,
                music: Optional[torch.Tensor] = None,
                video: Optional[torch.Tensor] = None,
                text: Optional[List[List[str]]] = None,
                batch_indices: Optional[torch.Tensor] = None) -> ClipOutput:
        """Encode the supplied modalities, build pairwise logits and losses.

        Returns ``((features, logits), (loss, align_loss, total_loss,
        segment_loss))``.  Any modality not supplied yields ``None`` in the
        corresponding feature/logit slots.
        """
        # NOTE(review): text is forcibly disabled here, so every text branch
        # below is dead code.  This looks like a debug leftover — remove this
        # line to re-enable the text modality.
        text = None

        total_audio_features = None
        total_image_features = None

        music_features = None
        video_features = None
        text_features = None
        sample_weights = None

        # Fix: initialise the alignment features up front; previously the
        # `features` tuple below raised NameError whenever only one of
        # music/video was provided.
        music_features_for_align = None
        video_features_for_align = None

        # Strategy: encode all segments in one backbone call by flattening
        # [batch x segments x C x H x W] to [(batch*segments) x C x H x W],
        # then reshaping back afterwards.

        if music is not None:
            # `music` is a sequence of stacked per-clip tensors.
            batch_size = len(music)
            # NOTE(review): hard-coded 6 disagrees with the constructor
            # default num_segments=5 — confirm which is intended.
            num_segments = 6
            stacked_music = torch.stack(music)
            # Assumes mono segments of 44100 samples each — TODO confirm
            # against the dataloader.
            flat_music = stacked_music.view(-1, 1, 44100)
            music_features = self.encode_audio(flat_music)
            music_features = music_features / music_features.norm(dim=-1, keepdim=True)
            print("music processed!")

            # [batch*segments, D] -> [batch, segments, D]
            music_original_features = music_features.view(batch_size, num_segments, -1)
            print('music_original_features.size():', music_original_features.size())

            # MuTT returns (per-segment features, clip-level features for
            # the alignment loss, final music embedding).
            audio_features, music_features_for_align, music_features = self.encode_music(music_original_features)
            # Merge batch and segment dims: one row per segment.
            print('audio_features.size():', audio_features.size())
            total_audio_features = audio_features.reshape(-1, 1024)
            print('total_audio_features.size():', total_audio_features.size())

        if video is not None:
            # Expected layout: [batch, 3 (rgb / flow_x / flow_y), frames,
            # 3, 224, 224] — the three streams are index-aligned per frame.
            batch_size = len(video)
            video_rgb, video_flow_x, video_flow_y = video[:, 0], video[:, 1], video[:, 2]
            # NOTE(review): hard-coded counts disagree with the constructor
            # defaults (num_segments=5, num_segments_inside=10) — confirm.
            num_segments = 6
            num_segments_inside = 5  # sub-segments per segment
            video_rgb_selected = []
            video_flow_x_selected = []
            video_flow_y_selected = []

            for i in range(batch_size):
                # Use the shortest of the three streams so the sampled
                # indices are valid for rgb and both flow directions.
                frames_num = min(video_rgb[i].size(0), video_flow_x[i].size(0), video_flow_y[i].size(0))
                print('frames_num:', frames_num)
                segment_size = frames_num / (num_segments*num_segments_inside)
                selected_frames_rgb = []
                selected_frames_flow_x = []
                selected_frames_flow_y = []

                # Randomly sample one frame (with its flows) per sub-segment;
                # the last sub-segment absorbs any rounding remainder.
                for j in range(num_segments*num_segments_inside):
                    start_idx = math.floor(j * segment_size)
                    end_idx = math.floor((j + 1) * segment_size) if j != num_segments*num_segments_inside - 1 else frames_num

                    selected_frame_idx = random.randint(start_idx, end_idx - 1)

                    selected_frames_rgb.append(video_rgb[i][selected_frame_idx])
                    selected_frames_flow_x.append(video_flow_x[i][selected_frame_idx])
                    selected_frames_flow_y.append(video_flow_y[i][selected_frame_idx])

                # Stack the sampled frames for this clip.
                video_rgb_selected.append(torch.stack(selected_frames_rgb))
                video_flow_x_selected.append(torch.stack(selected_frames_flow_x))
                video_flow_y_selected.append(torch.stack(selected_frames_flow_y))

            # Encode the sampled 224x224 RGB frames with the CLIP image tower.
            video_rgb_selected = torch.stack(video_rgb_selected)
            flat_video_rgb = video_rgb_selected.view(-1, 3, 224, 224)
            video_features_rgb = self.encode_image(flat_video_rgb)
            video_features_rgb = video_features_rgb / video_features_rgb.norm(dim=-1, keepdim=True)

            # TODO(review): the two flow streams currently reuse the RGB
            # image encoder; a dedicated flow encoder (TSN-style) was
            # planned per the original comments.
            video_flow_x_selected = torch.stack(video_flow_x_selected)
            flat_video_flow_x = video_flow_x_selected.view(-1, 3, 224, 224)
            video_features_flow_x = self.encode_image(flat_video_flow_x)
            video_features_flow_x = video_features_flow_x / video_features_flow_x.norm(dim=-1, keepdim=True)

            video_flow_y_selected = torch.stack(video_flow_y_selected)
            flat_video_flow_y = video_flow_y_selected.view(-1, 3, 224, 224)
            video_features_flow_y = self.encode_image(flat_video_flow_y)
            video_features_flow_y = video_features_flow_y / video_features_flow_y.norm(dim=-1, keepdim=True)

            # Fuse rgb + flow_x + flow_y by summation, then renormalize.
            video_features = video_features_rgb + video_features_flow_x + video_features_flow_y
            video_features = video_features / video_features.norm(dim=-1, keepdim=True)
            print("video processed!")

            # [batch*segments*inside, D] -> [batch, segments, inside, D]
            video_original_features = video_features.view(batch_size, num_segments, num_segments_inside, -1)
            print('video_original_features.size():', video_original_features.size())

            # Two-stage CLS pooling inside ViViT:
            # [B, S, S_in, D] -> [B, S, D] -> [B, D].  Returns (per-segment
            # features, clip-level features for the alignment loss, final
            # video embedding).
            image_features, video_features_for_align, video_features = self.encode_video(video_original_features)
            # Merge batch and segment dims: one row per segment.
            total_image_features = image_features.reshape(-1, 1024)
            print('total_image_features.size():', total_image_features.size())

        if text is not None:
            if batch_indices is None:
                batch_indices = torch.arange(len(text), dtype=torch.int64, device=self.device)

            text_features = self.encode_text(text, '{}', batch_indices)
            text_features = text_features / text_features.norm(dim=-1, keepdim=True)

            # Optional per-sample weighting, only when the trainer attached
            # class weights to this module.
            if hasattr(self, 'class_weights') and hasattr(self, 'label_to_class_idx'):
                sample_weights = torch.stack([
                    sum(self.class_weights[self.label_to_class_idx[label]] for label in entities)
                    for idx, entities in enumerate(text) if idx in batch_indices
                ])

        # Segment-level (audio/image/text) and clip-level (music/video/text)
        # features are both exposed to the caller.
        features = (total_audio_features, total_image_features, text_features, music_features, video_features, music_features_for_align, video_features_for_align)

        # Clip-level music<->video alignment logits (mid-layer features).
        logit_scale_mv_align = torch.clamp(self.logit_scale_mv_align.exp(), min=1.0, max=100.0)

        logits_music_video_align = None

        if (music_features_for_align is not None) and (video_features_for_align is not None):
            logits_music_video_align = logit_scale_mv_align * music_features_for_align @ video_features_for_align.T

        # Segment-level audio/image/text logits.
        logit_scale_ai = torch.clamp(self.logit_scale_ai.exp(), min=1.0, max=100.0)
        logit_scale_at = torch.clamp(self.logit_scale_at.exp(), min=1.0, max=100.0)
        logit_scale_it = torch.clamp(self.logit_scale.exp(), min=1.0, max=100.0)

        logits_audio_image = None
        logits_audio_text = None
        logits_image_text = None

        if (total_audio_features is not None) and (total_image_features is not None):
            logits_audio_image = logit_scale_ai * total_audio_features @ total_image_features.T

        if (total_audio_features is not None) and (text_features is not None):
            logits_audio_text = logit_scale_at * total_audio_features @ text_features.T

        if (total_image_features is not None) and (text_features is not None):
            logits_image_text = logit_scale_it * total_image_features @ text_features.T

        # Clip-level music/video/text logits.
        logit_scale_mv = torch.clamp(self.logit_scale_mv.exp(), min=1.0, max=100.0)
        logit_scale_mt = torch.clamp(self.logit_scale_mt.exp(), min=1.0, max=100.0)
        logit_scale_vt = torch.clamp(self.logit_scale_vt.exp(), min=1.0, max=100.0)

        logits_music_video = None
        logits_music_text = None
        logits_video_text = None

        if (music_features is not None) and (video_features is not None):
            logits_music_video = logit_scale_mv * music_features @ video_features.T

        if (music_features is not None) and (text_features is not None):
            logits_music_text = logit_scale_mt * music_features @ text_features.T

        if (video_features is not None) and (text_features is not None):
            logits_video_text = logit_scale_vt * video_features @ text_features.T

        logits = (logits_music_video_align, logits_audio_image, logits_audio_text, logits_image_text, logits_music_video, logits_music_text, logits_video_text)

        loss, align_loss, total_loss, segment_loss = self.loss_fn(logits, sample_weights)

        return (features, logits), (loss, align_loss, total_loss, segment_loss)

    def loss_fn(self,
                logits: ClipLogits,
                sample_weights: Optional[torch.Tensor] = None
                ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor],
                           Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Symmetric cross-entropy over every available logit matrix.

        Returns ``(loss, align_loss, total_loss, segment_loss)``.  When a
        required logit group is missing, all four entries are None — fix:
        previously a bare ``None`` was returned, which crashed the 4-way
        unpack in forward().

        NOTE(review): ``sample_weights`` is forwarded as cross_entropy's
        per-CLASS ``weight``; it happens to have length batch_size here, but
        confirm per-sample weighting is what was intended.
        """
        logits_music_video_align, logits_audio_image, logits_audio_text, logits_image_text, logits_music_video, logits_music_text, logits_video_text = logits

        # --- alignment loss (clip-level music <-> video) ---
        if logits_music_video_align is not None:
            batch_size = logits_music_video_align.shape[0]
        else:
            return None, None, None, None

        # Matching pairs sit on the diagonal.
        reference = torch.arange(
            batch_size,
            dtype=torch.int64,
            device=self.device
        )

        align_loss = torch.tensor(0.0, dtype=self.dtype, device=self.device)

        num_modalities: int = 0
        scale = torch.tensor(1.0, dtype=self.dtype, device=self.device)

        if logits_music_video_align is not None:
            loss_mv_align = F.cross_entropy(
                logits_music_video_align, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_music_video_align.transpose(-1, -2), reference, weight=sample_weights
            )
            align_loss = align_loss + loss_mv_align
            num_modalities += 1

        # NOTE(review): this computes num_modalities! (factorial), not the
        # plain count — identical for 0/1/2 modalities but 6 for 3.
        # Behaviour preserved; confirm whether a mean was intended.
        for idx in range(num_modalities):
            scale = scale * (idx + 1)
        align_loss = align_loss / scale

        # --- segment loss (segment-level audio/image/text) ---
        if logits_audio_image is not None:
            batch_size = logits_audio_image.shape[0]
        elif logits_audio_text is not None:
            batch_size = logits_audio_text.shape[0]
        elif logits_image_text is not None:
            batch_size = logits_image_text.shape[0]
        else:
            # Pre-existing behaviour: align_loss is discarded on this path.
            return None, None, None, None

        reference = torch.arange(
            batch_size,
            dtype=torch.int64,
            device=self.device
        )

        # "Far-sighted" masked receptive field: the first off-diagonals are
        # set to -inf so adjacent segments get zero softmax probability.
        # NOTE: this mutates logits_audio_image in place, so the logits
        # returned by forward() are masked as well.
        # Fix: guarded on logits_audio_image being present — previously this
        # raised when only audio-text / image-text logits existed.
        if logits_audio_image is not None:
            logits_audio_image_masked = logits_audio_image
            logits_audio_image_masked[torch.arange(batch_size-1), torch.arange(1, batch_size)] = float('-inf')
            logits_audio_image_masked[torch.arange(1, batch_size), torch.arange(batch_size-1)] = float('-inf')
            print('logits_audio_image_masked:', logits_audio_image_masked)
            logits_audio_image = logits_audio_image_masked

        segment_loss = torch.tensor(0.0, dtype=self.dtype, device=self.device)

        num_modalities = 0
        scale = torch.tensor(1.0, dtype=self.dtype, device=self.device)

        if logits_audio_image is not None:
            loss_ai = F.cross_entropy(
                logits_audio_image, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_audio_image.transpose(-1, -2), reference, weight=sample_weights
            )
            segment_loss = segment_loss + loss_ai
            num_modalities += 1

        if logits_audio_text is not None:
            loss_at = F.cross_entropy(
                logits_audio_text, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_audio_text.transpose(-1, -2), reference, weight=sample_weights
            )
            segment_loss = segment_loss + loss_at
            num_modalities += 1

        if logits_image_text is not None:
            loss_it = F.cross_entropy(
                logits_image_text, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_image_text.transpose(-1, -2), reference, weight=sample_weights
            )
            segment_loss = segment_loss + loss_it
            num_modalities += 1

        # Factorial scaling, as above (behaviour preserved).
        for idx in range(num_modalities):
            scale = scale * (idx + 1)
        segment_loss = segment_loss / scale

        # --- total loss (clip-level music/video/text) ---
        if logits_music_video is not None:
            batch_size = logits_music_video.shape[0]
        elif logits_music_text is not None:
            batch_size = logits_music_text.shape[0]
        elif logits_video_text is not None:
            batch_size = logits_video_text.shape[0]
        else:
            # Pre-existing behaviour: align_loss and segment_loss are
            # discarded on this path.
            return None, None, None, None

        reference = torch.arange(
            batch_size,
            dtype=torch.int64,
            device=self.device
        )

        total_loss = torch.tensor(0.0, dtype=self.dtype, device=self.device)

        num_modalities = 0
        scale = torch.tensor(1.0, dtype=self.dtype, device=self.device)

        if logits_music_video is not None:
            loss_mv = F.cross_entropy(
                logits_music_video, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_music_video.transpose(-1, -2), reference, weight=sample_weights
            )
            total_loss = total_loss + loss_mv
            num_modalities += 1

        if logits_music_text is not None:
            loss_mt = F.cross_entropy(
                logits_music_text, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_music_text.transpose(-1, -2), reference, weight=sample_weights
            )
            total_loss = total_loss + loss_mt
            num_modalities += 1

        if logits_video_text is not None:
            loss_vt = F.cross_entropy(
                logits_video_text, reference, weight=sample_weights
            ) + F.cross_entropy(
                logits_video_text.transpose(-1, -2), reference, weight=sample_weights
            )
            total_loss = total_loss + loss_vt
            num_modalities += 1

        # Factorial scaling, as above (behaviour preserved).
        for idx in range(num_modalities):
            scale = scale * (idx + 1)
        total_loss = total_loss / scale

        loss = segment_loss + total_loss + align_loss

        return loss, align_loss, total_loss, segment_loss

    @property
    def loss_fn_name(self) -> str:
        """Human-readable name of the underlying criterion."""
        return 'Cross Entropy'
