from torchvision import models
from torch import nn
import torch
import enum
from .convnext import convnextv2
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchmetrics.classification import MulticlassAccuracy

class LSTMHead(nn.Module):
    """LSTM classification head.

    *input*

    (B, L, input_size) or (L, input_size)

    *output*

    (B, L, output_size) or (L, output_size)

    The LSTM hidden state is carried over between forward calls (detached
    from the autograd graph), so call :meth:`reset` at sequence boundaries.
    """

    def __init__(self, input_size: int, output_size: int, lstm_hidden_size: int = 512):
        """Initialize the LSTMHead.

        Args:
            input_size (int): feature dimension of the input sequence
            output_size (int): feature dimension of the output sequence
            lstm_hidden_size (int, optional): hidden-state size of the internal LSTM
        """
        super().__init__()

        # BUG FIX: the original assigned the builtin ``input`` function here
        # instead of the ``input_size`` argument.
        self.input_size = input_size
        self.output_size = output_size
        self.lstm_hidden_size = lstm_hidden_size

        self.lstm = nn.LSTM(input_size, lstm_hidden_size, batch_first=True)
        self.out_layer = nn.Linear(lstm_hidden_size, output_size)

        # (h, c) tuple carried across calls; None means "start fresh".
        self.hidden_state = None
        self.prev_feat = None

    def forward(self, x):
        x, hidden_state = self.lstm(x, self.hidden_state)
        x = self.out_layer(x)

        # Detach so truncated BPTT does not backprop into earlier chunks.
        self.hidden_state = tuple(h.detach() for h in hidden_state)

        return x

    def reset(self):
        """Clear the carried hidden state (call between sequences)."""
        self.hidden_state = None
        self.prev_feat = None


class CNNBackbone(nn.Module):
    """CNN feature-extraction backbone.

    *input*

    (B, L, C, H, W)

    *output*

    a single-element list: [(B, L, out_features)]
    """
    class BackboneType(enum.Enum):
        AlexNet = "alexnet"
        ConvNeXtv2 = "convnextv2"

    def __init__(self, backbone_type: BackboneType, out_features: int, freeze: bool = False):
        """Initialize the CNNBackbone.

        Args:
            backbone_type (BackboneType): which backbone to use
            out_features (int): final output feature size after the linear output layer
            freeze (bool, optional): whether to freeze part of the backbone layers
                (default: no). NOTE(review): freezing is only implemented for the
                ConvNeXtv2 backbone; the flag has no effect for AlexNet.

        Raises:
            ValueError: if ``backbone_type`` is not a supported BackboneType
        """
        super().__init__()

        self.freeze_feature_net = freeze
        self.out_features = out_features

        if backbone_type == CNNBackbone.BackboneType.ConvNeXtv2:
            self.feature_net = convnextv2.convnextv2_tiny(pretrained=True)
            # Strip the classification head; keep the 768-d pooled features.
            self.feature_net.head = nn.Identity()
            self.feature_net_out_size = 768
            if freeze:
                # Freeze the first three stages (and their downsample layers),
                # leaving only the last stage trainable.
                for i in range(0, 3):
                    for param in self.feature_net.downsample_layers[i].parameters():
                        param.requires_grad = False
                    for param in self.feature_net.stages[i].parameters():
                        param.requires_grad = False
        elif backbone_type == CNNBackbone.BackboneType.AlexNet:
            # BUG FIX: ``weights`` expects a weights-enum *member*, not the
            # enum class itself; use the default pretrained weights.
            self.feature_net = models.alexnet(weights=models.AlexNet_Weights.DEFAULT)
            # Truncate the classifier after the first Linear+ReLU (4096-d).
            self.feature_net.classifier = self.feature_net.classifier[:3]
            self.feature_net_out_size = 4096
        else:
            # Fail fast instead of hitting an AttributeError on
            # ``self.feature_net_out_size`` below.
            raise ValueError(f"unsupported backbone type: {backbone_type}")

        self.output_layer = nn.Linear(self.feature_net_out_size, out_features)

    def forward(self, x):
        B, S = x.shape[:2]

        # Fold the sequence dimension into the batch for the per-frame CNN.
        x = x.flatten(end_dim=1)
        x = self.feature_net(x)
        x = self.output_layer(x)
        x = x.view(B, S, -1)

        # Returned wrapped in a list — callers index the result.
        return [x]


class CrossAttention(nn.Module):
    """Simplified single-head cross-attention (no output projection).

    ``x`` supplies the queries; ``context`` supplies the keys and values.

    *input*

    - x: (B, L, emb_dim)
    - context: (B, S, emb_dim)

    *output*

    (B, L, emb_dim)
    """

    def __init__(self, emb_dim=768):
        super().__init__()
        self.emb_dim = emb_dim
        # Standard 1/sqrt(d) scaling of the dot-product scores.
        self.scale = emb_dim ** -0.5

        self.Wq = nn.Linear(emb_dim, emb_dim)
        self.Wk = nn.Linear(emb_dim, emb_dim)
        self.Wv = nn.Linear(emb_dim, emb_dim)

    def forward(self, x: torch.Tensor, context: torch.Tensor, pad_mask: torch.Tensor = None):
        """Attend ``x`` over ``context``.

        *input*

        - x: (B, L, emb_dim)
        - context: (B, S, emb_dim)

        *output*

        (B, L, emb_dim)

        Args:
            x (torch.Tensor): query source
            context (torch.Tensor): key/value source
            pad_mask (torch.Tensor, optional): boolean mask; ``True`` positions
                are filled with a large negative score before the softmax
        """
        queries = self.Wq(x)
        keys = self.Wk(context)
        values = self.Wv(context)

        # Scaled dot-product scores: (B, L, S)
        scores = torch.einsum('bid,bjd -> bij', queries, keys) * self.scale
        if pad_mask is not None:
            scores = scores.masked_fill(pad_mask, -1e9)

        # Normalize over the context axis, then mix the values: (B, L, emb_dim)
        weights = F.softmax(scores, dim=-1)
        return torch.einsum('bij, bjd -> bid', weights, values)


class TemporalCNN(nn.Module):
    """Temporal CNN.

    Composed of a CNN feature-extraction backbone and a sequence
    classification head; see ``forward`` for input/output details.
    """
    class HeadType(enum.Enum):
        LSTM = "lstm"

    def __init__(self, backbone_out_features: int, backbone: CNNBackbone.BackboneType, head: HeadType, head_outsize: int,
                 freeze_cnn: bool = False, enable_cross_attn: bool = False, cache_size: int = -1):
        """Initialize the TemporalCNN.

        Args:
            backbone_out_features (int): output feature size of the CNN
                backbone's linear output layer
            backbone (CNNBackbone.BackboneType): which backbone type to use
            head (HeadType): which head type to use
            head_outsize (int): feature size of the head's output sequence,
                i.e. the final output feature size
            freeze_cnn (bool, optional): freeze part of the CNNBackbone
                layers (default: no)
            enable_cross_attn (bool, optional): enable cross-attention
                (default: off)
            cache_size (int, optional): capacity of the feature cache;
                -1 (default) means unbounded
        """
        super().__init__()

        self.enable_cross_attn = enable_cross_attn
        self.cache_size = cache_size

        self.backbone = CNNBackbone(
            backbone, backbone_out_features, freeze_cnn)

        # NOTE: the heads consume raw backbone features
        # (feature_net_out_size), not the linear output layer's features —
        # extract_image_features below bypasses backbone.output_layer.
        if head == self.HeadType.LSTM:
            self.temporal_head = LSTMHead(
                self.backbone.feature_net_out_size, head_outsize)
        # elif head == 'tcn':
        #     self.temporal_head = MSTCNHead(self.cnn.feature_size, out_size)

        if self.enable_cross_attn:
            self.cross_attn = CrossAttention(
                emb_dim=self.backbone.feature_net_out_size)
            self.cross_attn_head = LSTMHead(
                self.backbone.feature_net_out_size, head_outsize)

        # List of cached per-frame feature tensors, oldest first.
        self.cache = []

    def forward(self, x: torch.Tensor, long_term: torch.Tensor = None, save_cache: bool = False):
        """TemporalCNN forward pass.

        ``x`` must have shape (B, L, C, H, W);
        ``long_term`` is the context content — in practice the features of
        preceding frames — with shape (S, feat_dim).

        Forward-pass logic, in order:
        1. If caching is requested, store the current frames in the cache and
           return the whole cache (len(cache)·B·L, feature_net_out_size)
        2. If ``long_term`` is given, fuse current and preceding frames with
           the Max-R method and return the head output (B, L, head_outsize)
        3. Otherwise extract features normally and return the head output
           (B, L, head_outsize)

        Args:
            x (torch.Tensor): current frame data
            long_term (torch.Tensor, optional): preceding-frame features, used
                to compute correlation with the current frames
            save_cache (bool, optional): store the current frame data in the
                cache (default: no)
        """

        if save_cache:
            # Update the cache and return its entire current contents.
            x = self.extract_image_features_and_cache(x)
            return x
        else:
            # Normal feature extraction.
            x = self.extract_image_features(x)
            if long_term is None:
                # No fusion needed — run the temporal head and return.
                x = self.temporal_head(x)
                return x
            else:
                # Fuse with long_term: first compute the response.
                L = x.shape[1]
                S = long_term.shape[0]
                # unsqueeze so the Hadamard product broadcasts over the batch.
                long_term = long_term.unsqueeze(0)
                # This is the "read adaptive" step from the original paper —
                # effectively adapting to the batch size.
                # single_response corresponds to the paper's "response matrix S".
                # NOTE(review): allocated on CPU; if x lives on GPU each
                # assignment copies a scalar back — confirm intended.
                single_response = torch.zeros(S - L + 1)
                for i in range(S - L + 1):
                    single_response[i] = (x*long_term[:, i:i+L, :]).mean()
                # Suffix sums, corresponding to the paper's
                # "clip response matrix P".
                multi_response = torch.flip(torch.cumsum(
                    torch.flip(single_response, dims=[0]), dim=0), dims=[0])
                # argmax (via max's index output) gives delta.
                delta_idx = multi_response.max(dim=0)[-1].long()
                # (B, L, feature_net_out_size)
                ca = self.cross_attn(x, long_term[:, delta_idx:, :])
                # (B, L, head_outsize)
                ca = self.cross_attn_head(ca)
                # (B, L, head_outsize)
                x = self.temporal_head(x)
                return (x + ca) / 2

    def extract_image_features(self, x):
        """Normal feature extraction (bypasses the backbone's output layer).

        *input*

        (B, L, C, H, W)

        *output*

        (B, L, feature_net_out_size)
        """
        B = x.size(0)
        S = x.size(1)

        x = x.flatten(end_dim=1)
        x = self.backbone.feature_net(x)

        x = x.view(B, S, -1)

        return x

    def extract_image_features_and_cache(self, x):
        """Extract features and cache them.

        After feature extraction the current frame features are appended to
        the cache; the return value is the full cache contents including the
        current frames.

        *input*

        (B, L, C, H, W)

        *output*

        (len(cache)·B·L, feature_net_out_size)
        """
        # (B·L, C, H, W)
        x = x.flatten(end_dim=1)
        # (B·L, feature_net_out_size)
        x = self.backbone.feature_net(x)
        self.cache.append(x)
        # Evict the oldest frame features once the cache exceeds capacity.
        if self.cache_size != -1 and len(self.cache) > self.cache_size:
            self.cache.pop(0)

        return torch.cat(self.cache, dim=0)

'''
class DACAT(nn.Module):
    class Mode(enum.Enum):
        """DACAT模型的模式

        extractor和predictor的结构有所差别，主要体现在利用TemporalCNN的方式上
        """
        EXTRACTOR = "extractor"
        PREDICTOR = "predictor"

    def __init__(self, mode: Mode, backbone: CNNBackbone.BackboneType, backbone_out_features: int, head: TemporalCNN.HeadType,
                 temp_outsize: int, seq_len: int, freeze_cnn: bool = False):
        """DACAT模型本身

        *input*

        (B, L, C, H, W)

        *output*

        (B, L, temp_outsize)

        Args:
            mode (Mode): 模型运行模式
            backbone (CNNBackbone.BackboneType): 模型要使用的CNNBackbone类型
            backbone_out_features (int): 指定CNNBackbone最终的输出特征维数
            head (TemporalCNN.HeadType): 指定TemporalCNN中要使用的头类型
            temp_outsize (int): 指定模型TemporalCNN的输出特征维数
            seq_len (int): 输入帧序列的长度
            freeze_cnn (bool, optional): 是否需要冻结CNNBackbone的部分层，默认不冻结
        """
        super().__init__()

        self.mode = mode
        self.backbone_type = backbone
        self.backbone_out_features = backbone_out_features
        self.head_type = head
        self.temporal_outsize = temp_outsize
        self.cache_size = seq_len

        if mode == DACAT.Mode.EXTRACTOR:
            self.net = TemporalCNN(
                backbone_out_features, backbone, head, temp_outsize, freeze_cnn).cuda()
        elif mode == DACAT.Mode.PREDICTOR:
            self.net_context = TemporalCNN(
                backbone_out_features, backbone, head, temp_outsize, freeze_cnn).cuda()
            self.net = TemporalCNN(backbone_out_features, backbone, head, temp_outsize,
                                   freeze_cnn, enable_cross_attn=True, cache_size=seq_len)

    def forward(self, x):
        if self.mode == DACAT.Mode.EXTRACTOR:
            output = self.net(x)
        elif self.mode == DACAT.Mode.PREDICTOR:
            ctx = self.net_context(x, save_cache=True)
            output = self.net(x, ctx)

        return output
'''

class LitDACAT(LightningModule):
    """Lightning wrapper around the DACAT model.

    *input*

    (B, L, C, H, W)

    *output*

    (B, L, temp_outsize)
    """
    class Mode(enum.Enum):
        """Operating mode of the DACAT model.

        EXTRACTOR is the backbone feature-extraction network, corresponding
        to the first training stage of the original code; it uses a single
        TemporalCNN.
        PREDICTOR is the DACAT network, corresponding to the second
        ("long-short") stage of the original code; it uses two TemporalCNNs.
        """
        EXTRACTOR = "extractor"
        PREDICTOR = "predictor"

    def __init__(self, mode: Mode, backbone: CNNBackbone.BackboneType, backbone_out_features: int, head: TemporalCNN.HeadType,
                 temp_outsize: int, seq_len: int, freeze_cnn: bool = False, lr: float = 0.001, weight_dacay: float = 0.01):
        """Initialize the DACAT Lightning module.

        Args:
            mode (Mode): operating mode
            backbone (CNNBackbone.BackboneType): CNN backbone type to use
            backbone_out_features (int): final output feature size of the CNNBackbone
            head (TemporalCNN.HeadType): head type used inside the TemporalCNN
            temp_outsize (int): output feature size of the TemporalCNN,
                i.e. the model's final output feature size
            seq_len (int): length of the input frame sequence (also used as
                the feature-cache capacity)
            freeze_cnn (bool, optional): freeze part of the CNNBackbone
                layers (default: no)
            lr (float, optional): AdamW learning rate
            weight_dacay (float, optional): AdamW weight decay.
                NOTE(review): parameter name keeps the original misspelling
                for backward compatibility with keyword callers.
        """
        super().__init__()

        self.mode = mode
        self.backbone_type = backbone
        self.backbone_out_features = backbone_out_features
        self.head_type = head
        self.temporal_outsize = temp_outsize
        self.cache_size = seq_len
        self.freeze_cnn = freeze_cnn
        self.lr = lr
        self.weight_decay = weight_dacay

        # 7-class weighted cross-entropy.
        # NOTE(review): weights presumably derived from the training-set
        # class distribution — confirm against the dataset statistics.
        self.ce = nn.CrossEntropyLoss(reduction="mean", weight=torch.tensor([
            1.6411019141231247,
            0.19090963801041133,
            1.0,
            0.2502662616859295,
            1.9176363911137977,
            0.9840248158200853,
            2.174635818337618,
        ]))

        # `net` is the plain/long-term branch; `net_front` adds
        # cross-attention and a feature cache for the short-term branch.
        self.net = TemporalCNN(backbone_out_features, backbone, head, temp_outsize, freeze_cnn)
        self.net_front = TemporalCNN(backbone_out_features, backbone, head, temp_outsize,
                                     freeze_cnn, enable_cross_attn=True, cache_size=seq_len)
        if mode == LitDACAT.Mode.PREDICTOR:
            # Second stage: the context branch is a frozen extractor.
            self.net.requires_grad_(False)

        # NOTE(review): this accuracy metric is shared between training and
        # validation; it is reset at the start of each phase.
        self.val_accuracy = MulticlassAccuracy(num_classes=7, average="micro")
        self.total_val_loss = 0
        self.val_batch_cnt = 0
        self.save_hyperparameters("backbone", "backbone_out_features", "head", "temp_outsize", "seq_len")

    def _forward_by_mode(self, seq):
        """Run the mode-appropriate forward pass.

        Args:
            seq: input frames, (B, seq_len, C, H, W)

        Returns:
            torch.Tensor: per-frame logits, (B, seq_len, temp_outsize)
        """
        if self.mode == LitDACAT.Mode.EXTRACTOR:
            # (B, L, temp_outsize)
            return self.net(seq)
        # PREDICTOR: feed the frozen branch's cached features
        # (len(cache)·B·L, feature_net_out_size) as long-term context to the
        # cross-attention branch.
        ctx = self.net(seq, save_cache=True)
        return self.net_front(seq, ctx)

    def on_train_epoch_start(self) -> None:
        self.val_accuracy.reset()

    def training_step(self, batch, batch_idx):
        # (B, seq_len, C, H, W), (B, seq_len)
        seq, target = batch

        output = self._forward_by_mode(seq)

        # Move the class dimension next to the batch for CrossEntropyLoss:
        # (B, temp_outsize, L)
        pred = output.transpose(1, 2)
        loss = self.ce(pred, target)
        step_acc = self.val_accuracy(pred, target)
        self.log("train_step_loss", loss, on_step=True, on_epoch=False, prog_bar=True, logger=True)
        self.log("train_step_acc", step_acc, on_step=True, on_epoch=False, prog_bar=True, logger=True)
        return loss

    def on_train_epoch_end(self) -> None:
        self.log("train_avg_acc", self.val_accuracy.compute(), on_epoch=True, prog_bar=True, logger=True)

    def configure_optimizers(self):
        # BUG FIX: the original always optimized `self.net.parameters()`,
        # but in PREDICTOR mode `self.net` is frozen and `self.net_front`
        # is the branch being trained — so the optimizer received no
        # trainable parameters. Select the parameters by mode instead.
        if self.mode == LitDACAT.Mode.PREDICTOR:
            params = self.net_front.parameters()
        else:
            params = self.net.parameters()
        return torch.optim.AdamW(params, lr=self.lr, weight_decay=self.weight_decay)

    def on_validation_epoch_start(self):
        self.val_accuracy.reset()
        self.total_val_loss = 0
        self.val_batch_cnt = 0

    def validation_step(self, batch, batch_idx):
        # (B, seq_len, C, H, W), (B, seq_len)
        seq, target = batch

        output = self._forward_by_mode(seq)

        # Move the class dimension next to the batch for CrossEntropyLoss:
        # (B, temp_outsize, L)
        pred = output.transpose(1, 2)
        loss = self.ce(pred, target)

        self.val_accuracy(pred, target)
        # Accumulate a per-frame-weighted loss for the epoch average.
        B, seq_len = target.shape
        self.total_val_loss += loss * B * seq_len
        self.val_batch_cnt += B * seq_len

    def on_validation_epoch_end(self):
        avg_loss = self.total_val_loss / self.val_batch_cnt
        self.log("val_avg_loss", avg_loss, on_epoch=True, prog_bar=True, logger=True)
        self.log("val_avg_acc", self.val_accuracy.compute(), on_epoch=True, prog_bar=True, logger=True)
