import torch
from torch import nn
import pytorch_lightning as pl
from torchvision import models
from typing import List
import torch.nn.functional as F
import random
import numpy as np
import sys
import warnings
from sklearn.metrics import average_precision_score
from prettytable import PrettyTable


class MTFiST(pl.LightningModule):
    """MTFiST: multi-task model for surgical action-triplet recognition.

    A shared ResNet-50 backbone feeds four 1x1-conv branches (instrument,
    verb, target, triplet); each branch is average-pooled, refined by an
    LSTM over short frame sequences, projected through a shared FC layer
    and classified independently.
    """

    def __init__(self, seq_length: int = 10, m1: float = 0.005, m2: float = 0.01):
        """Initialize the MTFiST model.

        Args:
            seq_length (int, optional): length of the sub-sequences fed to the
                LSTM modules; the batch size must be divisible by it. Defaults to 10.
            m1 (float, optional): weight of the discriminality loss term (L_dis).
            m2 (float, optional): weight of the diversity loss term (L_div).
        """
        super().__init__()
        resnet = models.resnet50(pretrained=True)

        self.seq_length = seq_length
        self.m1 = m1
        self.m2 = m2

        # Spatial feature extractor: the ResNet-50 trunk without avgpool/fc.
        self.spatial_extractor = torch.nn.Sequential()
        self.spatial_extractor.add_module("conv1", resnet.conv1)
        self.spatial_extractor.add_module("bn1", resnet.bn1)
        self.spatial_extractor.add_module("relu", resnet.relu)
        self.spatial_extractor.add_module("maxpool", resnet.maxpool)
        self.spatial_extractor.add_module("layer1", resnet.layer1)
        self.spatial_extractor.add_module("layer2", resnet.layer2)
        self.spatial_extractor.add_module("layer3", resnet.layer3)
        self.spatial_extractor.add_module("layer4", resnet.layer4)

        self.avg_pool = resnet.avgpool

        # lstm1 serves the instrument and target branches (2040-dim features);
        # lstm2 serves the verb and triplet branches (2000-dim features).
        self.lstm1 = nn.LSTM(2040, 512, batch_first=True)
        self.lstm2 = nn.LSTM(2000, 512, batch_first=True)

        self.fc_h = nn.Linear(512, 2040)

        print("初始化lstm与fc权重")
        # Xavier-initialize the input-hidden / hidden-hidden LSTM weights and
        # the shared FC projection.
        nn.init.xavier_normal_(self.lstm1.all_weights[0][0])
        nn.init.xavier_normal_(self.lstm1.all_weights[0][1])
        nn.init.xavier_normal_(self.lstm2.all_weights[0][0])
        nn.init.xavier_normal_(self.lstm2.all_weights[0][1])
        nn.init.xavier_uniform_(self.fc_h.weight)

        # Number of output channels of ResNet-50 layer4.
        feature_size = 2048

        # 1x1 convolutions producing the four branch feature maps. Channel
        # counts are exact multiples of the class counts (2040 = 6*340 = 15*136,
        # 2000 = 10*200 = 100*20) so calcu_mc_loss can split them per class.
        self.features2branch1 = nn.Conv2d(feature_size, 2040, kernel_size=1, stride=1, padding=0)
        self.features2branch2 = nn.Conv2d(feature_size, 2000, kernel_size=1, stride=1, padding=0)
        self.features2branch3 = nn.Conv2d(feature_size, 2040, kernel_size=1, stride=1, padding=0)
        self.features2branch4 = nn.Conv2d(feature_size, 2000, kernel_size=1, stride=1, padding=0)

        # Classification heads for the four branches
        # (instrument: 6, verb: 10, target: 15, triplet: 100 classes).
        self.classifier1 = nn.Sequential(
            nn.BatchNorm1d(2040),
            nn.Linear(2040, feature_size),
            nn.BatchNorm1d(feature_size),
            nn.ELU(inplace=True),
            nn.Linear(feature_size, 6),
        )
        self.classifier2 = nn.Sequential(
            nn.BatchNorm1d(2040),
            nn.Linear(2040, feature_size),
            nn.BatchNorm1d(feature_size),
            nn.ELU(inplace=True),
            nn.Linear(feature_size, 10),
        )
        self.classifier3 = nn.Sequential(
            nn.BatchNorm1d(2040),
            nn.Linear(2040, feature_size),
            nn.BatchNorm1d(feature_size),
            nn.ELU(inplace=True),
            nn.Linear(feature_size, 15),
        )
        self.classifier4 = nn.Sequential(
            nn.BatchNorm1d(2040),
            nn.Linear(2040, feature_size),
            nn.BatchNorm1d(feature_size),
            nn.ELU(inplace=True),
            nn.Linear(feature_size, 100),
        )

    def configure_optimizers(self):
        """SGD with lr=1e-3 for every trained sub-module.

        The base lr=1e-4 would only apply to parameters outside the explicit
        groups; every module is listed, so it never takes effect.
        """
        modules = [
            self.spatial_extractor,
            self.features2branch1,
            self.features2branch2,
            self.features2branch3,
            self.features2branch4,
            self.classifier1,
            self.classifier2,
            self.classifier3,
            self.classifier4,
            self.lstm1,
            self.lstm2,
            self.fc_h,
        ]
        param_groups = [{'params': m.parameters(), 'lr': 1e-3} for m in modules]
        return torch.optim.SGD(param_groups, lr=1e-4)

    @staticmethod
    def calcu_mc_loss(x: torch.Tensor, gt: torch.LongTensor, num_channel: int):
        """Compute the MC losses L_dis and L_div for one branch.

        The branch feature map allocates ``num_channel`` channels to each of
        the C classes, i.e. x.shape[1] == C * num_channel.

        Args:
            x (torch.Tensor): branch feature map of shape (B, c, H, W).
            gt (torch.LongTensor): one-hot ground-truth labels of shape (B, C).
            num_channel (int): number of channels assigned to each class.

        Returns:
            tuple: (loss_dis, loss_div) scalar tensors.
        """
        # ---- L_div ----
        # (B, c, H, W)
        branch = x
        B, c, H, W = branch.shape
        assert B == len(gt)
        C = gt.shape[1]
        assert c // num_channel == C
        # Softmax over the spatial positions of every channel: (B, c, H·W).
        branch = branch.reshape(B, c, -1).softmax(dim=2)
        # (B, c, H, W)
        branch = branch.reshape(B, c, H, W)
        # Cross-Channel Max Pooling: (B, W, H, c) -> (B, W, H, C).
        branch = branch.transpose(1, 3)
        branch = F.max_pool2d(branch, kernel_size=(1, num_channel), stride=(1, num_channel))
        # (B, C, H·W)
        branch: torch.Tensor = branch.transpose(1, 3).reshape(B, C, -1)
        # Sum over HW, average over everything. This follows the reference
        # code, which differs slightly from the paper (the paper averages over
        # dim C, yielding B losses); the paper variant is kept for reference:
        # loss_div = torch.mean(branch.sum(-1), dim=1)
        loss_div = 1.0 - 1.0*torch.mean(branch.sum(-1)) / num_channel

        # ---- L_dis ----
        # Random mask hiding half of each class's channels; the same mask is
        # repeated for every sample of the batch.
        mask = []
        template = [0] * (num_channel // 2) + [1] * (num_channel - num_channel // 2)
        for i in range(C):
            random.shuffle(template)
            mask += template
        mask = mask * B
        # (B, c, 1, 1). Allocate on the input's device instead of hard-coding
        # .cuda() so the loss also works on CPU.
        mask = torch.tensor(mask, dtype=torch.float32, device=x.device).reshape(B, c, 1, 1)
        # Channel-Wise Attention: (B, c, H, W).
        branch2 = x * mask
        # Cross-Channel Max Pooling: (B, W, H, c) -> (B, W, H, C).
        branch2 = branch2.transpose(1, 3)
        branch2 = F.max_pool2d(branch2, kernel_size=(1, num_channel), stride=(1, num_channel))
        # (B, C, H, W)
        branch2: torch.Tensor = branch2.transpose(1, 3)
        # Global average pooling: (B, C, 1, 1) -> (B, C).
        branch2 = F.avg_pool2d(branch2, kernel_size=(H, W))
        branch2 = branch2.view(B, -1)
        loss_dis = F.binary_cross_entropy_with_logits(branch2, gt.float())

        return loss_dis, loss_div

    def forward(self, imgs):
        """Shared pipeline: backbone -> branch convs -> LSTMs -> classifiers.

        Args:
            imgs: (B, 3, H, W) frames, ordered so that consecutive groups of
                ``seq_length`` frames form one temporal sequence; B must be
                divisible by ``seq_length``.

        Returns:
            branches: the four pre-pooling branch feature maps, shapes
                (B, 2040, h, w), (B, 2000, h, w), (B, 2040, h, w),
                (B, 2000, h, w) — h = w = 7 for 224x224 inputs.
            logits: the four classification logits, shapes
                (B, 6), (B, 10), (B, 15), (B, 100).
        """
        B = imgs.shape[0]
        assert B % self.seq_length == 0, "batch size无法整除指定的序列长度"
        # (B, 2048, h, w)
        x = self.spatial_extractor(imgs)
        branch1 = self.features2branch1(x)
        branch2 = self.features2branch2(x)
        branch3 = self.features2branch3(x)
        branch4 = self.features2branch4(x)

        # Global average pooling, then regroup into (B // seq_len, seq_len, dim).
        seq1 = self.avg_pool(branch1).view(-1, self.seq_length, 2040)
        seq2 = self.avg_pool(branch2).view(-1, self.seq_length, 2000)
        seq3 = self.avg_pool(branch3).view(-1, self.seq_length, 2040)
        seq4 = self.avg_pool(branch4).view(-1, self.seq_length, 2000)
        # lstm1 handles instrument/target, lstm2 handles verb/triplet.
        h1, _ = self.lstm1(seq1)
        h2, _ = self.lstm2(seq2)
        h3, _ = self.lstm1(seq3)
        h4, _ = self.lstm2(seq4)
        # Flatten back to (B, 512), project with the shared FC (-> 2040), ReLU.
        feats = [
            F.relu(self.fc_h(h.reshape(-1, 512).contiguous()))
            for h in (h1, h2, h3, h4)
        ]
        logits = (
            self.classifier1(feats[0]),
            self.classifier2(feats[1]),
            self.classifier3(feats[2]),
            self.classifier4(feats[3]),
        )
        return (branch1, branch2, branch3, branch4), logits

    def training_step(self, batch, batch_idx):
        """One training step: BCE on the four heads plus the weighted MC
        losses (m1 * L_dis + m2 * L_div) of every branch."""
        imgs, instrument_id, verb_id, target_id, triplet_id = batch
        # as_tensor avoids the copy (and UserWarning) that torch.tensor emits
        # when the ids arrive as tensors from the default collate.
        instrument_onehot = F.one_hot(torch.as_tensor(instrument_id), num_classes=6)
        verb_onehot = F.one_hot(torch.as_tensor(verb_id), num_classes=10)
        target_onehot = F.one_hot(torch.as_tensor(target_id), num_classes=15)
        triplet_onehot = F.one_hot(torch.as_tensor(triplet_id), num_classes=100)

        branches, logits = self.forward(imgs)

        # (L_dis, L_div) of every branch; num_channel = channels per class.
        mc_losses = (
            MTFiST.calcu_mc_loss(branches[0], instrument_onehot, 2040 // 6),
            MTFiST.calcu_mc_loss(branches[1], verb_onehot, 2000 // 10),
            MTFiST.calcu_mc_loss(branches[2], target_onehot, 2040 // 15),
            MTFiST.calcu_mc_loss(branches[3], triplet_onehot, 2000 // 100),
        )

        # Classification losses (L_ce).
        loss = (
            F.binary_cross_entropy_with_logits(logits[0], instrument_onehot.float())
            + F.binary_cross_entropy_with_logits(logits[1], verb_onehot.float())
            + F.binary_cross_entropy_with_logits(logits[2], target_onehot.float())
            + F.binary_cross_entropy_with_logits(logits[3], triplet_onehot.float())
        )
        for loss_dis, loss_div in mc_losses:
            loss = loss + self.m1 * loss_dis + self.m2 * loss_div

        self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
        return loss

    def on_validation_epoch_start(self):
        """Reset the per-epoch prediction / ground-truth accumulators."""
        self.instrument_pred, self.instrument_gt = [], []
        self.verb_pred, self.verb_gt = [], []
        self.target_pred, self.target_gt = [], []
        self.triplet_pred, self.triplet_gt = [], []

    def validation_step(self, batch, batch_idx):
        """Accumulate sigmoid scores and ground truths for the mAP report.

        Following the reference code, only the last frame of every
        ``seq_length``-long LSTM sequence is evaluated.
        """
        imgs, instrument_id, verb_id, target_id, triplet_id = batch
        instrument_onehot = F.one_hot(torch.as_tensor(instrument_id), num_classes=6)
        verb_onehot = F.one_hot(torch.as_tensor(verb_id), num_classes=10)
        target_onehot = F.one_hot(torch.as_tensor(target_id), num_classes=15)
        triplet_onehot = F.one_hot(torch.as_tensor(triplet_id), num_classes=100)

        _, logits = self.forward(imgs)

        # Select the last element of every sequence.
        last = slice(self.seq_length - 1, None, self.seq_length)
        # torch.sigmoid replaces the deprecated F.sigmoid.
        self.instrument_pred.append(torch.sigmoid(logits[0][last]).float().cpu())
        self.verb_pred.append(torch.sigmoid(logits[1][last]).float().cpu())
        self.target_pred.append(torch.sigmoid(logits[2][last]).float().cpu())
        self.triplet_pred.append(torch.sigmoid(logits[3][last]).float().cpu())

        self.instrument_gt.append(instrument_onehot.cpu()[last])
        self.verb_gt.append(verb_onehot.cpu()[last])
        self.target_gt.append(target_onehot.cpu()[last])
        self.triplet_gt.append(triplet_onehot.cpu()[last])

    def on_validation_epoch_end(self):
        """Compute component-wise mAP over the accumulated triplet scores
        (Recognition disentangles i/v/t/iv/it from the 100-class vector),
        log the metrics and print a summary table."""
        metric = Recognition()
        metric.reset()
        metric.update(
            torch.cat(self.triplet_gt, dim=0),
            torch.cat(self.triplet_pred, dim=0)
        )
        mAP_i = metric.compute_AP('i')["mAP"]
        mAP_v = metric.compute_AP('v')["mAP"]
        mAP_t = metric.compute_AP('t')["mAP"]
        mAP_iv = metric.compute_AP('iv')["mAP"]
        mAP_it = metric.compute_AP('it')["mAP"]
        mAP_ivt = metric.compute_AP('ivt')["mAP"]

        self.log("mAP_i", mAP_i, logger=True, on_epoch=True, prog_bar=False, on_step=False)
        self.log("mAP_v", mAP_v, logger=True, on_epoch=True, prog_bar=False, on_step=False)
        self.log("mAP_t", mAP_t, logger=True, on_epoch=True, prog_bar=False, on_step=False)
        self.log("mAP_iv", mAP_iv, logger=True, on_epoch=True, prog_bar=False, on_step=False)
        self.log("mAP_it", mAP_it, logger=True, on_epoch=True, prog_bar=False, on_step=False)
        self.log("mAP_ivt", mAP_ivt, logger=True, on_epoch=True, prog_bar=False, on_step=False)

        print(f"========= Epoch {self.current_epoch} validation report =========")
        table = PrettyTable(field_names=["Components", "mAP"])
        table.add_rows([
            ["instrument", mAP_i],
            ["verb", mAP_v],
            ["target", mAP_t],
            ["instrument & verb", mAP_iv],
            ["instrument & target", mAP_it],
            ["triplet", mAP_ivt]
        ])
        print(table)


class Disentangle(object):
    """Project triplet labels/scores onto their components.

    A 100-class triplet vector is collapsed into one of its components:
    instrument (i), verb (v), target (t), instrument-verb (iv),
    instrument-target (it) — or returned untouched for ivt.

    Attributes
    ----------
    bank : 2D int array of shape (100, 6)
        Row k maps triplet class k to the class ids of its components
        (columns: ivt, i, v, t, iv, it).

    Methods
    -------
    extract(inputs, component):
        filter a component's labels from the input triplet labels.
    """

    def __init__(self, url="maps.txt"):
        # The decomposition table is hard-coded in map_file(); the `url`
        # parameter is kept for backward compatibility with callers of the
        # original file-based loader.
        self.bank = self.map_file()

    def decompose(self, inputs, component):
        """Collapse one triplet vector into a single component's vector.

        @args:
            inputs: 1D vector of dimension (n), n = number of triplet classes;
                    int(0 or 1) for target labels, float[0, 1] for predictions.
            component: component symbol: i, v, t, iv, it (vt is unused).
        @return:
            list of per-class values; each entry is the max over all triplet
            classes that share that component class.
        """
        column = {'ivt':0, 'i':1, 'v':2, 't':3, 'iv':4, 'it':5, 'vt':6}[component]
        mapping = self.bank[:, column]
        return [
            np.max(np.array(inputs[[row for row, cls in enumerate(mapping) if cls == cls_id]]))
            for cls_id in sorted(np.unique(mapping))
        ]

    def extract(self, inputs, component="i"):
        """Filter a component's labels from a batch of triplet labels.

        @args
        ----
        inputs: 2D array,
            triplet labels, predicted or ground-truth, shape (F, 100).
        component: str,
            one of i, v, t, iv, it, ivt (vt exists but is unused).
        @return
        ------
        2D array of the component's labels, one row per input row
        (the inputs themselves when component == "ivt").
        """
        if component == "ivt":
            return inputs
        return np.array([self.decompose(row, component) for row in inputs])

    def map_file(self):
        """Return the hard-coded triplet decomposition table.

        Columns: triplet id, instrument id, verb id, target id,
        instrument-verb id, instrument-target id.
        """
        return np.array([ 
                       [ 0,  0,  2,  1,  2,  1],
                       [ 1,  0,  2,  0,  2,  0],
                       [ 2,  0,  2, 10,  2, 10],
                       [ 3,  0,  0,  3,  0,  3],
                       [ 4,  0,  0,  2,  0,  2],
                       [ 5,  0,  0,  4,  0,  4],
                       [ 6,  0,  0,  1,  0,  1],
                       [ 7,  0,  0,  0,  0,  0],
                       [ 8,  0,  0, 12,  0, 12],
                       [ 9,  0,  0,  8,  0,  8],
                       [10,  0,  0, 10,  0, 10],
                       [11,  0,  0, 11,  0, 11],
                       [12,  0,  0, 13,  0, 13],
                       [13,  0,  8,  0,  8,  0],
                       [14,  0,  1,  2,  1,  2],
                       [15,  0,  1,  4,  1,  4],
                       [16,  0,  1,  1,  1,  1],
                       [17,  0,  1,  0,  1,  0],
                       [18,  0,  1, 12,  1, 12],
                       [19,  0,  1,  8,  1,  8],
                       [20,  0,  1, 10,  1, 10],
                       [21,  0,  1, 11,  1, 11],
                       [22,  1,  3,  7, 13, 22],
                       [23,  1,  3,  5, 13, 20],
                       [24,  1,  3,  3, 13, 18],
                       [25,  1,  3,  2, 13, 17],
                       [26,  1,  3,  4, 13, 19],
                       [27,  1,  3,  1, 13, 16],
                       [28,  1,  3,  0, 13, 15],
                       [29,  1,  3,  8, 13, 23],
                       [30,  1,  3, 10, 13, 25],
                       [31,  1,  3, 11, 13, 26],
                       [32,  1,  2,  9, 12, 24],
                       [33,  1,  2,  3, 12, 18],
                       [34,  1,  2,  2, 12, 17],
                       [35,  1,  2,  1, 12, 16],
                       [36,  1,  2,  0, 12, 15],
                       [37,  1,  2, 10, 12, 25],
                       [38,  1,  0,  1, 10, 16],
                       [39,  1,  0,  8, 10, 23],
                       [40,  1,  0, 13, 10, 28],
                       [41,  1,  1,  2, 11, 17],
                       [42,  1,  1,  4, 11, 19],
                       [43,  1,  1,  0, 11, 15],
                       [44,  1,  1,  8, 11, 23],
                       [45,  1,  1, 10, 11, 25],
                       [46,  2,  3,  5, 23, 35],
                       [47,  2,  3,  3, 23, 33],
                       [48,  2,  3,  2, 23, 32],
                       [49,  2,  3,  4, 23, 34],
                       [50,  2,  3,  1, 23, 31],
                       [51,  2,  3,  0, 23, 30],
                       [52,  2,  3,  8, 23, 38],
                       [53,  2,  3, 10, 23, 40],
                       [54,  2,  5,  5, 25, 35],
                       [55,  2,  5, 11, 25, 41],
                       [56,  2,  2,  5, 22, 35],
                       [57,  2,  2,  3, 22, 33],
                       [58,  2,  2,  2, 22, 32],
                       [59,  2,  2,  1, 22, 31],
                       [60,  2,  2,  0, 22, 30],
                       [61,  2,  2, 10, 22, 40],
                       [62,  2,  2, 11, 22, 41],
                       [63,  2,  1,  0, 21, 30],
                       [64,  2,  1,  8, 21, 38],
                       [65,  3,  3, 10, 33, 55],
                       [66,  3,  5,  9, 35, 54],
                       [67,  3,  5,  5, 35, 50],
                       [68,  3,  5,  3, 35, 48],
                       [69,  3,  5,  2, 35, 47],
                       [70,  3,  5,  1, 35, 46],
                       [71,  3,  5,  8, 35, 53],
                       [72,  3,  5, 10, 35, 55],
                       [73,  3,  5, 11, 35, 56],
                       [74,  3,  2,  1, 32, 46],
                       [75,  3,  2,  0, 32, 45],
                       [76,  3,  2, 10, 32, 55],
                       [77,  4,  4,  5, 44, 65],
                       [78,  4,  4,  3, 44, 63],
                       [79,  4,  4,  2, 44, 62],
                       [80,  4,  4,  4, 44, 64],
                       [81,  4,  4,  1, 44, 61],
                       [82,  5,  6,  6, 56, 81],
                       [83,  5,  2,  2, 52, 77],
                       [84,  5,  2,  4, 52, 79],
                       [85,  5,  2,  1, 52, 76],
                       [86,  5,  2,  0, 52, 75],
                       [87,  5,  2, 10, 52, 85],
                       [88,  5,  7,  7, 57, 82],
                       [89,  5,  7,  4, 57, 79],
                       [90,  5,  7,  8, 57, 83],
                       [91,  5,  1,  0, 51, 75],
                       [92,  5,  1,  8, 51, 83],
                       [93,  5,  1, 10, 51, 85],
                       [94,  0,  9, 14,  9, 14],
                       [95,  1,  9, 14, 19, 29],
                       [96,  2,  9, 14, 29, 44],
                       [97,  3,  9, 14, 39, 59],
                       [98,  4,  9, 14, 49, 74],
                       [99,  5,  9, 14, 59, 89]
                       ])


class Recognition(Disentangle):
    """
    Class: compute (mean) Average Precision
    @args
    ----
        num_class: int, optional. The number of class of the classification task (default = 100)
    @attributes
    ----------
    predictions:    2D array
        holds the accumulated predictions before a reset()
    targets:        2D array
        holds the accumulated groundtruths before a reset()
    @methods
    -------
    GENERIC
    -------
    reset():
        call at the beginning of new experiment or epoch to reset all accumulators.
    update(targets, predictions):
        call per iteration to update the class accumulators for predictions and corresponding groundtruths.
    video_end():
        call at the end of every video during inference to log performance per video.

    RESULTS
    ----------
    compute_AP():
        call at any point to check the performance of all seen examples after the last reset() call.
    compute_video_AP():
        call at any time, usually at the end of experiment or inference, to obtain the performance of all tested videos.
    compute_global_AP():
        call at any point, compute the framewise AP for all frames across all videos and mAP
    topK(k):
        obtain top k=[5,10,20, etc] performance
    topClass(k):
        obtain top-k correctly detected classes
    """
    def __init__(self, num_class=100, ignore_null=False):
        """
        @args
        ----
        num_class: int, optional
            number of triplet classes (default 100)
        ignore_null: bool, optional
            NOTE(review): stored but currently unused — the compute_* methods
            take their own ignore_null argument instead of reading this attribute.
        """
        super(Recognition, self).__init__()
        np.seterr(divide='ignore', invalid='ignore')
        self.num_class = num_class
        self.ignore_null = ignore_null
        self.reset_global()

    def resolve_nan(self, classwise):
        """Replace exactly-zero AP values with NaN.

        average_precision_score yields -0.0 for classes absent from the
        sample; since -0.0 == 0.0 in IEEE arithmetic this also converts true
        zero APs.  np.nanmean later skips the NaN entries.
        """
        classwise[classwise==-0.0] = np.nan
        return classwise

    ##%%%%%%%%%%%%%%%%%%% RESET OP #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def reset(self):
        "call at the beginning of new experiment or epoch to reset the accumulators for predictions and groundtruths."
        self.predictions = np.empty(shape = [0,self.num_class], dtype=np.float64)
        self.targets     = np.empty(shape = [0,self.num_class], dtype=np.int64)

    def reset_global(self):
        "call at the beginning of new experiment to clear the per-video accumulators as well."
        self.global_predictions = []
        self.global_targets     = []
        self.reset()

    ##%%%%%%%%%%%%%%%%%%% UPDATE OP #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def update(self, targets, predictions):
        """
        Accumulate one batch of groundtruths and predictions.
        @args
        -----
        targets: 2D array, int
            groundtruth of shape (F, C) where F = number of frames, C = number of class
        predictions: 2D array, float
            model prediction of the same shape as the groundtruth
        """
        self.predictions = np.append(self.predictions, predictions, axis=0)
        self.targets     = np.append(self.targets, targets, axis=0)

    def video_end(self):
        "call to signal the end of current video. Needed during inference to log performance per video"
        self.global_predictions.append(self.predictions)
        self.global_targets.append(self.targets)
        self.reset()

    ##%%%%%%%%%%%%%%%%%%% INTERNAL HELPERS #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def _accumulated(self):
        """Return per-video accumulators plus the still-open accumulator.

        Returns *copies* of the global lists.  Bug fix: the original code
        aliased self.global_targets and appended the pending accumulator into
        it in place, permanently duplicating data on every compute_* call.
        """
        global_targets     = list(self.global_targets)
        global_predictions = list(self.global_predictions)
        if len(self.targets) > 0:
            global_targets.append(self.targets)
            global_predictions.append(self.predictions)
        return global_targets, global_predictions

    def _filter_component(self, targets, predicts, component):
        """Project targets/predictions onto `component`; abort on unknown symbols.

        (Error message unified across all callers; topK/topClass previously
        used a slightly different wording.)
        """
        if component not in ["ivt", "it", "iv", "t", "v", "i"]:
            sys.exit("Function filtering {} not yet supported!".format(component))
        return self.extract(targets, component), self.extract(predicts, component)

    def _classwise_ap(self, targets, predicts, component, ignore_null):
        """Per-class AP with absent classes as NaN; drops the 6 null-target
        triplet classes when ignore_null is set and component is 'ivt'."""
        classwise = average_precision_score(targets, predicts, average=None)
        classwise = self.resolve_nan(classwise)
        if (ignore_null and component=="ivt"): classwise = classwise[:-6]
        return classwise

    ##%%%%%%%%%%%%%%%%%%% COMPUTE OP #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def compute_AP(self, component="ivt", ignore_null=False):
        """
        compute performance for all seen examples after a reset()
        @args
        ----
        component: str (optional) default: ivt for triplets
            i, v, t, iv, it or ivt
        ignore_null: bool (optional)
            if True and component is 'ivt', drop the 6 null-target classes
        @return
        -------
        dict with "AP" (1D array, AP per class) and "mAP" (float, nan-mean AP)
        """
        targets, predicts = self._filter_component(self.targets, self.predictions, component)
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', message='[info] triplet classes not represented in this test sample will be reported as nan values.')
            classwise = self._classwise_ap(targets, predicts, component, ignore_null)
            mean      = np.nanmean(classwise)
        return {"AP":classwise, "mAP":mean}

    def compute_global_AP(self, component="ivt", ignore_null=False):
        """
        compute framewise performance over all videos seen since reset_global()
        @args
        ----
        component: str (optional) default: ivt for triplets
            i, v, t, iv, it or ivt
        ignore_null: bool (optional)
            if True and component is 'ivt', drop the 6 null-target classes
        @return
        -------
        dict with "AP" (1D array, AP per class) and "mAP" (float, nan-mean AP)
        """
        global_targets, global_predictions = self._accumulated()
        targets  = np.concatenate(global_targets, axis=0)
        predicts = np.concatenate(global_predictions, axis=0)
        targets, predicts = self._filter_component(targets, predicts, component)
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', message='[info] triplet classes not represented in this test sample will be reported as nan values.')
            classwise = self._classwise_ap(targets, predicts, component, ignore_null)
            mean      = np.nanmean(classwise)
        return {"AP":classwise, "mAP":mean}

    def compute_video_AP(self, component="ivt", ignore_null=False):
        """
        compute video-wise AP: per-class AP averaged over videos
        @args
        ----
        component: str (optional) default: ivt for triplets
            i, v, t, iv, it or ivt
        ignore_null: bool (optional)
            if True and component is 'ivt', drop the 6 null-target classes
        @return
        -------
        dict with "AP" (1D array, per-class AP averaged over videos) and
        "mAP" (float, nan-mean over classes)
        """
        global_targets, global_predictions = self._accumulated()
        video_log = []
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', message='')
            warnings.simplefilter("ignore", category=RuntimeWarning)
            for targets, predicts in zip(global_targets, global_predictions):
                targets, predicts = self._filter_component(targets, predicts, component)
                classwise = average_precision_score(targets, predicts, average=None)
                classwise = self.resolve_nan(classwise)
                video_log.append( classwise.reshape([1,-1]) )
            video_log = np.concatenate(video_log, axis=0)
            videowise = np.nanmean(video_log, axis=0)
            if (ignore_null and component=="ivt"): videowise = videowise[:-6]
            mean      = np.nanmean(videowise)
        return {"AP":videowise, "mAP":mean}

    ##%%%%%%%%%%%%%%%%%%% TOP OP #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    def topK(self, k=5, component="ivt"):
        """
        compute top-K accuracy over all examples seen since reset_global()
        @args
        ----
        k: int
            number of chances of correct prediction
        component: str (optional) default: ivt for triplets
            i, v, t, iv, it or ivt
        @return
        ----
        float: fraction of groundtruth-positive classes found in the top-k scores
        """
        global_targets, global_predictions = self._accumulated()
        targets  = np.concatenate(global_targets, axis=0)
        predicts = np.concatenate(global_predictions, axis=0)
        targets, predicts = self._filter_component(targets, predicts, component)
        correct = 0.0
        total   = 0
        for gt, pd in zip(targets, predicts):
            gt_pos  = np.nonzero(gt)[0]
            pd_idx  = (-pd).argsort()[:k]
            correct += len(set(gt_pos).intersection(set(pd_idx)))
            total   += len(gt_pos)
        # guard against division by zero when no positives were seen
        if total==0: total=1
        return correct/total

    def topClass(self, k=10, component="ivt"):
        """
        report the top-K best-recognized classes over all examples seen since reset_global()
        @args
        ----
        k: int
            number of classes to report
        component: str (optional) default: ivt for triplets
            i, v, t, iv, it or ivt
        @return
        ----
        dict mapping class index -> AP for the k classes with highest AP
        """
        global_targets, global_predictions = self._accumulated()
        targets  = np.concatenate(global_targets, axis=0)
        predicts = np.concatenate(global_predictions, axis=0)
        targets, predicts = self._filter_component(targets, predicts, component)
        classwise = average_precision_score(targets, predicts, average=None)
        classwise = self.resolve_nan(classwise)
        pd_idx    = (-classwise).argsort()[:k]
        output    = {x:classwise[x] for x in pd_idx}
        return output

# if __name__ == "__main__":
#     metric = Recognition()
#     y = torch.randn(320, 100)
#     gt = F.one_hot(torch.argmax(y, dim=-1)).float()
#     print(y.shape, gt.shape)
#     metric.reset()
#     metric.update(gt, y)
#     print(metric.compute_AP('i')["mAP"])
#     print(metric.compute_AP('v')["mAP"])
#     print(metric.compute_AP('t')["mAP"])
#     print(metric.compute_AP('iv')["mAP"])
#     print(metric.compute_AP('it')["mAP"])
#     print(metric.compute_AP('ivt')["mAP"])
#     model = MTFiST()
#     state_dict = torch.load("/home/georgehu/Documents/Projects/george-reimplement/medical/MTFiST/model/origin_pretrain.pth", map_location=torch.device("cpu"))
#     missing_keys, unexpected_keys = model.load_state_dict(state_dict)
#     print(missing_keys)
#     print(unexpected_keys)
