import torch
import numpy as np

from ignite.metrics import Metric

from ignite.exceptions import NotComputableError
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced

class FPR95(Metric):
    """False positive rate at 95% recall (FPR@95).

    Accumulates ``(dists, labels)`` pairs across batches via :meth:`update`
    and, in :meth:`compute`, finds the distance threshold at which recall of
    the positive class first reaches 95%, then reports the fraction of
    negatives falling below that threshold.

    Args:
        output_transform: callable applied by ignite to the process
            function's output before it reaches :meth:`update`.
        device: device passed through to the ignite ``Metric`` base class.
    """

    def __init__(self, output_transform=lambda x: x, device="cpu"):
        # Per-batch accumulators; lists of numpy arrays until compute().
        self._labels = []
        self._dists = []
        super(FPR95, self).__init__(output_transform=output_transform, device=device)

    @reinit__is_reduced
    def reset(self):
        """Clear the accumulated state (called between epochs by ignite)."""
        self._labels = []
        self._dists = []
        super().reset()

    @reinit__is_reduced
    def update(self, output):
        """Accumulate one batch.

        Args:
            output: tuple whose second element is either ``None`` (batch is
                skipped) or a ``(dists, labels)`` pair of tensors.
                Presumably ``labels`` is binary with 1 = positive — TODO confirm
                against the caller.
        """
        if output[1] is None:
            return
        dists, labels = output[1][0], output[1][1]
        # Move to CPU numpy immediately so accumulation does not pin GPU memory.
        self._labels.append(labels.cpu().numpy())
        self._dists.append(dists.cpu().numpy())

    @sync_all_reduce("_labels", "_dists")
    def compute(self):
        """Return FPR at the threshold where recall first reaches 95%.

        Raises:
            NotComputableError: if no data has been accumulated.
        """
        if len(self._labels) == 0:
            raise NotComputableError(
                "FPR95 must have at least one example before it can be computed."
            )

        # Work on locals — do NOT overwrite the accumulators, so compute()
        # stays idempotent and further update()/compute() calls keep working.
        labels = np.concatenate(self._labels, axis=0).reshape(-1)
        dists = np.concatenate(self._dists, axis=0).reshape(-1)

        recall_point = 0.95
        # Sort labels by ascending distance (closest pairs first).
        labels = labels[np.argsort(dists)]
        # First index at which cumulative recall reaches 95% of all positives.
        threshold_index = np.argmax(np.cumsum(labels) >= recall_point * np.sum(labels))

        FP = np.sum(labels[:threshold_index] == 0)  # negatives accepted below threshold
        TN = np.sum(labels[threshold_index:] == 0)  # negatives rejected at/above threshold

        # No negatives at all: FPR is trivially 0 (original raised ZeroDivisionError).
        if FP + TN == 0:
            return 0.0
        return float(FP) / float(FP + TN)


if __name__ == "__main__":
    # Smoke test: random distances with random binary labels.
    m = FPR95()  # was `CMR()` — undefined name (NameError)

    dis = torch.randint(6, (4, 10)).float()
    labels = torch.randint(2, (4, 10)).float()
    print(dis)

    for i in range(dis.size(0)):
        # update() expects output[1] == (dists, labels); passing None (as the
        # original did) skips every batch and makes compute() fail on empty data.
        m.update((None, (dis[i], labels[i])))

    res = m.compute()
    print(res)


