import numpy as np


def calc_confusion(pred_masks, gt_masks, n_class=12):
    """Accumulate a confusion matrix over pairs of 2-D label masks.

    Parameters
    ----------
    pred_masks : iterable of ndarray
        Predicted label maps, each of shape (H, W) with integer class ids
        in ``[0, n_class)``.
    gt_masks : iterable of ndarray
        Ground-truth label maps, pairwise same shape as the predictions.
        Pixels with a negative ground-truth label are ignored.
    n_class : int, optional
        Number of classes. Defaults to 12 to preserve the original
        hard-coded behavior.

    Returns
    -------
    numpy.ndarray
        ``(n_class, n_class)`` int64 matrix where ``confusion[i, j]`` counts
        pixels whose ground truth is class ``i`` and prediction is class ``j``.

    Raises
    ------
    ValueError
        If any mask is not 2-D, or a prediction/ground-truth pair differ
        in shape.
    """
    confusion = np.zeros((n_class, n_class), dtype=np.int64)
    for pred_mask, gt_mask in zip(pred_masks, gt_masks):
        if pred_mask.ndim != 2 or gt_mask.ndim != 2:
            raise ValueError('ndim of labels should be two')
        if pred_mask.shape != gt_mask.shape:
            raise ValueError('Shape of ground truth and prediction should be same')
        pred_mask = pred_mask.flatten()
        gt_mask = gt_mask.flatten()

        # Drop pixels whose ground-truth label is negative (void / unlabeled).
        mask = gt_mask >= 0
        # Encode each (gt, pred) pair as a single flat index so one bincount
        # fills the whole n_class x n_class matrix in a single C-level pass.
        # Both operands are cast to int so float-dtype masks also work.
        confusion += np.bincount(
            n_class * gt_mask[mask].astype(int) + pred_mask[mask].astype(int),
            minlength=n_class ** 2
        ).reshape((n_class, n_class))
    return confusion
def calc_iou(confusion):
    """Per-class intersection-over-union from a confusion matrix.

    ``IoU_i = TP_i / (TP_i + FP_i + FN_i)``. A class absent from both the
    ground truth and the predictions yields 0/0 = NaN; the RuntimeWarning
    NumPy would emit for that case is suppressed so callers can safely take
    ``np.nanmean`` of the result.

    Parameters
    ----------
    confusion : numpy.ndarray
        Square confusion matrix, rows indexed by ground truth, columns by
        prediction (as produced by ``calc_confusion``).

    Returns
    -------
    numpy.ndarray
        IoU per class with the last class dropped (the last class is
        treated as background and excluded).
    """
    tp = np.diag(confusion)
    # Row sums are gt totals (TP+FN); column sums are prediction totals (TP+FP).
    iou_denominator = confusion.sum(axis=1) + confusion.sum(axis=0) - tp
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = tp / iou_denominator
    # Exclude the background (last) class.
    return iou[:-1]
def eval_metrics(pred_masks, gt_masks):
    """Compute segmentation metrics from prediction/ground-truth mask pairs.

    Returns a dict with keys:
      'iou'  : per-class IoU (background excluded, from ``calc_iou``)
      'miou' : NaN-ignoring mean of 'iou'
      'pa'   : overall pixel accuracy
      'ca'   : per-class accuracy (recall) for all classes
      'mca'  : NaN-ignoring mean of 'ca' excluding the last (background) class
    """
    confusion = calc_confusion(pred_masks, gt_masks)
    iou = calc_iou(confusion)
    correct = np.diag(confusion)
    # Overall pixel accuracy: correctly labelled pixels over all counted pixels.
    pixel_acc = correct.sum() / confusion.sum()
    # Per-class accuracy; the epsilon guards against empty (zero-row) classes.
    class_acc = correct / (np.sum(confusion, axis=1) + 1e-10)
    return {
        'iou': iou,
        'miou': np.nanmean(iou),
        'pa': pixel_acc,
        'ca': class_acc,
        'mca': np.nanmean(class_acc[:-1]),
    }