# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import numpy as np
import os
import torch
import warnings
from mmcls.apis import multi_gpu_test, single_gpu_test
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier
from mmcls.utils import (auto_select_device, get_root_logger,
                         setup_multi_processes, wrap_distributed_model,
                         wrap_non_distributed_model)
from mmcv import DictAction
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from numbers import Number

# ---- Extra dependencies for ECE computation and plotting ----
# NOTE(review): torch and numpy are imported a second time below; the
# duplicates are harmless but could be removed.

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Subset, random_split
import matplotlib.patches as patches

def parse_args():
    """Parse command-line arguments for the mmcls test script.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable (needed by distributed launchers) and requires at least one
    of ``--metrics`` / ``--out`` to be given.
    """
    arg_parser = argparse.ArgumentParser(description='mmcls test model')
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('checkpoint', help='checkpoint file')
    arg_parser.add_argument('--out', help='output result file')
    item_choices = ['class_scores', 'pred_score', 'pred_label', 'pred_class']
    arg_parser.add_argument(
        '--out-items',
        nargs='+',
        default=['all'],
        choices=item_choices + ['none', 'all'],
        help='Besides metrics, what items will be included in the output '
        f'result file. You can choose some of ({", ".join(item_choices)}), '
        'or use "all" to include all above, or use "none" to disable all of '
        'above. Defaults to output all.',
        metavar='')
    arg_parser.add_argument(
        '--metrics',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., '
        '"accuracy", "precision", "recall", "f1_score", "support" for single '
        'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
        'multi-label dataset')
    arg_parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results')
    arg_parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    arg_parser.add_argument(
        '--metric-options',
        nargs='+',
        action=DictAction,
        default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as a dict metric_options for dataset.evaluate()'
        ' function.')
    arg_parser.add_argument(
        '--show-options',
        nargs='+',
        action=DictAction,
        help='custom options for show_result. key-value pair in xxx=yyy.'
        'Check available options in `model.show_result`.')
    arg_parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed testing)')
    arg_parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    arg_parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)
    arg_parser.add_argument('--device', help='device used for testing')
    arg_parser.add_argument(
        '--output-prediction',
        help='where to save prediction in csv file',
        default=False)
    arg_parser.add_argument('--savepng', type=str, default='ft.png')
    arg_parser.add_argument('--title', type=str, default='')
    arg_parser.add_argument('--dataset', type=str, default='')
    parsed = arg_parser.parse_args()

    # Distributed launchers read the rank from the environment; only set it
    # when it is not already provided.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))

    assert parsed.metrics or parsed.out, \
        'Please specify at least one of output path and evaluation metrics.'

    return parsed

def calculate_ece(confidences, predictions, labels, num_bins=10):
    """Compute the Expected Calibration Error (ECE) over equal-width bins.

    The ``[0, 1]`` confidence range is split into ``num_bins`` equal bins;
    within each bin the mean confidence and the accuracy are compared, and
    ECE is the bin-size-weighted average of their absolute gaps.

    Args:
        confidences (array-like): per-sample confidence scores in ``[0, 1]``.
        predictions (array-like): per-sample predicted labels.
        labels (array-like): per-sample ground-truth labels.
        num_bins (int): number of equal-width bins. Defaults to 10.

    Returns:
        tuple: ``(ece, bin_confidences, accuracy, bin_fractions)`` where
        ``bin_confidences``/``accuracy`` are per-bin means (0 for empty bins)
        and ``bin_fractions`` is each bin's share of the samples.
    """
    confidences = np.asarray(confidences)
    predictions = np.asarray(predictions)
    labels = np.asarray(labels)

    bin_boundaries = np.linspace(0, 1, num_bins + 1)
    bin_confidences = np.zeros(num_bins)  # mean confidence per bin
    accuracy = np.zeros(num_bins)  # accuracy per bin
    bin_sizes = np.zeros(num_bins)  # sample count per bin

    for i in range(num_bins):
        mask = (confidences >= bin_boundaries[i]) & \
               (confidences < bin_boundaries[i + 1])
        if i == num_bins - 1:
            # Bug fix: include the right edge in the last bin, otherwise
            # samples with confidence exactly 1.0 fall into no bin at all.
            mask |= confidences == bin_boundaries[i + 1]
        if mask.any():
            bin_confidences[i] = np.mean(confidences[mask])
            accuracy[i] = np.mean(predictions[mask] == labels[mask])
        bin_sizes[i] = np.count_nonzero(mask)

    total = bin_sizes.sum()
    if total == 0:
        # No sample landed in any bin (e.g. empty input): ECE is 0 by
        # convention instead of a 0/0 division producing nan.
        return 0.0, bin_confidences, accuracy, bin_sizes
    ece = np.average(
        np.abs(accuracy - bin_confidences), weights=bin_sizes / total)
    return ece, bin_confidences, accuracy, bin_sizes / total

def main():
    """Run inference with an mmcls classifier and report its calibration.

    Builds the test dataset/dataloader and the classifier from the given
    config, loads the checkpoint, collects the per-sample max score and
    predicted label over the whole test set, then computes and prints the
    Expected Calibration Error (ECE) and the weighted average accuracy.
    Most of the reliability-diagram plotting code below is commented out;
    only the first bar plot and the two vertical lines are drawn (and the
    figure is never saved while the savefig call stays commented out).
    """
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.output_prediction = args.output_prediction
    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Checkpoint weights are loaded explicitly below, so disable the
    # config's pretrained init to avoid a redundant download/load.
    cfg.model.pretrained = None

    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]
    cfg.device = args.device or auto_select_device()

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    dataset = build_dataset(cfg.data.test, default_args=dict(test_mode=True))

    # build the dataloader
    # The default loader config
    loader_cfg = dict(
        # cfg.gpus will be ignored if distributed
        num_gpus=1 if cfg.device == 'ipu' else len(cfg.gpu_ids),
        dist=distributed,
        round_up=True,
    )
    # The overall dataloader settings
    loader_cfg.update({
        k: v
        for k, v in cfg.data.items() if k not in [
            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
            'test_dataloader'
        ]
    })
    test_loader_cfg = {
        **loader_cfg,
        'shuffle': False,  # Not shuffle by default
        'sampler_cfg': None,  # Not use sampler by default
        **cfg.data.get('test_dataloader', {}),
    }
    # the extra round_up data will be removed during gpu/cpu collect
    # NOTE(review): this script collects results manually (loop below)
    # instead of via single_gpu_test/multi_gpu_test, so any round_up
    # padding is NOT removed — confirm len(confidences) == len(labels).
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    model = build_classifier(cfg.model)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    if 'CLASSES' in checkpoint.get('meta', {}):
        CLASSES = checkpoint['meta']['CLASSES']
    else:
        from mmcls.datasets import ImageNet
        warnings.simplefilter('once')
        warnings.warn('Class names are not saved in the checkpoint\'s '
                      'meta data, use imagenet by default.')
        CLASSES = ImageNet.CLASSES
    if not distributed:
        model = wrap_non_distributed_model(
            model, device=cfg.device, device_ids=cfg.gpu_ids)
        if cfg.device == 'ipu':
            from mmcv.device.ipu import cfg2options, ipu_model_wrapper
            opts = cfg2options(cfg.runner.get('options_cfg', {}))
            if fp16_cfg is not None:
                model.half()
            model = ipu_model_wrapper(model, opts, fp16_cfg=fp16_cfg)
            data_loader.init(opts['inference'])
        model.CLASSES = CLASSES
    else:
        # NOTE(review): model.CLASSES is only assigned in the
        # non-distributed branch; the pred_class lookup below would fail
        # here — confirm this script is only used without a launcher.
        model = wrap_distributed_model(
            model, device=cfg.device, broadcast_buffers=False)

    confidences = []  # per-sample max predicted score
    predictions = []  # per-sample predicted class index
    labels = []  # ground-truth labels (reassigned from the dataset below)

    # Run inference over the whole test set and collect scores/predictions.
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)
            scores = np.vstack(result)
            pred_score = np.max(scores, axis=1)
            pred_label = np.argmax(scores, axis=1)
            # pred_class is computed but never used below.
            pred_class = [model.CLASSES[lb] for lb in pred_label]

        confidences.extend(pred_score)
        predictions.extend(pred_label)

    confidences = np.array(confidences)
    predictions = np.array(predictions)
    labels = dataset.get_gt_labels()

    # Compute the Expected Calibration Error.
    ece, bin_confidences, accuracy,n_samples = calculate_ece(confidences, predictions, labels, num_bins=10)

    # Per-bin means weighted by each bin's share of the samples.
    avg_conf = np.average(bin_confidences, weights=n_samples)
    avg_acc = np.average(accuracy, weights=n_samples)

    xs = np.arange(0.0, 1.0, 0.1)
    ys = np.arange(0.0, 1.0, 0.1)
    # Visualize the calibration (reliability diagram).
    # Snap each bin's mean confidence to the left edge of its 0.1-wide bin
    # so the bars align with the bin grid.
    mapped_bin_confidences = [int(confidence * 9) / 10 for confidence in bin_confidences]
    # Create a figure with two subplots
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(5, 8))

    # Plot the second bar plot in the second subplot
    ax1.bar(mapped_bin_confidences, n_samples, width=1/len(mapped_bin_confidences), align='edge', edgecolor='black', linewidth=1, alpha=1, color='blue', label='n_samples')
    # Draw vertical reference lines for the weighted averages on ax1.
    ax1.axvline(x=avg_conf, linestyle='--', color='gray', linewidth=2)
    ax1.axvline(x=avg_acc, linestyle='--', color='gray', linewidth=2)

    # Offset used to keep the (commented-out) annotations from overlapping.
    d_pos = np.sign(avg_conf-avg_acc)*0.05

    x_conf = avg_conf+d_pos
    x_acc = avg_acc-d_pos

    print(f'ece: {ece:.3f}')
    print(f'avg_acc: {avg_acc:.3f}')

    # ax1.annotate('Avg conf', xy=(avg_conf, 0), xytext=(x_conf, 0.5), ha='center', rotation='vertical',fontsize=16,color='gray')
    # ax1.annotate('Avg acc', xy=(avg_acc, 0), xytext=(x_acc, 0.5), ha='center', rotation='vertical',fontsize=16,color='gray')

    # ax1.set_ylabel('%Samples')
    # # ax1.set_title('Second Bar Plot')
    # ax1.set_xlim(0, 1)
    # ax1.set_ylim(0, 1)
    # ax1.legend()

    # # Plot the first bar plot in the first subplot
    # ax2.bar(xs, ys + 0.1, width=1/len(xs), align='edge', edgecolor='red', linewidth=1, alpha=0.2, color='red', label='Gap')
    # ax2.bar(mapped_bin_confidences, accuracy, width=1/len(mapped_bin_confidences), align='edge', edgecolor='black', linewidth=1, alpha=1, color='blue', label='Accuracy')
    # ax2.plot([0, 1], [0, 1], '--', color='red')
    # ax2.set_xlabel('Confidence')
    # ax2.set_ylabel('Accuracy')

    # ax2.annotate(f'Error={ece:.3f}', xy=(0.7, 0.0), xytext=(0.7, 0.1), ha='center',fontsize=22, color='black')
    # rect = patches.Rectangle((0.4, 0.05), 0.58, 0.2, linewidth=1, edgecolor='black', facecolor='white', alpha=0.3)
    # ax2.add_patch(rect)
    # ax2.set_xlim(0, 1)
    # ax2.set_ylim(0, 1)
    # ax2.legend()

    # fig.suptitle(f'{args.title}\n{args.dataset}', fontsize=22,color='black')
    # plt.savefig(f'plots/{args.savepng}')

# Script entry point.
if __name__ == '__main__':
    main()
