import os
import sys
from statistics import mean

import cv2
import numpy as np
import torch
import torch.utils.data
from sklearn.metrics import confusion_matrix

from data.apple_dataset import AppleDataset
import utility.utils as utils
import utility.transforms as T


# BEGIN
def get_transform(train):
    """Build the image transform pipeline for the dataset.

    Parameters:
        train (bool): True when the transforms are used for training.

    Returns:
        T.Compose: ToTensor always, plus a 50%-probability horizontal
        flip when training (flipping is an augmentation, so it is
        skipped at evaluation time).
    """
    ops = [T.ToTensor()]
    if train:
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)
# END


# BEGIN

# 定义一个函数read_detections，接受一个文件路径作为参数
def read_detections(file_path):
    """Parse a CSV detection file into per-image torch tensors.

    Each line of the file has the form ``im_name,x1,y1,x2,y2,score``.

    Parameters:
        file_path (str): path to the detection file.

    Returns:
        dict: maps image name -> {'boxes': (N, 4) float tensor,
        'scores': (N,) float tensor, 'labels': (N,) int64 tensor of
        ones — every detection is the single 'apple' class}.
        Returns an empty dict when the file does not exist.
    """
    if not os.path.isfile(file_path):
        print("Could not read the detection file {}. No such file or directory.".format(file_path))
        # Bail out early instead of crashing in open() below.
        return {}

    detections = {}
    with open(file_path, 'r') as infile:
        for line in infile:
            im_name, x1, y1, x2, y2, score = line.rstrip().split(',')
            # Lazily create the per-image accumulator on first sight so
            # both the first and subsequent lines take the same path.
            entry = detections.setdefault(im_name, {
                'boxes': np.empty((0, 4)),
                'scores': np.empty((0,)),
                'labels': np.empty((0,), dtype=np.int64),
            })
            entry['boxes'] = np.vstack((entry['boxes'],
                                        [float(x1), float(y1), float(x2), float(y2)]))
            entry['scores'] = np.concatenate((entry['scores'], [float(score)]))
            # All detections belong to class 1 ('apple').
            entry['labels'] = np.concatenate((entry['labels'], [1]))

    # BUG FIX: the original converted to tensors and returned from inside
    # the read loop's else-branch, so only the first two lines of the file
    # were ever processed. Conversion now happens once, after reading.
    for name in detections:
        detections[name]['boxes'] = torch.from_numpy(detections[name]['boxes'])
        detections[name]['scores'] = torch.from_numpy(detections[name]['scores'])
        detections[name]['labels'] = torch.from_numpy(detections[name]['labels'])
    return detections
def computeMetrics(confusion):
    """Compute segmentation evaluation metrics from a confusion matrix.

    Parameters:
        confusion (np.ndarray): square (C, C) confusion matrix where rows
            are ground-truth classes and columns are predicted classes.

    Returns:
        tuple: (miou, fwiou, macc, pacc, ious, maccs) — mean IoU,
        frequency-weighted IoU, mean accuracy, pixel accuracy, and the
        per-class IoU / accuracy arrays (NaN for classes absent from the
        ground truth).

    Raises:
        ValueError: if the confusion matrix sums to zero.
    """
    labelCount = confusion.shape[0]
    # Per-class results default to NaN so absent classes can be told
    # apart from a genuine score of 0.
    # BUG FIX: np.NAN was removed in NumPy 2.0; np.nan is the canonical name.
    ious = np.full(labelCount, np.nan)
    maccs = np.full(labelCount, np.nan)

    total = confusion.sum()
    if total <= 0:
        raise ValueError('Error: Confusion matrix is empty!')
    tp = np.diagonal(confusion)      # true positives per class
    posPred = confusion.sum(axis=0)  # predicted-positive count per class
    posGt = confusion.sum(axis=1)    # ground-truth count per class

    # Classes present in the ground truth, and those with a non-zero
    # IoU denominator (union of prediction and ground truth).
    valid = posGt > 0
    iousValid = np.logical_and(valid, posGt + posPred - tp > 0)

    # Per-class IoU = TP / (GT + Pred - TP); per-class accuracy = TP / GT.
    ious[iousValid] = np.divide(tp[iousValid],
                                posGt[iousValid] + posPred[iousValid] - tp[iousValid])
    maccs[valid] = np.divide(tp[valid], posGt[valid])
    freqs = np.divide(posGt, total)  # class frequency weights for fwIoU

    miou = np.mean(ious[iousValid])
    fwiou = np.sum(np.multiply(ious[iousValid], freqs[iousValid]))
    macc = np.mean(maccs[valid])
    pacc = tp.sum() / total

    return miou, fwiou, macc, pacc, ious, maccs
    # END


# BEGIN
if __name__ == "__main__":
    # Evaluation entry point. Expects two command-line arguments:
    #   <input_dir>  containing 'res' (predictions) and 'ref' (ground truth)
    #   <output_dir> where scores.txt will be written
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]

    submit_dir = os.path.join(input_dir, 'res')  # submitted predictions
    truth_dir = os.path.join(input_dir, 'ref')   # reference ground truth

    if not os.path.isdir(submit_dir):
        # BUG FIX: original mixed %-style and str.format
        # ("%s does not exist".format(...)) and printed a literal "%s".
        print("{} does not exist".format(submit_dir))

    # Create the output directory only when both inputs are present.
    if os.path.isdir(submit_dir) and os.path.isdir(truth_dir):
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    device = torch.device('cpu')  # evaluation runs on CPU only

    metric_logger = utils.MetricLogger(delimiter="  ")
    dataset = AppleDataset(os.path.join(truth_dir), get_transform(train=False))
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                              shuffle=False, num_workers=1,
                                              collate_fn=utils.collate_fn)

    # Accumulators for per-image segmentation metrics.
    iou_types = ['bbox']
    mious = []
    fious = []
    mAcc = []
    pAcc = []
    # NOTE(review): the (0, 2) width assumes exactly two classes
    # (background / apple) — confirm against the dataset's label set.
    ious = np.empty((0, 2))
    mAccs = np.empty((0, 2))

# BEGIN
    for image, targets in data_loader:
        # Evaluate each ground-truth image against its predicted mask.
        im_id = targets[0]['image_id']
        im_name = data_loader.dataset.get_img_name(im_id)

        # Collapse the per-instance ground-truth masks into one binary mask.
        gt_mask = targets[0]['masks'].numpy()
        temp = np.zeros(gt_mask.shape[1:])
        temp[np.any(gt_mask, axis=0)] = 1
        gt_mask = temp

        # BUG FIX: the original read from args.pred_path, but no `args`
        # object exists anywhere in this script (NameError on the first
        # iteration). The predictions live in submit_dir (<input_dir>/res).
        pred_img = cv2.imread(os.path.join(submit_dir, im_name), 0)
        if pred_img is None:
            # cv2.imread returns None on failure instead of raising.
            raise FileNotFoundError(
                "Could not read prediction image {}".format(os.path.join(submit_dir, im_name)))
        pred_img = np.floor_divide(pred_img, 255)  # map 0/255 -> 0/1

        # Normalise the prediction to the expected 1280x720 resolution.
        # (cv2.resize takes (width, height), hence the swapped tuple.)
        if pred_img.shape != (1280, 720):
            pred_img = cv2.resize(pred_img, (720, 1280), interpolation=cv2.INTER_NEAREST)

        # Confusion matrix over flattened pixel labels.
        confusion = confusion_matrix(gt_mask.flatten(), pred_img.flatten())

        miou, fwiou, macc, pacc, iou, maccs = computeMetrics(confusion)

        # Accumulate per-image metrics.
        mious.append(miou)
        fious.append(fwiou)
        mAcc.append(macc)
        pAcc.append(pacc)
        ious = np.vstack((ious, iou))
        mAccs = np.vstack((mAccs, maccs))

# BEGIN
    # 打印分割结果
    print("Segmentation results:")
    
    # 打印平均IoU
    print("Mean IoU: {}".format(mean(mious)))
    
    # 打印平均频权IoU
    print("Mean frequency weighted IoU: {}".format(mean(fious)))
    
    # 打印平均准确率
    print("Mean Accuracy: {}".format(mean(mAcc)))
    
    # 打印像素准确率
    print("Pixel Accuracy: {}".format(mean(pAcc)))
    
    # 打印类别IoU
    print("Class IoU: {}".format(np.mean(ious, axis=0)))
    
    # 打印类别平均准确率
    print("Class Mean Accuracy: {}".format(np.mean(mAccs, axis=0)))

    # 设置输出文件名
    outputfile_name = os.path.join(output_dir, 'scores.txt')
    
    # 打开输出文件
    output_file = open(outputfile_name, 'w')
    
    # 写入IoU到文件
    output_file.write("IoU: {} \n".format(float(mean(mious))))
    
    # 写入频权IoU到文件
    output_file.write("fwIoU: {} \n".format(float(mean(fious))))
    
    # 写入平均准确率到文件
    output_file.write("mAcc: {} \n".format(float(mean(mAcc))))
    
    # 写入像素准确率到文件
    output_file.write("pAcc: {} \n".format(float(mean(pAcc))))
    
    # 写入类别IoU到文件
    output_file.write("cIoU: {} \n".format(float(np.mean(ious, axis=0))))
    
    # 写入类别平均准确率到文件
    output_file.write("cAcc: {} \n".format(float(np.mean(mAccs, axis=0))))
    
    # 关闭输出文件
    output_file.close()
# END
