import os
import sys
import numpy as np
import torch
import torch.utils.data
from data.apple_dataset import AppleDataset

from utility.coco_utils import get_coco_api_from_dataset
from utility.coco_eval import CocoEvaluator
import utility.utils as utils
import utility.transforms as T


# 获取数据转换流程
def get_transform(train):
    """Build the image transform pipeline.

    Always converts PIL images to tensors; when ``train`` is truthy, a
    random horizontal flip (p=0.5) is appended for data augmentation.

    :param train: whether to include training-time augmentation.
    :return: a ``T.Compose`` wrapping the selected transforms.
    """
    steps = [T.ToTensor()]  # PIL image -> tensor, always first
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)


# 读取检测结果文件
def read_detections(file_path):
    """Parse a detection results file into per-image PyTorch tensors.

    Each line of the file has the CSV form ``im_name,x1,y1,x2,y2,score``.

    :param file_path: path to the detections text file.
    :return: dict mapping image name -> {'boxes': (N, 4) float64 tensor,
             'scores': (N,) float64 tensor, 'labels': (N,) int tensor of
             ones (label 1 = apple)}, or ``None`` when the file is missing.
    """
    if not os.path.isfile(file_path):
        print("无法读取检测文件 {}. 文件或目录不存在。".format(file_path))
        return None

    # Accumulate plain Python lists first and convert once at the end.
    # The original called np.vstack/np.concatenate per line (quadratic)
    # and duplicated the append logic across both branches.
    boxes_by_img = {}
    scores_by_img = {}
    with open(file_path, 'r') as infile:
        for line in infile:
            im_name, x1, y1, x2, y2, score = line.rstrip().split(',')
            boxes_by_img.setdefault(im_name, []).append(
                [float(x1), float(y1), float(x2), float(y2)])
            scores_by_img.setdefault(im_name, []).append(float(score))

    detections = {}
    for im_name, im_boxes in boxes_by_img.items():
        detections[im_name] = {
            # float64 matches the original np.empty((0, 4)) / float() dtypes.
            'boxes': torch.from_numpy(np.asarray(im_boxes, dtype=np.float64)),
            'scores': torch.from_numpy(
                np.asarray(scores_by_img[im_name], dtype=np.float64)),
            # Single-class task: every detection is labelled 1 ("apple");
            # dtype=int matches the original np.array([1]) default.
            'labels': torch.from_numpy(np.ones(len(im_boxes), dtype=int)),
        }
    return detections


if __name__ == "__main__":
    # CLI: <input_dir> <output_dir>
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]

    # Submission ('res') and ground-truth ('ref') sub-directories.
    submit_dir = os.path.join(input_dir, 'res')
    truth_dir = os.path.join(input_dir, 'ref')

    # Warn (but do not abort — matches original behavior) when missing.
    if not os.path.isdir(submit_dir):
        # BUGFIX: the original mixed %-style with str.format
        # ("%s 不存在".format(...)), which printed the literal "%s"
        # instead of the directory path.
        print("{} 不存在".format(submit_dir))

    # Create the output directory only when both input dirs exist.
    if os.path.isdir(submit_dir) and os.path.isdir(truth_dir):
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    # Evaluation runs on CPU.
    device = torch.device('cpu')

    metric_logger = utils.MetricLogger(delimiter="  ")
    # Ground-truth dataset; no training-time augmentation.
    dataset = AppleDataset(os.path.join(truth_dir), get_transform(train=False))
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                              shuffle=False, num_workers=1,
                                              collate_fn=utils.collate_fn)
    # Only bounding-box metrics are evaluated.
    iou_types = ['bbox']

    # Build the COCO-style ground truth and the evaluator around it.
    coco = get_coco_api_from_dataset(data_loader.dataset)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    # NOTE(review): read_detections returns None when results.txt is
    # missing; the loop below would then raise TypeError — consider
    # failing fast here.
    detections = read_detections(os.path.join(submit_dir, 'results.txt'))

    for image, targets in data_loader:
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # Map the dataset image id back to the file name keys used in
        # the detections dict.
        im_id = targets[0]['image_id'].item()
        im_name = data_loader.dataset.get_img_name(im_id)
        # Detections for the current image, moved to the target device.
        outputs = [detections[im_name]]
        outputs = [{k: v.to(device) for k, v in t.items()} for t in outputs]

        # image_id -> predictions, as CocoEvaluator.update expects.
        res = {target['image_id'].item(): out for target, out in zip(targets, outputs)}
        coco_evaluator.update(res)

    # Synchronize logger/evaluator state across processes (no-op when
    # running single-process).
    metric_logger.synchronize_between_processes()
    print("平均统计信息:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # Accumulate predictions over all images and produce summary stats.
    coco_evaluator.accumulate()
    stats = coco_evaluator.summarize()

    # Write the six COCO AP metrics; `with` guarantees the handle is
    # closed even on error (the original leaked it on any exception).
    outputfile_name = os.path.join(output_dir, 'scores.txt')
    with open(outputfile_name, 'w') as output_file:
        output_file.write("AP: {} \n".format(float(stats[0])))
        output_file.write("AP_0.5: {} \n".format(float(stats[1])))
        output_file.write("AP_0.75: {} \n".format(float(stats[2])))
        output_file.write("AP_small: {} \n".format(float(stats[3])))
        output_file.write("AP_medium: {} \n".format(float(stats[4])))
        output_file.write("AP_large: {} \n".format(float(stats[5])))