# -*- coding: utf-8 -*-
# ================================================================
#
#   Editor      : PyCharm
#   File name   : mAP_brine.py
#   Author      : CGump
#   Email       : huangzhigang93@qq.com
#   Created date: 2025/11/11 10:08
#
# ================================================================
import glob
import json
import os
import random
import shutil
import operator
import sys
import math
import cv2
import matplotlib.pyplot as plt
import numpy as np

import xml.etree.ElementTree as ET
from xml.dom.minidom import Document
from mAP import file_lines_to_list


# Pascal-VOC style annotation header template.
# Placeholders: (filename, path, width, height, depth).
headstr = """\
<annotation verified="no">
    <folder>JPEGImages</folder>
    <filename>%s</filename>
    <path>%s</path>
    <source>
        <database>Unknown</database>
    </source>
    <size>
        <width>%d</width>
        <height>%d</height>
        <depth>%d</depth>
    </size>
    <segmented>0</segmented>
"""

# Per-object template.
# Placeholders: (class name, xmin, ymin, xmax, ymax).
objstr = """\
    <object>
        <name>%s</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>%d</xmin>
            <ymin>%d</ymin>
            <xmax>%d</xmax>
            <ymax>%d</ymax>
        </bndbox>
    </object>
"""

# Closing tag appended after all objects.
tailstr = '''\
</annotation>
'''


def write_xml(xml_path, objstr, objs, headstr, head, tail):
    """Serialize one Pascal-VOC style annotation file.

    Args:
        objstr: per-object template with 5 placeholders (name + 4 bbox ints).
        objs: iterable of (label, xmin, ymin, xmax, ymax) records.
        headstr: header template with 5 placeholders (filename, path, w, h, depth).
        head: values filled into the header template.
        tail: closing text appended verbatim.
    """
    print("write xml file: ", xml_path)
    with open(xml_path, 'w', encoding='utf-8') as f:
        f.write(headstr % tuple(head[:5]))
        f.writelines(objstr % tuple(obj[:5]) for obj in objs)
        f.write(tail)


def calc_iou(dr_box, gt_box):
    """Compute the intersection-over-union of two boxes.

    Args:
        dr_box: detection-result box, (x1, y1, x2, y2)
        gt_box: ground-truth box, (x1, y1, x2, y2)
    Returns:
        IoU of the two boxes; 0 when they do not overlap.
    """
    ix1 = max(dr_box[0], gt_box[0])
    iy1 = max(dr_box[1], gt_box[1])
    ix2 = min(dr_box[2], gt_box[2])
    iy2 = min(dr_box[3], gt_box[3])
    iw, ih = ix2 - ix1, iy2 - iy1
    if iw <= 0 or ih <= 0:
        return 0
    inter = iw * ih
    dr_area = (dr_box[2] - dr_box[0]) * (dr_box[3] - dr_box[1])
    gt_area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    return inter / (dr_area + gt_area - inter)


def calc_distance(dr_box, gt_box):
    """Euclidean distance between the (integer-floored) centers of two boxes.

    Args:
        dr_box: detection-result box, (x1, y1, x2, y2)
        gt_box: ground-truth box, (x1, y1, x2, y2)
    Returns:
        Pixel distance between the two box centers.
    """
    def _center(box):
        # same arithmetic as before: abs of the coordinate sum, then floor-div
        return abs(box[0] + box[2]) // 2, abs(box[1] + box[3]) // 2

    (ax, ay), (bx, by) = _center(dr_box), _center(gt_box)
    return np.sqrt((ax - bx) ** 2 + (ay - by) ** 2)


def nms(boxes, scores, threshold):
    """Greedy non-maximum suppression.

    Args:
        boxes: box coordinates, shape [N, 4] as (x1, y1, x2, y2)
        scores: confidence per box, shape [N]
        threshold: IoU above which a lower-scored box is suppressed
    Returns:
        List of indices of the surviving boxes.
    """
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    # +1 convention for inclusive pixel coordinates
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    remaining = scores.argsort()[::-1]  # indices, highest score first

    survivors = []
    while remaining.size > 0:
        best = remaining[0]
        survivors.append(best)
        rest = remaining[1:]

        # overlap of the best box with every remaining box
        ov_w = np.maximum(0.0, np.minimum(x2[best], x2[rest]) - np.maximum(x1[best], x1[rest]) + 1)
        ov_h = np.maximum(0.0, np.minimum(y2[best], y2[rest]) - np.maximum(y1[best], y1[rest]) + 1)
        inter = ov_w * ov_h
        overlap = inter / (areas[best] + areas[rest] - inter)

        # keep only boxes whose IoU with the best box is at or below threshold
        remaining = rest[overlap <= threshold]

    return survivors


def max_area(boxes):
    """Return the union extent of a cluster of boxes for one target.

    Args:
        boxes: list of [x1, y1, x2, y2] boxes belonging to one target
    Returns:
        [x1, y1, x2, y2] covering all input boxes.
    """
    if not boxes:
        # preserve the historical sentinel result for an empty cluster
        return [99999, 99999, -99999, -99999]
    # min/max per coordinate column; no magic sentinels, so coordinates
    # outside the old +/-99999 range are handled correctly
    xs1, ys1, xs2, ys2 = zip(*boxes)
    return [min(xs1), min(ys1), max(xs2), max(ys2)]


def analysis_xml(xml_path, class_name=None):
    """Parse a Pascal-VOC style annotation file.

    Args:
        xml_path: absolute path of the xml file
        class_name: if given, keep only objects with exactly this label
    Returns:
        tuple (ver, head, xml_info):
            ver: value of the root 'verified' attribute (None if absent)
            head: [filename, path, width, height, depth]
            xml_info: {'name': xml_path, 'labels': [target, ...]} where each
                target is {'cls': label, 'bbox': [[x1, y1, x2, y2]], 'num': 0};
                'num' is a match counter used later by single_class_match.
    """
    tree = ET.parse(xml_path)
    ver = tree.getroot().get('verified')

    size = tree.find('size')  # look the node up once instead of per field
    head = [tree.find('filename').text,
            tree.find('path').text,
            int(size.find('width').text),
            int(size.find('height').text),
            int(size.find('depth').text)]

    xml_info = {'name': xml_path, 'labels': []}
    for obj in tree.findall('object'):
        label = obj.find('name').text
        if class_name and label != class_name:
            continue
        xmlbox = obj.find('bndbox')
        bbox = [int(xmlbox.find(tag).text)
                for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        # class name, list of coordinate boxes, match counter
        xml_info['labels'].append({'cls': label, 'bbox': [bbox], 'num': 0})

    return ver, head, xml_info


def merge_gt_frame(gt_xml_list, save_path, class_list):
    """Merge per-frame annotations of one field without a verified frame.

    For every class in class_list, boxes from all frames are pooled and
    reduced with NMS (random pseudo-scores only drive the ordering).

    Args:
        gt_xml_list: per-frame xml paths of one field
        save_path: output directory (file name derived from the header) or
            an explicit output file path
        class_list: class names to merge
    """
    head = []
    xml_anno = []
    # take the header (filename/path/size) from a randomly chosen frame
    base = random.randint(0, len(gt_xml_list) - 1)
    for i, xml in enumerate(gt_xml_list):
        _, _head, xml_info = analysis_xml(xml)
        if i == base:
            head = _head
        xml_anno.append(xml_info)

    gt_box = []
    for label in class_list:
        label_box = []
        label_score = []  # random pseudo-scores, only used to drive NMS
        for gt_anno in xml_anno:
            for gt_label in gt_anno["labels"]:
                if gt_label["cls"] == label:
                    label_box.append(gt_label['bbox'][0])
                    label_score.append(random.randrange(90, 99) * 0.01)

        if not label_box:
            # fix: nms() indexes boxes[:, 0] and crashes on an empty array
            continue
        ids = nms(np.array(label_box), np.array(label_score), 0.3)
        gt_box += [[label] + label_box[x] for x in ids]

    if os.path.isdir(save_path):
        xml_path = os.path.join(save_path, head[0].replace('.jpg', '.xml'))
    else:
        xml_path = save_path
    write_xml(xml_path, objstr, gt_box, headstr, head, tailstr)


def merge_gt_field(gt_xml_list, save_path):
    """Merge all frame annotations of one field into one annotation file.

    Exactly one frame must be marked verified="yes" via labelimg; its objects
    act as anchors.  Because a target (e.g. a trichomonad) drifts slightly
    between frames, boxes from the other frames that overlap an anchor
    (IoU > 0.5) are collected and each anchor is widened to the union of its
    matches.

    Args:
        gt_xml_list: per-frame xml paths (full paths) of one field
        save_path: output directory (file name derived from the header) or
            an explicit output file path
    Returns:
        None
    """
    verified = {'name': "", 'labels': []}
    head = []
    other_frames = []

    for frame_xml in gt_xml_list:
        ver, frame_head, frame_info = analysis_xml(frame_xml)
        if ver == 'yes':
            verified, head = frame_info, frame_head
        else:
            other_frames.append(frame_info)

    # todo: ideally locate the best verified frame and merge outwards from it
    for frame_info in other_frames:
        for anchor in verified['labels']:
            anchor_box = anchor['bbox'][0]  # comparisons always use the original anchor box
            for candidate in frame_info['labels']:
                cand_box = candidate["bbox"][0]
                if calc_iou(cand_box, anchor_box) > 0.5:
                    anchor['bbox'].append(cand_box)

    rst = [[anchor['cls']] + max_area(anchor['bbox'])
           for anchor in verified['labels']]

    if os.path.isdir(save_path):
        xml_path = os.path.join(save_path, head[0].replace('.jpg', '.xml'))
    else:
        xml_path = save_path
    write_xml(xml_path, objstr, rst, headstr, head, tailstr)


def merge_gt(xml_list, save_path, ver_list, all_list):
    """Build the field-level ground-truth xml from per-frame annotations.

    Two strategies are combined:
      * ver_list classes: anchored on the verified frame; each anchor box is
        widened to the union of overlapping (IoU > 0.5) boxes from the other
        frames.
      * all_list classes: boxes from all non-verified frames are pooled and
        reduced with NMS (random pseudo-scores, IoU threshold 0.3).

    Args:
        xml_list: per-frame xml paths of one field
        save_path: output directory (file name derived from the header) or
            an explicit output file path
        ver_list: class names merged via the verified-frame strategy
        all_list: class names merged via the NMS pooling strategy
    """
    # fix: was {}, which raised KeyError('labels') when no frame is verified
    verified = {'name': "", 'labels': []}
    head, fallback_head = [], []
    annotations = []
    base = random.randint(0, len(xml_list) - 1)
    for i, xml in enumerate(xml_list):
        ver, _head, xml_info = analysis_xml(xml)
        if i == base:
            fallback_head = _head

        if ver == 'yes':
            verified = xml_info
            head = _head
        else:
            annotations.append(xml_info)
    if not head:
        # no verified frame: use the header of a randomly chosen frame
        head = fallback_head

    rst1 = []
    for label in ver_list:
        for gt_label in verified['labels']:
            if gt_label['cls'] != label:
                continue
            gt_box = gt_label['bbox'][0]
            for dr_info in annotations:
                for dr_label in dr_info['labels']:
                    dr_box = dr_label['bbox'][0]
                    if calc_iou(dr_box, gt_box) > 0.5:
                        gt_label['bbox'].append(dr_box)
        # widest extent over all boxes collected for each anchor of this class
        for gt_label in verified['labels']:
            if gt_label['cls'] != label:
                continue
            rst1.append([label] + max_area(gt_label['bbox']))

    rst2 = []
    for label in all_list:
        label_box = []
        label_score = []  # random pseudo-scores, only used to drive NMS
        for dr_info in annotations:
            for dr_label in dr_info["labels"]:
                if dr_label["cls"] == label:
                    label_box.append(dr_label['bbox'][0])
                    label_score.append(random.randrange(90, 99) * 0.01)
        if not label_box:
            # fix: nms() indexes boxes[:, 0] and crashes on an empty array
            continue
        # pool the whole class and run NMS once
        ids = nms(np.array(label_box), np.array(label_score), 0.3)
        rst2 += [[label] + label_box[x] for x in ids]

    print("rst1", rst1, "rst2", rst2)
    # concatenation of both strategies is the field-level validation output
    rst = rst1 + rst2
    if os.path.isdir(save_path):
        xml_path = os.path.join(save_path, head[0].replace('.jpg', '.xml'))
    else:
        xml_path = save_path
    write_xml(xml_path, objstr, rst, headstr, head, tailstr)


def single_class_match(gt_file, dr_list, class_name, flag='iou', conf=0.1, threshold=0.5):
    """
    Match single-class detection results of one field against its manual
    ground truth, tallying hits, misses and false detections.

    Each detection box is compared to every gt box; a gt box's 'num' counter
    is incremented once per matching detection, and detections matching no
    gt box are collected (then NMS-deduplicated) as false positives.
    Params:
        gt_file: field-level ground-truth xml path
        dr_list: per-frame detection-result txt paths of the same field
        class_name: class to evaluate
        flag: matching mode, 'iou' or 'dis'
        conf: confidence threshold; detections below it are discarded
        threshold: threshold for the chosen matcher — IoU in [0, 1] for
            'iou', a pixel distance (> 0 integer) for 'dis'
    Returns:
        gt: ground-truth stats, format {image, labels [{class, x1y1x2y2, match count} ...]}
        not_matched: unmatched detections, [[x1 y1 x2 y2 conf] ...]
    """
    # read the ground truth, filtered to class_name
    gt = analysis_xml(gt_file, class_name)[2]
    # todo: results txts are pooled; parse by sample-field id — one gt xml per field plus n frame txts
    not_matched = []
    # match each detection-result file against the gt
    for dr_txt in dr_list:
        lines = file_lines_to_list(dr_txt)
        # file_id = dr_txt.split(".txt", 1)[0]
        # file_id = os.path.basename(os.path.normpath(file_id))
        # dr_bboxes = []
        for line in lines:
            tmp_class_name, confidence, left, top, right, bottom = line.split()
            confidence = float(confidence)
            if tmp_class_name != class_name:
                # skip detections of other classes
                continue
            if confidence < conf:
                # skip detections below the confidence threshold
                continue
            dr_box = [int(left), int(top), int(right), int(bottom)]

            match_flag = False
            for gt_label in gt['labels']:
                # compare this detection against every gt box
                gt_box = gt_label['bbox'][0]
                if flag == 'iou':
                    iou = calc_iou(dr_box, gt_box)
                    # print(iou)
                    # cv2.rectangle(img, (dr_box[0], dr_box[1]), (dr_box[2], dr_box[3]), [255, 0, 255], thickness=3)
                    # cv2.rectangle(img, (gt_box[0], gt_box[1]), (gt_box[2], gt_box[3]), [0, 255, 0], thickness=3)
                    # cv2.imshow('result', img)
                    # cv2.waitKey(0)
                    if iou >= threshold:
                        gt_label['num'] += 1
                        match_flag = True  # this detection matched a gt box
                elif flag == 'dis':
                    dis = calc_distance(dr_box, gt_box)
                    if dis <= threshold:
                        gt_label['num'] += 1
                        match_flag = True  # this detection matched a gt box
                else:
                    raise KeyError(f"输入flag为{flag}，请选择iou和dis两种")
            if not match_flag:
                # no gt box matched this detection -> candidate false positive
                dr_box.append(confidence)
                not_matched.append(dr_box)

    # det = np.array(list(set(map(tuple, not_matched))))  # dedupe
    # deduplicate false positives across frames with NMS
    if not_matched:
        det = np.array(not_matched)
        ids = nms(det[:, 0:4], det[:, 4], 0.5)
        not_matched = [not_matched[x] for x in ids]
    # nt = det[ids]
    return gt, not_matched


def stat_sample(gt, not_matched, num_lmt=0):
    """Summarise the match results of one field.

    Args:
        gt: ground-truth stats, format {image, labels [{class, x1y1x2y2, match count} ...]}
        not_matched: unmatched detections, [[x1 y1 x2 y2 conf] ...]
        num_lmt: a gt box counts as detected only when matched by more than
            num_lmt detections
    Returns:
        list: [matched gt count, unmatched gt count, false-detection count]
    """
    detected = sum(1 for item in gt['labels'] if item['num'] > num_lmt)
    missed = len(gt['labels']) - detected
    return [detected, missed, len(not_matched)]


def gt_general_per_frame():
    """Generate field-level gt xmls for sample 8 via the NMS pooling strategy
    (no verified frame required)."""
    sample_num = 8
    sample_info = "qianxinanzhongyiyuan-lijinhui_20251024"
    sample_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}"
    sample_xml_dir = sample_dir + "_image_label"
    base_name = "{:s}_{:d}_{:d}_task_LTS-V900_normal_40x_{:d}_40x_{:03d}.xml"
    field_num, frame_num = 2, 15
    for field in range(1, field_num + 1):
        print("field: ", field)
        xml_per_field = [
            os.path.join(sample_xml_dir,
                         base_name.format(sample_info, sample_num, sample_num, field, frame))
            for frame in range(1, frame_num + 1)
        ]
        view_xml_path = os.path.join(sample_dir, f"field_{field}.xml")
        merge_gt(xml_per_field, view_xml_path, ver_list=[], all_list=["spore", "blastospore"])


def gt_general_per_field():
    """Generate field-level gt xmls for sample 23 via the verified-frame
    strategy for the trichomonad class."""
    sample_num = 23
    sample_info = "ceshi-zhu_20251110"
    sample_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}"
    sample_xml_dir = sample_dir + "_image_label"
    base_name = "{:s}_{:d}_{:d}_task_LTS-V900_normal_40x_{:d}_40x_{:03d}.xml"
    field_num, frame_num = 3, 20
    for field in range(1, field_num + 1):
        print("field: ", field)
        xml_per_field = [
            os.path.join(sample_xml_dir,
                         base_name.format(sample_info, sample_num, sample_num, field, frame))
            for frame in range(1, frame_num + 1)
        ]
        view_xml_path = os.path.join(sample_dir, f"field_{field}.xml")
        # merge_gt_field(xml_per_field, sample_dir)
        merge_gt(xml_per_field, view_xml_path, ver_list=["trichomonad"], all_list=[])


def main_distance():
    """Evaluate spore detection on sample 8 using center-distance matching,
    drawing results per field and printing recall/precision/F1."""
    class_name = "spore"
    sample_num = 8
    sample_info = "qianxinanzhongyiyuan-lijinhui_20251024"
    sample_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}"

    dr_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}_image_detection-results"
    base_name = "{:s}_{:d}_{:d}_task_LTS-V900_normal_40x_{:d}_40x_{:03d}"
    field_num = 2
    frame_num = 15
    sample_result = []
    cv2.namedWindow('result', cv2.WINDOW_FREERATIO)
    for field in range(1, field_num + 1):
        # field-level ground-truth xml
        gt_field_xml = os.path.join(sample_dir, f"field_{field}.xml")
        img_path = analysis_xml(gt_field_xml)[1][1]
        # per-frame detection-result txt files of this field
        dr_list = []
        for frame in range(1, frame_num + 1):
            txt_file = base_name.format(sample_info, sample_num, sample_num, field, frame) + '.txt'
            dr_list.append(os.path.join(dr_dir, txt_file))

        img = cv2.imdecode(np.fromfile(file=img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
        gt, not_match = single_class_match(gt_field_xml, dr_list, class_name, 'dis', 0.1, 30)

        # green: detected gt, yellow: missed gt, red: false detection
        for g in gt["labels"]:
            box = g["bbox"][0]
            color = [0, 255, 0] if g["num"] > 0 else [0, 255, 255]
            cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, thickness=2)

        for d in not_match:
            cv2.rectangle(img, (d[0], d[1]), (d[2], d[3]), [0, 0, 255], thickness=2)

        cv2.imshow("result", img)
        cv2.waitKey(0)
        sample_result.append(stat_sample(gt, not_match))

    sample_result = np.array(sample_result)
    tp = np.sum(sample_result[:, 0])  # detected gt boxes
    fn = np.sum(sample_result[:, 1])  # missed gt boxes
    fp = np.sum(sample_result[:, 2])  # false detections
    # recall = TP / (TP + FN)
    recall = tp / (tp + fn)
    # precision = TP / (TP + FP)
    # fix: old code divided by TP+FN+FP (np.sum over all three columns),
    # which wrongly counted missed gt boxes as detections
    precision = tp / (tp + fp)
    # miss rate
    fn_rate = 1 - recall
    # F1 score
    f1score = 2 * recall * precision / (recall + precision)
    print(f"检出率：{recall}，准确率：{precision}，漏检率：{fn_rate}，F1score：{f1score}")


def main():
    """Evaluate trichomonad detection on sample 23 using IoU matching,
    drawing results per field and printing recall/precision/F1."""
    class_name = "trichomonad"
    sample_num = 23
    sample_info = "ceshi-zhu_20251110"
    sample_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}"

    dr_dir = rf"D:\work2\workspace\hzg\map-calculate\test\{sample_num}_image_detection-results"
    base_name = "{:s}_{:d}_{:d}_task_LTS-V900_normal_40x_{:d}_40x_{:03d}"
    field_num = 3
    frame_num = 20

    sample_result = []
    cv2.namedWindow('result', cv2.WINDOW_FREERATIO)
    for field in range(1, field_num + 1):
        # field-level ground-truth xml
        gt_field_xml = os.path.join(sample_dir, f"field_{field}.xml")
        img_path = analysis_xml(gt_field_xml)[1][1]
        # per-frame detection-result txt files of this field
        dr_list = []
        for frame in range(1, frame_num + 1):
            txt_file = base_name.format(sample_info, sample_num, sample_num, field, frame) + '.txt'
            dr_list.append(os.path.join(dr_dir, txt_file))
        gt, not_match = single_class_match(gt_field_xml, dr_list, class_name, 'iou', 0.2, 0.5)

        img = cv2.imdecode(np.fromfile(file=img_path, dtype=np.uint8), cv2.IMREAD_COLOR)

        # green: detected gt, yellow: missed gt, red: false detection
        for g in gt["labels"]:
            box = g["bbox"][0]
            color = [0, 255, 0] if g["num"] > 0 else [0, 255, 255]
            cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, thickness=3)

        for d in not_match:
            cv2.rectangle(img, (d[0], d[1]), (d[2], d[3]), [0, 0, 255], thickness=3)
        cv2.imshow("result", img)
        cv2.waitKey(0)
        sample_result.append(stat_sample(gt, not_match))

    sample_result = np.array(sample_result)
    tp = np.sum(sample_result[:, 0])  # detected gt boxes
    fn = np.sum(sample_result[:, 1])  # missed gt boxes
    fp = np.sum(sample_result[:, 2])  # false detections
    # recall = TP / (TP + FN)
    recall = tp / (tp + fn)
    # precision = TP / (TP + FP)
    # fix: old code divided by TP+FN+FP (np.sum over all three columns),
    # which wrongly counted missed gt boxes as detections
    precision = tp / (tp + fp)
    # miss rate
    fn_rate = 1 - recall
    # F1 score
    f1score = 2 * recall * precision / (recall + precision)
    print(f"检出率：{recall}，准确率：{precision}，漏检率：{fn_rate}，F1score：{f1score}")


if __name__ == '__main__':
    # Entry point; the other drivers are kept commented for manual switching.
    # gt_general_per_frame()
    # gt_general_per_field()
    # main_distance()
    main()

    # Ad-hoc single-field debugging snippet (kept for reference):
    # cv2.namedWindow('result', cv2.WINDOW_FREERATIO)
    # img = cv2.imdecode(np.fromfile(file=r"D:\work2\workspace\hzg\map-calculate\test\23\23_检测任务\LTS-V900阴道分泌物检测仪\普通\40倍镜\1\40倍镜_010.jpg", dtype=np.uint8), cv2.IMREAD_COLOR)
    # gtt_file = r"D:\work2\workspace\hzg\map-calculate\test\23\23_检测任务\LTS-V900阴道分泌物检测仪\普通\40倍镜\1_label_old.xml"
    # drr_dir = r"D:\work2\workspace\hzg\map-calculate\test\23\23_检测任务\LTS-V900阴道分泌物检测仪\普通\40倍镜\1_detection-results"
    # drr_list = [os.path.join(drr_dir, item) for item in os.listdir(drr_dir)]
    # ground_truth, not_matched_dets = single_class_with_iou(gtt_file, drr_list, 'trichomonad', 0.1, iou_thresh=0.5)
    #
    # for d in not_matched_dets:
    #     cv2.rectangle(img, (d[0], d[1]), (d[2], d[3]), [0, 0, 255], thickness=3)
    #
    # stat_sample(ground_truth, not_matched_dets)
    #
    # cv2.imshow("result", img)
    # cv2.waitKey(0)