import importlib
import math

from torch.backends import cudnn

from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils import ops

# Enable cuDNN-backed GPU inference.
# NOTE(review): `from torch.backends import cudnn` and `cudnn.enabled = True`
# are repeated again below (harmless but redundant), and `importlib` and
# `DetectMultiBackend` appear unused in this file — worth cleaning up.
cudnn.enabled = True
import numpy as np
import torch
from torch.backends import cudnn

cudnn.enabled = True
import os.path
import cv2
from torchvision import transforms
import json
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from models.common import DetectMultiBackend

# Stage-2 patch preprocessing: PIL image -> float tensor scaled to [0, 1].
transform = transforms.Compose([transforms.ToTensor()])

batch_size = 50
num_workers = 5  # NOTE(review): defined but never passed to the DataLoaders below

# MIDOG 2021 slide images and the COCO-style annotation file that describes them.
wsi_path = '/media/hsmy/wanghao_18T/dataset/MIDOG2021/tiff/'
midog_json = '/media/hsmy/wanghao_18T/dataset/MIDOG2021/MIDOG.json'
data = json.load(open(midog_json))
# Map file name -> MIDOG image id, used to look up each slide's annotations.
images_dict = {image["file_name"]: image["id"] for image in data["images"]}

device = torch.device('cuda:0')

# Magnification for the stage-1 detector: 10X downsamples the slide by 4,
# 20X by 2 (see the resize logic in the main loop).
is_level_10X = True
is_level_20X = False
# Stage 1: candidate detector running on 256 px tiles of the downsampled slide.
patch_size_detect = 256
weights_detect = "/media/hsmy/wanghao_18T/code/yolov10/checkpoints/m21_10_256_stain_aug_align_f1_985.pt"
model_detect = AutoBackend(weights=weights_detect, device=device)
model_detect.eval()
model_detect.warmup(imgsz=(batch_size, 3, patch_size_detect, patch_size_detect))

# Stage 2: verifier running on 512 px full-resolution patches around candidates.
patch_size_stage2 = 512
weights_stage2 = "/media/hsmy/wanghao_18T/code/gitee/yolo/runs/detect/m21_40_512_t/weights/best.pt"
model_stage2 = AutoBackend(weights=weights_stage2, device=device)
model_stage2.eval()
model_stage2.warmup(imgsz=(batch_size, 3, patch_size_stage2, patch_size_stage2))


class BuildDetectDataset(Dataset):
    """Serve fixed-size CHW patches cut from a WSI at given top-left corners.

    `wsi` is an HWC numpy image; `center_points` holds (x, y) top-left
    coordinates (despite the name); `patch_size` is the square side length.
    """

    def __init__(self, wsi, center_points, patch_size):
        self.wsi = wsi
        self.center_points = center_points
        self.patch_size = patch_size

    def __getitem__(self, index):
        x0, y0 = self.center_points[index]
        side = self.patch_size
        # Crop the square window, then reorder HWC -> CHW for the detector.
        patch = self.wsi[y0:y0 + side, x0:x0 + side].transpose((2, 0, 1))
        return patch, x0, y0

    def __len__(self):
        return len(self.center_points)


class BuildClassifyDataset(Dataset):
    """Serve zero-padded, `transform`-ed patches centred on candidate points.

    `wsi` is an HWC numpy image; `center_points` holds (x, y) patch centres;
    `patch_size` is the square side length. Windows that overrun the image
    border are zero-padded so every patch is full-size.
    """

    def __init__(self, wsi, center_points, patch_size):
        self.wsi = wsi
        self.center_points = center_points
        self.patch_size = patch_size
        self.height, self.width, _ = wsi.shape

    def __getitem__(self, index):
        cx, cy = self.center_points[index]
        img = self.wsi
        h, w, channels = img.shape
        side = self.patch_size

        # Window in image coordinates; may extend past the borders.
        left = cx - side // 2
        top = cy - side // 2
        right = left + side
        bottom = top + side

        # Intersection of the window with the image.
        src_l, src_r = max(left, 0), min(right, w)
        src_t, src_b = max(top, 0), min(bottom, h)

        # Where that intersection lands inside the padded patch.
        dst_l = src_l - left
        dst_t = src_t - top
        dst_r = dst_l + (src_r - src_l)
        dst_b = dst_t + (src_b - src_t)

        # Zero-filled canvas (float64, np.zeros default) holding the overlap.
        canvas = np.zeros((side, side, channels))
        canvas[dst_t:dst_b, dst_l:dst_r, :] = img[src_t:src_b, src_l:src_r, :]

        # Round-trip through uint8/PIL so the module-level `transform`
        # (ToTensor) sees a regular image.
        item = transform(Image.fromarray(np.uint8(canvas)))
        return cx, cy, item

    def __len__(self):
        return len(self.center_points)


def xyxy2xy(x):
    """Collapse (x1, y1, x2, y2) boxes to their (cx, cy) centres.

    Accepts either a torch tensor or a numpy array of shape (n, 4) and
    returns an (n, 2) result. Centres are written back into a copy of the
    input, so the output keeps the input's dtype.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = (x[:, 0] + x[:, 2]) / 2  # centre x
    out[:, 1] = (x[:, 1] + x[:, 3]) / 2  # centre y
    return out[:, 0:2]


def postprocess(preds, max_det=100, conf_thres=0.001):
    """Convert raw YOLOv10 head output to per-image detection tensors.

    Generalizes the previously hard-coded limits into keyword parameters
    (defaults preserve the original behavior).

    Args:
        preds: Raw model output — a dict with key "one2one", or a
            list/tuple whose first element is the prediction tensor.
        max_det: Maximum number of detections kept per image (was 100).
        conf_thres: Confidence threshold for keeping a detection (was 0.001).

    Returns:
        A list with one (n, 6) tensor per image: (x1, y1, x2, y2, conf, cls).
    """
    if isinstance(preds, dict):
        preds = preds["one2one"]

    if isinstance(preds, (list, tuple)):
        preds = preds[0]

    # (B, C, N) -> (B, N, C); the last dim is 4 box coords + class scores.
    preds = preds.transpose(-1, -2)
    bboxes, scores, labels = ops.v10postprocess(preds, max_det, preds.shape[-1] - 4)
    bboxes = ops.xywh2xyxy(bboxes)
    preds = torch.cat([bboxes, scores.unsqueeze(-1), labels.unsqueeze(-1)], dim=-1)
    # Keep only confident detections, per image.
    mask = preds[..., 4] > conf_thres
    return [p[mask[idx]] for idx, p in enumerate(preds)]

def postprocess2(preds, max_det=3, conf_thres=0.5):
    """Convert raw stage-2 YOLOv10 head output to per-image detections.

    Same pipeline as stage 1 but with stricter defaults, now exposed as
    keyword parameters (defaults preserve the original behavior).

    Args:
        preds: Raw model output — a dict with key "one2one", or a
            list/tuple whose first element is the prediction tensor.
        max_det: Maximum number of detections kept per image (was 3).
        conf_thres: Confidence threshold for keeping a detection (was 0.5).

    Returns:
        A list with one (n, 6) tensor per image: (x1, y1, x2, y2, conf, cls).
    """
    if isinstance(preds, dict):
        preds = preds["one2one"]

    if isinstance(preds, (list, tuple)):
        preds = preds[0]

    # (B, C, N) -> (B, N, C); the last dim is 4 box coords + class scores.
    preds = preds.transpose(-1, -2)
    bboxes, scores, labels = ops.v10postprocess(preds, max_det, preds.shape[-1] - 4)
    bboxes = ops.xywh2xyxy(bboxes)
    preds = torch.cat([bboxes, scores.unsqueeze(-1), labels.unsqueeze(-1)], dim=-1)
    # Keep only confident detections, per image.
    mask = preds[..., 4] > conf_thres
    return [p[mask[idx]] for idx, p in enumerate(preds)]


def check_gt(x, y, annotations):
    """Match a predicted point against ground-truth annotations.

    The +25 offset shifts each bbox's top-left corner to its centre
    (presumably the boxes are 50 px square — TODO confirm against the
    annotation format). A prediction matches the first annotation whose
    centre lies within 20 px.

    Returns:
        (True, index) of the first matching annotation, else (False, -1).
    """
    for idx, anno in enumerate(annotations):
        gt_x = anno["bbox"][0] + 25
        gt_y = anno["bbox"][1] + 25
        if math.sqrt((x - gt_x) ** 2 + (y - gt_y) ** 2) <= 20:
            return True, idx
    return False, -1

# Keep only slides that carry at least one mitotic-figure annotation
# (category_id == 1 in the MIDOG annotation file); others are skipped.
wsi_infer_arr = []
for wsi_file in os.listdir(wsi_path):
    image_id = images_dict[wsi_file]
    # test_arr = [95, 69, 120, 28]
    # if image_id in test_arr:
    #     continue
    annotations = [anno for anno in data["annotations"] if
                   anno["image_id"] == image_id and anno["category_id"] == 1]
    if len(annotations) == 0:
        # print(f"{wsi_file} no mitosis")
        continue
    wsi_infer_arr.append(wsi_file)

# Per-slide metric accumulators (d_* were for detection-only metrics; kept
# for interface parity although they are never filled below).
ps, rs, f1s = [], [], []
d_ps, d_rs, d_f1s = [], [], []
for wsi_file in tqdm(sorted(wsi_infer_arr)):
    image_id = images_dict[wsi_file]
    annotations = [anno for anno in data["annotations"] if
                   anno["image_id"] == image_id and anno["category_id"] == 1]
    file_path = os.path.join(wsi_path, wsi_file)
    slide = cv2.imread(file_path)
    slide = cv2.cvtColor(slide, cv2.COLOR_BGR2RGB)
    height, width, _ = slide.shape
    # Downsample for stage-1 detection: 10X -> /4, 20X -> /2, else native.
    if is_level_10X:
        img_10x = cv2.resize(slide, (width // 4, height // 4), interpolation=cv2.INTER_NEAREST)
    elif is_level_20X:
        img_10x = cv2.resize(slide, (width // 2, height // 2), interpolation=cv2.INTER_NEAREST)
    else:
        img_10x = slide

    height_10x, width_10x, _ = img_10x.shape

    # Tile the downsampled slide into patch_size_detect squares; a tile that
    # would overrun an edge is shifted back so every patch is full-size.
    num = 0
    detect_points = []
    for y in range(0, height_10x, patch_size_detect):
        if y + patch_size_detect > height_10x:
            y = height_10x - patch_size_detect
        for x in range(0, width_10x, patch_size_detect):
            if x + patch_size_detect > width_10x:
                x = width_10x - patch_size_detect
            point = (x, y)
            detect_points.append(point)
            num += 1

    # Stage 1: candidate-cell detection on the downsampled slide.
    classify_points = []
    dataset = BuildDetectDataset(img_10x, detect_points, patch_size_detect)
    data_loader = DataLoader(dataset, batch_size=batch_size)

    for index, (imgs, start_x_arr, start_y_arr) in enumerate(data_loader):
        im = imgs.to(device, non_blocking=True)
        im = im.float() / 255
        preds = model_detect(im)
        preds = postprocess(preds)
        for i, (det, img, x, y) in enumerate(zip(preds, imgs, start_x_arr, start_y_arr)):
            if len(det):
                for *xyxy, conf, cls in reversed(det):
                    xy = (xyxy2xy(torch.tensor(xyxy).view(1, 4))).view(-1).tolist()
                    output_x = xy[0] + x
                    output_y = xy[1] + y
                    point = [int(output_x), int(output_y)]
                    # BUGFIX: x (point[0]) must be bounded by the width and
                    # y (point[1]) by the height; the original compared them
                    # against the swapped dimensions.
                    if point[0] > width_10x or point[1] > height_10x or point[0] < 0 or point[1] < 0:
                        continue
                    # Map the candidate back to full-resolution coordinates.
                    if is_level_10X:
                        point = [p * 4 for p in point]
                    elif is_level_20X:
                        point = [p * 2 for p in point]
                    classify_points.append(point)

    # Stage 2: mitosis verification on full-resolution patches centred on
    # each candidate point.
    classify_dataset = BuildClassifyDataset(slide, classify_points, patch_size_stage2)
    classify_data_loader = DataLoader(classify_dataset, batch_size=batch_size)

    matched = []
    tp, fp, fn = 0, 0, 0
    half_patch = patch_size_stage2 // 2  # patch top-left = centre - half side
    for index, (x_centers, y_centers, imgs_40x) in enumerate(classify_data_loader):
        im = imgs_40x.to(device, non_blocking=True)
        # NOTE(review): imgs_40x comes from ToTensor and is already in
        # [0, 1]; dividing by 255 again scales it to [0, 1/255]. Left
        # unchanged in case the stage-2 model was trained this way — confirm.
        im = im.float() / 255
        preds = model_stage2(im)
        preds = postprocess2(preds)
        for (x, y, img_40x, pred) in zip(x_centers, y_centers, imgs_40x, preds):
            if len(pred):
                for *xyxy, conf, cls in reversed(pred):
                    xy = (xyxy2xy(torch.tensor(xyxy).view(1, 4))).view(-1).tolist()
                    # Patch-local coordinates -> slide coordinates.
                    output_x = xy[0] + x - half_patch
                    output_y = xy[1] + y - half_patch
                    gt, match_index = check_gt(output_x, output_y, annotations)
                    if gt:
                        # Count each ground-truth annotation at most once.
                        if match_index not in matched:
                            tp += 1
                            matched.append(match_index)
                    else:
                        fp += 1

    # Every unmatched ground-truth annotation is a miss.
    fn += (len(annotations) - len(matched))
    p = tp / (tp + fp) if tp + fp > 0 else 0
    r = tp / (tp + fn) if tp + fn > 0 else 0
    f1 = 2 * p * r / (p + r) if p + r > 0 else 0
    ps.append(p)
    rs.append(r)
    f1s.append(f1)
    print(f"{wsi_file} 一共{num}张patch, 一共{len(classify_points)}个检测点  p: {format(p, '.3f')} r: {format(r, '.3f')} f1: {format(f1, '.3f')}")

# Average precision / recall / F1 across all evaluated slides.
print(f"avg p: {format(np.mean(ps), '.3f')} r: {format(np.mean(rs), '.3f')} f1: {format(np.mean(f1s), '.3f')}")
