import os
import random
from typing import Dict, Tuple
from scipy.ndimage import label
from skimage.segmentation import watershed
from lib.models_star import Net
import torch
from glob import glob
from PIL import Image
import torchvision.transforms.functional as TF
from lib.metrics import all_in_one
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import remove_small_objects
from scipy.ndimage.morphology import binary_fill_holes
from lib.utils import get_boundary_count, remap_label_fast
from datasets import datasets_pe as ds
from torch.utils.data import DataLoader
from tqdm import tqdm
import scipy.io as sio
from stardist import dist_to_coord
import cv2

def apply_watershed(distance_transform=None,
                prob_map=None,
                foreground_mask=None):
    """Split a foreground mask into labelled instances.

    Connected components of ``prob_map`` seed a marker-controlled
    watershed that floods ``distance_transform`` inside
    ``foreground_mask``; the small compactness term favours rounder
    basins. Returns the labelled instance map.
    """
    markers, _ = label(prob_map)
    flooded = watershed(
        distance_transform,
        markers=markers,
        mask=foreground_mask,
        compactness=0.01,
    )
    return flooded

class ContrastEnhancer:
    """Optionally boost the contrast of an image tensor.

    Modes:
      - ``"fix"``: always apply a constant boost of ``1 + alpha``.
      - ``"threshold"``: apply the constant boost only when the measured
        contrast of non-white pixels falls below ``contrast_th``.
      - ``"dynamic"``: like ``"threshold"``, but scale the boost by how far
        below the threshold the measured contrast is.

    Pixels whose channel-mean exceeds ``white_th`` are treated as white
    background and excluded from the contrast measurement.
    """

    def __init__(self, mode: str = "fix", alpha: float = 0.6, contrast_th: float = 0.16, white_th: float = 0.78125) -> None:
        assert mode in ["fix", "threshold", "dynamic"], "mode should be one of [fix, threshold, dynamic]"
        self.mode        = mode
        self.alpha       = alpha
        self.contrast_th = contrast_th
        self.white_th    = white_th

    def __call__(self, image: torch.Tensor) -> torch.Tensor:
        """Return ``image``, contrast-adjusted according to the configured mode."""
        if self.mode == "fix":
            return TF.adjust_contrast(image, 1 + self.alpha)
        gray = image.mean(0)
        # Contrast = std-dev of the non-white (tissue) pixels only.
        measured = gray[gray < self.white_th].std()
        boost = None
        if measured < self.contrast_th:
            boost = self.alpha if self.mode == "threshold" else self.alpha * (self.contrast_th - measured)
        return image if boost is None else TF.adjust_contrast(image, 1 + boost)

class Predicter:
    """Evaluate a trained ``Net`` on a test set and report instance metrics.

    Depending on ``run_mode`` it either scores a single checkpoint
    (``'tt'``) or averages metrics over six train/val/test fold
    permutations of a 3-fold split. Reported metrics: AJI, DICE2, PQ,
    DQ, SQ, DICE.
    """

    def __init__(self, **config):
        """Build the model and read evaluation settings.

        ``config`` must contain the top-level keys used below plus a
        ``"test"`` sub-dict with the inference settings.
        """
        test_conf = config["test"]
        self.model = Net(
            int(config["n_rays"]),
            with_pe=False,
            with_sff=config["with_sff"],
            ratio=int(config["ratio"]),
        ).cuda()
        self.image_paths = sorted(glob(test_conf["images_path"]))
        self.label_paths = sorted(glob(test_conf["labels_path"]))
        self.th = float(test_conf["th"])                    # marker probability threshold
        self.remove_small = int(test_conf["remove_small"])  # min instance size in pixels
        self.show_pic = bool(test_conf["show_picture"])
        self.use_tta = bool(test_conf["use_tta"])
        self.dataset = config["dataset"]
        self.run_mode = config["run_mode"]
        self.save_path = config["save_path"]
        self.model_name = config["model_name"]
        self.data_dir = config["data_dir"]
        # BUG FIX: these were passed positionally in the order
        # (ce_mode, th_white, th_contrast, alpha_contrast) while the
        # ContrastEnhancer signature is (mode, alpha, contrast_th,
        # white_th), silently swapping alpha and white_th. Keywords make
        # each config value land on the parameter it is named after.
        self.ce = ContrastEnhancer(
            mode=test_conf["ce_mode"],
            alpha=test_conf["alpha_contrast"],
            contrast_th=test_conf["th_contrast"],
            white_th=test_conf["th_white"],
        )
        self.with_ce = bool(test_conf["with_ce"])
        self.n_ray = int(config["n_rays"])
        self.with_sff = bool(config["with_sff"])
        self.with_pe = bool(config["with_pe"])
        self.ratio = int(config["ratio"])
        self.bd_th = int(test_conf["bd_th"])

    def __call__(self) -> Dict[str, float]:
        """Run the configured evaluation and return the averaged metrics.

        (The original annotation said ``-> None`` but a dict was always
        returned; the annotation is corrected here.)
        """

        def get_metrics(dataset):
            # Average each metric over all images of `dataset`.
            metrics = {"AJI": [], "DICE2": [], "PQ": [], "DQ": [], "SQ": [], "DICE": []}
            for i in range(len(dataset)):
                img, *label = dataset.load_data_from_disk(i)
                img = self.ce(img) if self.with_ce else img
                img, label = img.unsqueeze(0).cuda(), label[0].cuda()
                m = self.predict(img, label)
                # predict() may return an int sentinel for skipped images.
                if not isinstance(m, int):
                    for k, v in m.items():
                        metrics[k].append(v)
            for k, vals in metrics.items():
                # Guard: if every image was skipped the list is empty.
                metrics[k] = sum(vals) / len(vals) if vals else 0.0
            return metrics

        if self.run_mode == 'tt':
            # Single-checkpoint evaluation.
            data = getattr(ds, self.dataset)(
                base_dir=self.data_dir,
                re_gen=False,
                mode="test",
                load_from_memory=False,
            )
            self.model.load_state_dict(
                torch.load(
                    f"{self.save_path}/{self.model_name.replace('.pth', f'_{self.n_ray}rays.pth')}"
                )
            )
            self.model.eval()
            return get_metrics(data)
        else:
            metrics = {"AJI": [], "DICE2": [], "PQ": [], "DQ": [], "SQ": [], "DICE": []}
            # Every (train, val, test) permutation of the 3-fold split.
            for t, v, test in [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]:
                data = getattr(ds, self.dataset)(
                    base_dir=self.data_dir,
                    re_gen=False,
                    mode="test",
                    load_from_memory=False,
                    fold_id=test,
                )
                # Renamed from `dict` to stop shadowing the builtin.
                state_dict = torch.load(f"{self.save_path}/{t}{v}{test}/{self.model_name}")
                # strict=False tolerates checkpoints that lack optional
                # modules (e.g. SFF blocks).
                self.model.load_state_dict(state_dict, strict=False)
                self.model.eval()
                m = get_metrics(data)
                for k, val in m.items():
                    metrics[k].append(val)
                print(m)
            for k, vals in metrics.items():
                metrics[k] = sum(vals) / len(vals)
            return metrics

    @torch.no_grad()
    def tta(self, img) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Test-time augmentation over the four right-angle rotations.

        Only the semantic prediction is averaged; the auxiliary maps come
        from the un-rotated pass. Returns
        ``(mean class prediction, prob map (channel 0), star distances (channels 1+))``.
        """
        temp_pred = []
        pred, temp = self.model(img)
        temp_dist, temp_star = temp[:, 0, :, :], temp[:, 1:, :, :]
        # NOTE(review): the un-rotated prediction is averaged as raw
        # output while the rotated ones are softmax-ed — looks
        # inconsistent; kept as-is pending confirmation.
        temp_pred.append(pred)
        for i in range(1, 4):
            pred, _ = self.model(img.rot90(i, (-2, -1)))
            pred = pred.rot90(-i, (-2, -1)).softmax(dim=1)
            temp_pred.append(pred)
        return torch.mean(torch.stack(temp_pred), dim=0), temp_dist, temp_star

    def get_contour(self, dists):
        """Turn per-pixel star distances into the boundary-count map used
        as the watershed landscape (via ``get_boundary_count``)."""
        coord = dist_to_coord(np.transpose(dists, (1, 2, 0)))
        coord = coord.transpose(-1, 0, 1, 2).astype(int)
        mask = np.zeros((coord.shape[1], coord.shape[2]), dtype=np.uint16)
        return get_boundary_count(coord, mask, self.bd_th)

    @torch.no_grad()
    def predict(self, img, label):
        """Segment one padded image, post-process with watershed, and score
        the result against ``label`` via ``all_in_one``."""
        torch.cuda.empty_cache()
        h, w = img.shape[2], img.shape[3]
        # Pad bottom/right so H and W are multiples of 8 (network stride);
        # torchvision pad order is (left, top, right, bottom).
        img = TF.pad(img, (0, 0, 0, 8 - h % 8)) if h % 8 != 0 else img
        img = TF.pad(img, (0, 0, 8 - w % 8, 0)) if w % 8 != 0 else img
        if self.use_tta:
            pred, prob, dist = self.tta(img)
        else:
            # BUG FIX: the model returns a 2-tuple (pred, temp), so the old
            # 3-way unpack `pred, prob, dist = self.model(img)` raised a
            # ValueError whenever TTA was disabled. Split `temp` exactly as
            # tta() does.
            pred, temp = self.model(img)
            prob, dist = temp[:, 0, :, :], temp[:, 1:, :, :]
        pred = pred.argmax(dim=1).squeeze(0, 1)[:h, :w].cpu().numpy()
        prob = prob.squeeze(0)[:h, :w].cpu().numpy()
        dist = dist.squeeze(0)[:, :h, :w].cpu().numpy()
        pred = binary_fill_holes(pred.astype(bool))
        prob_marker = np.where(prob > self.th, 1, 0)
        distance_transform = self.get_contour(dist)
        # Sink confident-foreground and background pixels so the watershed
        # floods outwards from the markers.
        distance_transform[prob > 0.25] = -10
        distance_transform[pred == 0] = -10
        pred_inst_map = apply_watershed(
            distance_transform=distance_transform,
            prob_map=prob_marker,
            foreground_mask=pred,
        )
        pred_inst_map = remove_small_objects(pred_inst_map, min_size=self.remove_small)
        pred_inst_map = torch.from_numpy(pred_inst_map).int().cuda()
        pred_inst_map = remap_label_fast(pred_inst_map)
        m = all_in_one(label, pred_inst_map)

        # Visualise prediction vs. ground truth.
        if self.show_pic:
            pred_inst_map = pred_inst_map.cpu().detach().numpy()
            unique_ids = np.unique(pred_inst_map)[1:]  # skip background id 0
            colors = [
                tuple(random.randint(50, 255) for _ in range(3))
                for _ in range(len(unique_ids))
            ]
            # Create a color-coded image (renamed loop var: `id` is a builtin).
            color_image = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
            for i, inst_id in enumerate(unique_ids):
                color_image[pred_inst_map == inst_id] = colors[i]
            color_image[pred_inst_map == 0] = (0, 0, 0)
            plt.figure(figsize=(20, 16))
            plt.subplot(1, 3, 1)
            plt.imshow(color_image)
            plt.axis("off")
            plt.subplot(1, 3, 2)
            plt.imshow(distance_transform)
            plt.axis("off")
            plt.subplot(1, 3, 3)
            plt.imshow(label.bool().cpu().detach().numpy())
            plt.axis("off")
            plt.show()
        return m
