import argparse
import math
import random
from typing import Dict

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import triton
import triton.language as tl
from sklearn.decomposition import NMF
from torch import Tensor
import torchvision.transforms.functional as TF
from tqdm.auto import tqdm
import numba
import yaml


def set_seed(seed=42):
    """Seed every RNG in use (python, numpy, torch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducible runs."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
    # trade cuDNN autotuning speed for run-to-run determinism
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


class Logger:
    """Minimal append-only text logger writing one line per message."""

    def __init__(self, name: str):
        # `name` is the path of the log file; it is opened lazily on each log()
        self.name = name

    def log(self, msg: str):
        """Append `msg` plus a trailing newline to the log file."""
        with open(self.name, "a") as handle:
            handle.write(f"{msg}\n")


def _assert_uint8_image(x):
    assert x.ndim == 3 and x.shape[-1] == 3 and x.dtype.type is np.uint8


def rgb_to_density(x):
    """Convert an RGB uint8 image to optical density, -log(I / 255).

    The result is clipped to at least 1e-6 so downstream factorizations
    never see zero densities.
    """
    _assert_uint8_image(x)
    safe = np.maximum(x, 1)  # avoid log(0) for pure-black pixels
    return np.maximum(-np.log(safe / 255), 1e-6)


def density_to_rgb(x):
    """Invert optical density back to an RGB uint8 image: 255 * exp(-x)."""
    rgb = 255 * np.exp(-x)
    return np.clip(rgb, 0, 255).astype(np.uint8)


def rgb_to_lab(x):
    """Convert an (H, W, 3) uint8 RGB image to CIELAB via OpenCV."""
    _assert_uint8_image(x)
    return cv2.cvtColor(x, cv2.COLOR_RGB2LAB)


def lab_to_rgb(x):
    """Convert an (H, W, 3) uint8 CIELAB image (OpenCV layout) back to RGB."""
    _assert_uint8_image(x)
    return cv2.cvtColor(x, cv2.COLOR_LAB2RGB)


def extract_stains(x, subsample=128, l1_reg=0.001, tissue_threshold=200):
    """Estimate a 2-stain decomposition of an H&E-style image via NMF.

    The optical-density image D with shape (N, 3) is factorized as

        D = W @ H

    where W (N, 2) holds per-pixel stain concentrations and H (2, 3) is the
    stain matrix, solved as  min (D - W @ H)^2 + |W|_1  — i.e. an NMF with a
    sparsity prior on the concentrations.

    Args:
        x: RGB uint8 image of shape (H, W, 3).
        subsample: fit the NMF on every `subsample`-th tissue pixel (speed).
        l1_reg: L1 regularization weight on W.
        tissue_threshold: CIELAB L-channel threshold; only darker pixels
            (L below it) are used to fit the stain matrix.
    Returns:
        (H, stains): the row-normalized (2, 3) stain matrix and the
        (height, width, 2) concentration image for the full input.
    """
    _assert_uint8_image(x)

    nmf = NMF(n_components=2, init="random", random_state=0, alpha_W=l1_reg, alpha_H=0, l1_ratio=1)

    # work in optical-density space, where stains combine additively
    density = rgb_to_density(x)

    # restrict the fit to darker (tissue) pixels
    tissue = rgb_to_lab(x)[..., 0] < tissue_threshold
    tissue_density = density[tissue]

    # fitting on a subsample is much faster and barely changes the estimate
    nmf.fit(tissue_density[::subsample])

    stain_matrix = nmf.components_

    # normalize each stain vector to unit length
    stain_matrix = stain_matrix / np.linalg.norm(stain_matrix, axis=1, keepdims=True)
    # fix the stain ordering so the first row has the larger red component
    if stain_matrix[0, 0] < stain_matrix[1, 0]:
        stain_matrix = stain_matrix[[1, 0]]

    # project the *full* image onto the stain basis via the pseudo-inverse
    concentrations = density.reshape((-1, 3)) @ np.linalg.pinv(stain_matrix)
    concentrations = concentrations.reshape(x.shape[:2] + (2, ))

    return stain_matrix, concentrations


def stains_to_rgb(stains, stain_matrix):
    """Reconstruct an RGB uint8 image from stain concentrations.

    Args:
        stains: (H, W, 2) concentration image.
        stain_matrix: (2, 3) stain matrix.
    """
    assert stains.ndim == 3 and stains.shape[-1] == 2
    assert stain_matrix.shape == (2, 3)
    density = stains @ stain_matrix
    return density_to_rgb(density)


def augment_stains(x, amount_matrix=0.2, amount_stains=0.2, n_samples=1, subsample=128, rng=None):
    """Stain-color augmentation by perturbing the stain decomposition.

    Steps:
        1) extract the stain matrix and per-pixel concentrations,
        2) add uniform noise (+- amount_matrix) to the matrix and scale the
           concentrations by (1 +- amount_stains),
        3) reconstruct the RGB image(s).

    Returns a single uint8 image when n_samples == 1, otherwise a stacked
    array of n_samples images.
    """
    _assert_uint8_image(x)
    rng = np.random if rng is None else rng

    base_matrix, base_stains = extract_stains(x, subsample=subsample)

    # perturb the stain matrix (draw order matters for rng reproducibility)
    matrices = base_matrix[None] + amount_matrix * rng.uniform(-1, 1, (n_samples, 2, 3))
    matrices = np.maximum(matrices, 0)

    # then scale the concentrations
    scales = 1 + amount_stains * rng.uniform(-1, 1, (n_samples, 1, 1, 2))
    perturbed = np.maximum(base_stains[None] * scales, 0)

    if n_samples == 1:
        return stains_to_rgb(perturbed[0], matrices[0])
    return np.stack(tuple(stains_to_rgb(s, m) for s, m in zip(perturbed, matrices)), 0)


def stains_nomralize(x, stain_matrix, subsample=128):
    """Stain-normalize image `x` onto a reference stain matrix.

    Extracts the image's own stain concentrations, then reconstructs the
    image using the supplied reference `stain_matrix` instead of its own.

    NOTE: the function name keeps its historical typo ("nomralize") so
    existing callers keep working.

    Args:
        x: RGB uint8 image (H, W, 3).
        stain_matrix: (2, 3) reference stain matrix used for reconstruction.
        subsample: forwarded to extract_stains; the default preserves the
            previous hard-coded value.
    Returns:
        RGB uint8 image re-colored with the reference stains.
    """
    _assert_uint8_image(x)
    # the image's own stain matrix is discarded — only the concentrations
    # are needed (the old code bound it to an unused local)
    _, stains = extract_stains(x, subsample=subsample)
    return stains_to_rgb(stains, stain_matrix)


def oversample_classes(targets: Tensor, n_extra_classes=4, n_classes=7):
    """Build an oversampled index list that boosts rare classes.

    Assumes channel 1 of `targets` holds a per-pixel class map (as used
    throughout this function). Class frequencies are estimated on a strided
    subsample; the `n_extra_classes` rarest classes get extra sample indices
    drawn (with replacement) proportionally to the squared, percentile-capped
    per-sample pixel count of that class.

    Returns:
        1-D LongTensor of dataset indices: the identity range followed by
        the extra indices for each rare class.
    """
    counts = torch.bincount(targets[:, 1, ::4, ::4].flatten(), minlength=n_classes)
    rare = torch.argsort(counts)[:n_extra_classes]

    # relative oversampling weight per rare class, normalized to max 1
    weights = torch.sqrt(torch.sum(counts[1:]) / counts[rare])
    weights = weights / torch.max(weights)

    chunks = [torch.arange(len(targets))]
    for cls_id, weight in zip(rare, weights):
        # per-sample pixel count of this class (on a coarser subsample)
        score = torch.sum(targets[:, 1, ::2, ::2] == cls_id, dim=(1, 2))
        # cap outliers so a single huge sample does not dominate
        cap = np.percentile(score.numpy(), 99.8)
        score = torch.clamp(score, 0, cap)
        score = score**2
        score = score / torch.sum(score)
        n_draw = int(weight * len(targets))
        chunks.append(torch.multinomial(score, n_draw, replacement=True))

    return torch.cat(chunks)


@torch.no_grad()
def remap_label_fast(label: Tensor, max_id: int = None):
    """Remap label ids to consecutive integers 0..K-1 via a lookup table.

    Fast variant: builds a dense table of size max_id + 1, so only use it
    when the maximum label value is reasonably small (e.g. below ~1e6).

    Args:
        label: (H, W) tensor of instance ids.
        max_id: maximum value in `label`; computed from the data when None.
    Returns:
        Tensor of the same shape with ids remapped to 0..K-1 (sorted order
        of the original unique values).
    """
    if max_id is None:
        max_id = label.max().item()
    ids = label.unique()
    lut = torch.zeros(max_id + 1, dtype=label.dtype, device=label.device)
    lut[ids] = torch.arange(len(ids), dtype=label.dtype, device=label.device)
    return lut[label]

# Triton kernel: accumulate per-instance pixel counts and coordinate sums
# for nucleus-instance centroids (one program instance per pixel).
@triton.jit
def mask_to_coord_triton_kernel(
    mask_ptr,       # pointer to the (H, W) instance-id mask (0 = background)
    coords,         # pointer to an (N, 2) int32 coordinate-sum buffer
    counts,         # pointer to an (N,) int32 pixel-count buffer
    mask_stride,    # row stride of the mask
    coords_stride,  # row stride of coords (not used in the body)
):
    r = tl.program_id(0)  # row index of this pixel
    c = tl.program_id(1)  # column index of this pixel
    index = r * mask_stride + c  # flat offset of the pixel in the mask
    mask_value = tl.load(mask_ptr + index)  # instance id at this pixel
    if mask_value == 0:  # background pixel: nothing to accumulate
        return
    else:
        tl.atomic_add(counts + mask_value - 1, 1)  # one more pixel for instance id
        tl.atomic_add(coords + (mask_value - 1) * 2, r)  # add row into coords[id-1, 0]
        tl.atomic_add(coords + mask_value * 2 - 1, c)  # add column into coords[id-1, 1]
        # NOTE(review): this stores (row, col) per instance, while the
        # numba/CPU variants below store (col, row) i.e. (x, y) — confirm
        # which axis ordering downstream consumers expect.


def mask_to_coord_triton(mask: Tensor) -> Tensor:
    """Compute per-instance centroids on GPU via the triton kernel.

    Args:
        mask: (H, W) CUDA tensor of instance ids starting at 1 (0 = background);
            ids may be non-contiguous, max value N.
    Returns:
        (N, 2) float tensor of coordinate means; rows for missing ids are 0.
        NOTE(review): the kernel accumulates (row, col) sums, so the result
        ordering may differ from the (x, y) of the CPU variants — confirm.
    """
    num_instances = mask.max()
    coord_sums = torch.zeros((num_instances, 2), dtype=torch.int32).cuda()
    pixel_counts = torch.zeros((num_instances), dtype=torch.int32).cuda()

    mask_to_coord_triton_kernel[mask.shape](
        mask, coord_sums, pixel_counts, mask.stride(0), coord_sums.stride(0)
    )
    # mean = sum / count; instances with zero pixels yield nan -> forced to 0
    centroids = coord_sums / pixel_counts.unsqueeze(1)
    return centroids.nan_to_num(0)


@numba.jit(nopython=True)
def mask_to_coord_numba_kernel(mask: np.ndarray, coords: np.ndarray, counts: np.ndarray):
    """Accumulate per-instance pixel counts and (x, y) coordinate sums.

    coords[id-1] receives (column_sum, row_sum); counts[id-1] the pixel count.
    Background pixels (id 0) are skipped.
    """
    rows, cols = mask.shape
    for r in range(rows):
        for c in range(cols):
            inst = mask[r, c]
            if inst == 0:
                continue
            counts[inst - 1] += 1
            coords[inst - 1, 0] += c  # x = column
            coords[inst - 1, 1] += r  # y = row
    return coords, counts


def mask_to_coord_numba(mask: np.ndarray) -> np.ndarray:
    """Compute the centroid (x, y) of every nucleus instance on CPU.

    Args:
        mask: (H, W) integer instance mask; ids start at 1, may be
            non-contiguous, max value N.
    Returns:
        (N, 2) float32 array; row i is the centroid of id i + 1.
        Ids with no pixels get centroid (0, 0) — matching the triton path's
        nan_to_num(0) — instead of the previous 0/0 = nan plus a runtime
        divide warning.
    """
    max_value = mask.max()
    coords = np.zeros((max_value, 2), dtype=np.float32)
    counts = np.zeros((max_value), dtype=np.int32)
    coords, counts = mask_to_coord_numba_kernel(mask, coords, counts)

    # clamp zero counts to 1: the corresponding coord sums are 0, so the
    # result is 0 for missing ids rather than nan
    return coords / np.maximum(counts, 1)[:, None]


def mask_to_coord(mask: Tensor) -> Tensor:
    '''
    Compute the centroid coordinates of every nucleus instance.
    Args:
        mask: instance mask; ids start at 1, may be non-contiguous, max value N
    Returns:
        coords: instance centroids, (N, 2); row index 0 corresponds to id == 1
    '''
    # dispatch: triton kernel for CUDA tensors, numba loop for CPU tensors.
    # NOTE(review): the two backends appear to disagree on axis order —
    # the triton kernel accumulates (row, col) while the numba kernel
    # accumulates (col, row); confirm before relying on the ordering.
    if mask.is_cuda:
        return mask_to_coord_triton(mask)
    return torch.from_numpy(mask_to_coord_numba(mask.numpy()))


def mask_to_coord_cpu(mask: Tensor) -> Tensor:
    """Compute the centroid (x, y) of every nucleus instance (pure torch).

    Args:
        mask: (H, W) integer instance mask; ids start at 1, may be
            non-contiguous, max value N.
    Returns:
        (N, 2) float32 tensor; row i holds (x_mean, y_mean) of id i + 1;
        rows for missing ids stay zero.
    """
    max_value = int(mask.max().item())
    coords = torch.zeros((max_value, 2), dtype=torch.float32)

    for inst_id in mask.unique().tolist():
        # Skip background explicitly. The old code used unique()[1:], which
        # silently dropped the smallest instance id whenever the mask
        # contained no background (0) pixels.
        if inst_id == 0:
            continue
        ys, xs = torch.nonzero(mask == inst_id, as_tuple=True)
        coords[inst_id - 1] = torch.tensor([xs.float().mean(), ys.float().mean()])
    return coords


# Triton kernel: compute each nucleus instance's radius as the maximum
# distance from its centroid to any of its pixels (one program per pixel).
@triton.jit
def mask_coord_to_radius_kernel(
    mask_ptr,       # pointer to the (H, W) instance-id mask (0 = background)
    coords,         # pointer to the (N, 2) centroid buffer
    radius,         # pointer to the (N,) output buffer
    mask_stride,    # row stride of the mask
    coords_stride,  # row stride of coords (not used in the body)
):
    r = tl.program_id(0)  # row index of this pixel
    c = tl.program_id(1)  # column index of this pixel
    index = r * mask_stride + c  # flat offset of the pixel in the mask
    mask_value = tl.load(mask_ptr + index)  # instance id at this pixel
    if mask_value == 0:  # background pixel: nothing to update
        return
    else:
        x = tl.load(coords + (mask_value - 1) * 2)  # centroid component coords[id-1, 0]
        y = tl.load(coords + mask_value * 2 - 1)  # centroid component coords[id-1, 1]
        # Euclidean distance from the centroid to this pixel; (x, y) is paired
        # with (r, c), i.e. the (row, col) layout written by
        # mask_to_coord_triton_kernel.
        dist = tl.sqrt((x - r) * (x - r) + (y - c) * (y - c))
        # keep the largest distance seen for this instance.
        # NOTE(review): the caller allocates `radius` as int32 while `dist`
        # is floating point — confirm atomic_max does not truncate/misread.
        tl.atomic_max(radius + mask_value - 1, dist)


def mask_coord_to_radius(mask: Tensor, coords: Tensor):
    '''
    Compute the radius of every nucleus instance on GPU.
    Args:
        mask: instance mask; ids start at 1, may be non-contiguous, max value N
        coords: instance centroids, (N, 2); row index 0 corresponds to id == 1
    Returns:
        radius: per-instance radius, length N.
        NOTE(review): allocated as int32 although the kernel computes float
        distances — confirm the intended precision.
    '''
    radius = torch.zeros((coords.shape[0]), dtype=torch.int32).cuda()
    mask_coord_to_radius_kernel[mask.shape](
        mask, coords, radius, mask.stride(0), coords.stride(0)
    )
    return radius


def mask_coord_to_radius_cpu(mask: Tensor, coords: Tensor):
    """Compute the radius of every nucleus instance (pure torch).

    The radius is the maximum Euclidean distance from the instance centroid
    to any pixel of that instance.

    Args:
        mask: (H, W) integer instance mask; ids start at 1, may be
            non-contiguous, max value N.
        coords: (N, 2) centroids as (x, y); row i corresponds to id i + 1.
    Returns:
        (N,) float32 tensor of radii; entries for missing ids stay zero.
    """
    radius = torch.zeros((coords.shape[0]), dtype=torch.float32)
    for inst_id in mask.unique().tolist():
        # Skip background explicitly. The old code used unique()[1:], which
        # silently dropped the smallest instance id whenever the mask
        # contained no background (0) pixels.
        if inst_id == 0:
            continue
        ys, xs = torch.nonzero(mask == inst_id, as_tuple=True)
        dx = xs.float() - coords[inst_id - 1, 0]
        dy = ys.float() - coords[inst_id - 1, 1]
        radius[inst_id - 1] = (dx.pow(2) + dy.pow(2)).sqrt().max()
    return radius


# Triton kernel: sum per-pixel class scores into a per-instance vote table
# (one program per pixel).
@triton.jit
def find_cls_kernel(
    mask_ptr,     # pointer to the (H, W) instance-id mask (0 = background)
    cls_ptr,      # pointer to the per-pixel class scores, laid out (H, W, num_cls)
    temp_ptr,     # pointer to the (max_id + 1, num_cls) accumulator
    mask_stride,  # row stride of the mask
    cls_stride,   # row stride of cls
    temp_stride,  # row stride of temp (== num_cls); also used as vector width
):
    r = tl.program_id(0)  # row index of this pixel
    c = tl.program_id(1)  # column index of this pixel
    index = r * mask_stride + c  # flat offset of the pixel in the mask
    mask_value = tl.load(mask_ptr + index)  # instance id at this pixel
    if mask_value == 0:  # background pixel: no votes
        return
    else:
        # load the num_cls scores of this pixel (assumes contiguous last dim).
        # NOTE(review): tl.arange expects a power-of-two constexpr bound;
        # passing the runtime temp_stride only works if triton treats it as
        # a constexpr and it is a power of two — confirm.
        index = r * cls_stride + c * temp_stride + tl.arange(0, temp_stride)
        cls_value = tl.load(cls_ptr + index)
        # accumulate into row `mask_value` of temp (row indexed by raw id,
        # so row 0 stays unused for background)
        index = mask_value * temp_stride + tl.arange(0, temp_stride)
        tl.atomic_add(temp_ptr + index, cls_value)


def find_cls(mask: Tensor, cls: Tensor, num_cls: int):
    """Assign each instance its majority class and paint it per pixel.

    Args:
        mask: (H, W) instance-id mask (0 = background).
        cls: (H, W, num_cls) per-pixel class scores.
        num_cls: number of classes; channel 0 is excluded from the argmax.
    Returns:
        (H, W) tensor where each pixel carries its instance's class id (>= 1);
        background pixels receive the class chosen for vote-table row 0.
    """
    votes = torch.zeros((mask.max() + 1, num_cls), dtype=torch.float32).cuda()
    find_cls_kernel[mask.shape](
        mask, cls, votes, mask.stride(0), cls.stride(0), votes.stride(0)
    )
    # per-instance winner, skipping class channel 0
    instance_cls = votes[:, 1:].argmax(1) + 1
    return instance_cls[mask]


def get_aug(aug_list: list):
    """Build geometric and photometric augmentation callables.

    Args:
        aug_list: augmentation switches; recognized entries are
            'noise', 'color' and 'stain'.
    Returns:
        (flip_and_rot, image_aug):
            flip_and_rot applies the same random flips / 90-degree rotation
            to every tensor passed to it; image_aug applies the enabled
            photometric augmentations to a single image tensor.
    """
    noise = 'noise' in aug_list
    color = 'color' in aug_list
    stain = 'stain' in aug_list

    def flip_and_rot(*args: Tensor):
        # identical random flips and rotation for all tensors (image + targets)
        args = list(args)
        for dim in [-1, -2]:
            args = [arg.flip(dim) for arg in args] if random.random() > 0.5 else args
        if r := random.randint(0, 3):
            args = [arg.rot90(r, [-1, -2]) for arg in args]
        return args

    def image_aug(image):
        if noise:
            if random.random() > 0.5:
                image = TF.gaussian_blur(image, 3)
            if random.random() > 0.5:
                # salt-and-pepper noise on up to ~5% of pixels each way
                ns = torch.rand_like(image)
                image = torch.where(ns < random.random() * 0.05, torch.zeros_like(image), image)
                image = torch.where(ns > 1 - random.random() * 0.05, torch.ones_like(image), image)
        # color jitter
        if color:
            image = TF.adjust_brightness(image, random.uniform(0.8, 1.2)) if random.random() > 0.5 else image
            image = TF.adjust_contrast(image, random.uniform(0.8, 1.2)) if random.random() > 0.5 else image
            image = TF.adjust_saturation(image, random.uniform(0.8, 1.2)) if random.random() > 0.5 else image

        # stain augmentation (best effort: NMF can fail on degenerate tiles)
        if stain:
            if random.random() > 0.5:
                try:
                    image_np = np.array(TF.to_pil_image(image))
                    image_np = augment_stains(image_np, 0.15, 0.2)
                    image = TF.to_tensor(image_np)
                except Exception:
                    # was a bare `except:`; keep the best-effort semantics but
                    # stop swallowing KeyboardInterrupt/SystemExit
                    pass
        return image

    return flip_and_rot, image_aug


def get_train_process(aug_list: list):
    """Build (train_process, val_process) preprocessing callables.

    train_process random-crops everything to 256x256 (when larger), applies
    shared flips/rotations, and augments only the image (first tensor).
    val_process center-crops to 256x256 (when larger) and returns the
    tensors unchanged otherwise.
    """
    flip_and_rot, image_aug = get_aug(aug_list)

    def train_process(image: Tensor, *args: Tensor) -> Tensor:
        extras = list(args)
        H, W = image.shape[-2:]
        if H > 256 or W > 256:
            # draw order (top then left) kept for rng reproducibility;
            # negative offsets are allowed (crop pads out-of-bounds regions)
            top = random.randint(-128, H - 128)
            left = random.randint(-128, W - 128)
            image = TF.crop(image, top, left, 256, 256)
            extras = [TF.crop(t, top, left, 256, 256) for t in extras]
        out = flip_and_rot(image, *extras)
        out[0] = image_aug(out[0])
        return out

    def val_process(image: Tensor, *args: Tensor) -> Tensor:
        H, W = image.shape[-2:]
        if H > 256 or W > 256:
            image = TF.center_crop(image, (256, 256))
            args = tuple(TF.center_crop(t, (256, 256)) for t in args)
        return image, *args

    return train_process, val_process


def focal_loss(
    output: Tensor,
    target: Tensor,
    gama: float = 2,
    weight=None,
    label_smoothing: float = 0.0,
):
    """Mean focal loss: (1 - p_t)^gama * CE, averaged over all elements.

    Args:
        output: raw logits, as accepted by F.cross_entropy.
        target: class-index targets.
        gama: focusing exponent (note: parameter name kept as-is for callers).
        weight: optional per-class weights forwarded to cross_entropy.
        label_smoothing: forwarded to cross_entropy.
    """
    per_elem_ce = F.cross_entropy(
        output,
        target=target,
        reduction="none",
        weight=weight,
        label_smoothing=label_smoothing,
    )
    # p_t: probability assigned to the true class
    prob_true = torch.exp(-per_elem_ce)
    return torch.mean((1 - prob_true)**gama * per_elem_ce)


def dice_loss(pred: Tensor, mask: Tensor):
    """Binary Dice loss between argmax-foreground of `pred` and `mask`.

    Args:
        pred: (B, C, H, W) logits; any non-zero argmax counts as foreground.
        mask: (B, H, W) boolean ground-truth foreground mask.
    Returns:
        Scalar 1 - mean Dice over the batch.
    """
    foreground = pred.argmax(1).bool()
    eps = 1e-6
    overlap = (foreground & mask).sum((1, 2))
    total = foreground.sum((1, 2)) + mask.sum((1, 2))
    dice_per_sample = (2 * overlap + eps) / (total + eps)
    return 1 - dice_per_sample.mean()


def parse_config_file(dataset_name: str) -> Dict:
    """Load and return config/<dataset_name>.yaml as a dictionary."""
    config_path = f"config/{dataset_name}.yaml"
    with open(config_path, "r") as stream:
        return yaml.safe_load(stream)

def get_input_dataset() -> str:
    """Parse the --ds CLI argument and return the canonical dataset name.

    Returns:
        One of "MoNuSeg", "Lizard", "Pannuke", "CoNSeP", "CPM17".
    Raises:
        ValueError: if --ds (case-insensitive) is not a supported dataset.
    """
    # lowercase CLI key -> canonical capitalization; replaces the old
    # membership check plus if/elif chain with a single lookup table
    canonical = {
        "monuseg": "MoNuSeg",
        "lizard": "Lizard",
        "pannuke": "Pannuke",
        "consep": "CoNSeP",
        "cpm17": "CPM17",
    }

    parser = argparse.ArgumentParser(description="Train a model")
    parser.add_argument("--ds", type=str, default="monuseg", help="input dataset name")

    args = parser.parse_args()
    dataset_name = args.ds.lower()

    if dataset_name not in canonical:
        raise ValueError(f"Dataset {dataset_name} is not supported")

    return canonical[dataset_name]


@numba.jit(nopython=True)
def get_boundary_count(coord: np.ndarray, mask: np.ndarray, th: int=2) -> np.ndarray:
    """Vote coordinates into `mask` and threshold into a +/-10 score map.

    Args:
        coord: coordinate array of shape (A, B, C, 2); the last axis is
            indexed as (x, y) and used directly as mask[x, y].
        mask: 2-D integer accumulator. NOTE: mutated in place — the caller's
            array receives the accumulated votes.
        th: minimum number of votes for a pixel to count as boundary.
    Returns:
        Integer array shaped like `mask`: 10 where votes >= th, else -10.
    """
    for i in range(coord.shape[0]):
        for j in range(coord.shape[1]):
            for k in range(coord.shape[2]):
                x, y = int(coord[i, j, k, 0]), int(coord[i, j, k, 1])
                # skip out-of-range points.
                # NOTE(review): `x <= 0` (and `y <= 0`) also rejects the valid
                # index 0 — confirm the first row/column are meant to be excluded.
                if x <= 0 or x >= mask.shape[0] or y <= 0 or y >= mask.shape[1]:
                    continue
                mask[x, y] += 1
    return np.where(mask >= th, 10, -10)
