import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
from einops import rearrange


def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0.):
    """Build a per-iteration value schedule: optional linear warmup, then cosine decay.

    Args:
        base_value: value at the end of warmup / start of the cosine phase.
        final_value: value the cosine phase decays to.
        epochs: total number of epochs.
        niter_per_ep: iterations per epoch.
        warmup_epochs: number of epochs of linear warmup (0 disables warmup).
        start_warmup_value: value at the very first warmup iteration.

    Returns:
        np.ndarray of length ``epochs * niter_per_ep`` with one value per iteration.
    """
    total_iters = epochs * niter_per_ep
    warmup_iters = warmup_epochs * niter_per_ep

    # Linear ramp from start_warmup_value up to base_value (empty when no warmup).
    if warmup_epochs > 0:
        warmup_part = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup_part = np.array([])

    # Cosine anneal from base_value down to final_value over the remaining iterations.
    steps = np.arange(total_iters - warmup_iters)
    cosine_part = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * steps / len(steps)))

    full_schedule = np.concatenate((warmup_part, cosine_part))
    assert len(full_schedule) == total_iters
    return full_schedule

def acc(output, target):
    """Overall accuracy: fraction of positions where output equals target."""
    hits = (output == target).sum().float()
    total_count = torch.numel(output)
    # eps in the denominator guards against an empty tensor.
    return hits / (total_count + 1e-8)


def sen(output, target, eps=1e-8):
    """Sensitivity (recall) = TP / (TP + FN) for binary {0,1} masks."""
    true_pos = torch.sum(output * target)
    false_neg = torch.sum((1 - output) * target)
    return true_pos / (true_pos + false_neg + eps)

def precision(output, target, eps=1e-8):
    """Precision = TP / (TP + FP) for binary {0,1} masks."""
    true_pos = torch.sum(output * target)
    false_pos = torch.sum(output * (1 - target))
    denom = true_pos + false_pos + eps
    return true_pos / denom

def f1_score(p, r, eps=1e-8):
    """F1 score: harmonic mean of precision p and recall r."""
    numerator = 2 * p * r
    denominator = p + r + eps
    return numerator / denominator


def Dice(output, target, eps=1e-3):
    """Mean soft-Dice over the batch.

    Sums over dims (1, 2, -1) — i.e. all non-batch dims for a 4D (b, d, h, w)
    mask — then averages the per-sample Dice coefficients. eps smooths both
    numerator and denominator so an empty mask pair scores near-zero instead
    of dividing by zero.
    """
    reduce_dims = (1, 2, -1)
    overlap = torch.sum(output * target, dim=reduce_dims) + eps
    denom = torch.sum(output, dim=reduce_dims) + torch.sum(target, dim=reduce_dims) + eps * 2
    per_sample = 2 * overlap / denom
    return torch.mean(per_sample)


def cal_metrics(output, target):
    """Compute Dice, accuracy, sensitivity and F1 for three nested label regions.

    Args:
        output: (b, num_class, d, h, w) per-class scores; argmax'd over dim 1.
        target: (b, d, h, w) integer label map.

    Regions (NOTE(review): presumably the BraTS ET/TC/WT convention — confirm
    the label mapping against the dataset):
        region 1: label == 3
        region 2: label in {1, 3}
        region 3: label != 0 (any foreground)

    Returns:
        (dice1, dice2, dice3, acc, sen1, sen2, sen3, f1_1, f1_2, f1_3)
    """
    output = torch.argmax(output, dim=1)  # (b, d, h, w)

    # Build each binary region mask once, instead of recomputing it for every
    # metric family (the original built the same masks for Dice, Sen and Pre).
    pred1, true1 = (output == 3).float(), (target == 3).float()
    pred2 = ((output == 1) | (output == 3)).float()
    true2 = ((target == 1) | (target == 3)).float()
    pred3, true3 = (output != 0).float(), (target != 0).float()

    dice1 = Dice(pred1, true1)
    dice2 = Dice(pred2, true2)
    dice3 = Dice(pred3, true3)

    # Overall accuracy on the raw label maps.
    acc_score = acc(output, target)

    sen1 = sen(pred1, true1)
    sen2 = sen(pred2, true2)
    sen3 = sen(pred3, true3)

    pre1 = precision(pred1, true1)
    pre2 = precision(pred2, true2)
    pre3 = precision(pred3, true3)

    f11 = f1_score(pre1, sen1)
    f12 = f1_score(pre2, sen2)
    f13 = f1_score(pre3, sen3)

    return dice1, dice2, dice3, acc_score, sen1, sen2, sen3, f11, f12, f13

def cal_binary_metrics(output, target):
    """Collapse all foreground labels into one class and compute binary metrics.

    Args:
        output: (b, num_class, d, h, w) per-class scores; argmax'd over dim 1.
        target: (b, d, h, w) integer label map.

    Returns:
        (accuracy, sensitivity, f1) over "tumor vs background" — every
        non-zero label counts as foreground.
    """
    eps = 1e-8

    # Predicted labels, then binarize: any non-zero label is foreground.
    pred_fg = (torch.argmax(output, dim=1) != 0).float().view(-1)
    true_fg = (target != 0).float().view(-1)

    # Confusion-matrix entries on the flattened masks.
    tp = torch.sum(pred_fg * true_fg)
    fp = torch.sum(pred_fg * (1 - true_fg))
    fn = torch.sum((1 - pred_fg) * true_fg)
    tn = torch.sum((1 - pred_fg) * (1 - true_fg))

    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    recall = tp / (tp + fn + eps)
    prec = tp / (tp + fp + eps)
    f1 = 2 * prec * recall / (prec + recall + eps)

    return accuracy, recall, f1

# Backward compatibility: the original cal_dice is kept; it calls cal_metrics internally and returns only the first 3 values.
def cal_dice(output, target):
    """Backward-compatible wrapper: return only the three Dice scores from cal_metrics."""
    metrics = cal_metrics(output, target)
    return metrics[0], metrics[1], metrics[2]


class Loss(nn.Module):
    """Weighted cross-entropy plus soft-Dice loss for multi-class 3D segmentation.

    total = (1 - alpha) * weighted_CE + alpha * (1 - soft_dice),
    where soft-Dice is computed over all non-background channels jointly.
    """

    def __init__(self, n_classes, weight=None, alpha=0.5):
        """
        Args:
            n_classes: number of classes (channel dim of the logits).
            weight: optional per-class weight tensor for cross-entropy.
            alpha: mixing factor between CE (1 - alpha) and Dice (alpha).
        """
        super(Loss, self).__init__()
        self.n_classes = n_classes
        # Register as a buffer (None is allowed) so that .to(device)/.cuda()
        # moves it with the module. The original called weight.cuda(), which
        # crashed on CPU-only machines and whenever weight was left as None.
        self.register_buffer('weight', weight)
        self.alpha = alpha

    def forward(self, input, target):
        """
        Args:
            input: (b, n_classes, h, w, s) raw logits.
            target: (b, h, w, s) integer labels in [0, n_classes).
        Returns:
            scalar total loss.
        """
        smooth = 0.01

        probs = F.softmax(input, dim=1)                     # (b, n, h, w, s)
        one_hot = F.one_hot(target, self.n_classes)         # (b, h, w, s, n)

        # Flatten spatial dims; equivalent to the original einops rearranges
        # 'b n h w s -> b n (h w s)' and 'b h w s n -> b n (h w s)'.
        probs = probs.flatten(2)
        one_hot = one_hot.permute(0, 4, 1, 2, 3).flatten(2)

        # Drop the background channel before computing Dice.
        probs_fg = probs[:, 1:, :]
        target_fg = one_hot[:, 1:, :].float()

        # Dice is computed over the whole batch at once (rather than
        # per-sample) — reportedly gives more stable training.
        inter = torch.sum(probs_fg * target_fg)
        union = torch.sum(probs_fg) + torch.sum(target_fg) + smooth
        dice = 2.0 * inter / union

        ce = F.cross_entropy(input, target, weight=self.weight)

        total_loss = (1 - self.alpha) * ce + (1 - dice) * self.alpha
        return total_loss


if __name__ == '__main__':
    # Quick smoke test: random logits/labels through the loss and Dice metrics.
    torch.manual_seed(3)
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    criterion = Loss(n_classes=4, weight=torch.tensor([0.2, 0.3, 0.25, 0.25])).to(dev)
    # Sample on CPU first (keeps the RNG stream identical), then move to device.
    logits = torch.randn((2, 4, 16, 16, 16)).to(dev)
    labels = torch.randint(0, 4, (2, 16, 16, 16)).to(dev)
    print(criterion(logits, labels))
    print(cal_dice(logits, labels))
