import utils
import math
from itertools import cycle
# import seaborn as sns
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import roc_curve, auc, confusion_matrix, cohen_kappa_score, roc_auc_score, f1_score, recall_score, \
    precision_score, accuracy_score
from sklearn.preprocessing import label_binarize
import torch
import torch.nn.functional as F
import cv2
from timm.data import Mixup
from timm.utils import accuracy
from torch import nn
import umap
# from models.cross_entropy import SoftTargetCrossEntropy, FocalLoss
import pandas as pd
from sklearn.metrics import cohen_kappa_score
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np

# Focal loss (Lin et al., "Focal Loss for Dense Object Detection"):
# down-weights easy examples so training concentrates on hard ones.
class FocalLoss(nn.Module):
    """Multi-class focal loss built on top of ``F.cross_entropy``.

    Args:
        alpha: optional per-class weights (list / ndarray / tensor); passed as
            ``weight`` to ``F.cross_entropy``.
        gamma: focusing parameter; ``gamma=0`` reduces to plain cross-entropy.
        reduction: 'mean' | 'sum' | anything else returns per-sample losses.
    """

    def __init__(self, alpha=None, gamma=2.0, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.reduction = reduction
        if isinstance(alpha, (list, np.ndarray)):
            # Convert once to a float tensor; forward() moves it to the
            # logits' device on demand.
            self.alpha = torch.as_tensor(alpha, dtype=torch.float32)
        else:
            self.alpha = alpha

    def forward(self, inputs, targets):
        weight = self.alpha
        # Fix: class weights created on CPU would otherwise crash
        # F.cross_entropy when the logits live on GPU.
        if isinstance(weight, torch.Tensor) and weight.device != inputs.device:
            weight = weight.to(inputs.device)
        # per-sample (optionally class-weighted) cross-entropy
        ce_loss = F.cross_entropy(inputs, targets, reduction='none', weight=weight)
        pt = torch.exp(-ce_loss)  # model's probability for the true class
        focal_loss = (1 - pt) ** self.gamma * ce_loss

        if self.reduction == 'mean':
            return focal_loss.mean()
        elif self.reduction == 'sum':
            return focal_loss.sum()
        else:
            return focal_loss


def train_one_epoch(model, criterion, optimizer, data_loader,
                    device, epoch: int, loss_scaler, max_norm: float = 0,
                    mixup_fn=None, amp=True, finetune=False
                    ):
    """Train ``model`` for one epoch and return the averaged metrics.

    Args:
        model: network to train.
        criterion: loss taking ``(outputs, targets)``.
        optimizer: optimizer over ``model.parameters()``.
        data_loader: yields ``(samples, targets, id_code)`` triples.
        device: device the batches are moved to.
        epoch: current epoch index (logging only).
        loss_scaler: AMP grad-scaler callable, used when ``amp`` is True.
        max_norm: gradient-clipping norm; ``0``/``None`` disables clipping.
        mixup_fn: optional timm ``Mixup`` applied per batch.
        amp: run the forward pass under mixed-precision autocast.
        finetune: when True the model stays in eval mode (frozen
            BatchNorm/Dropout statistics) while still being optimized.

    Returns:
        dict mapping meter name -> global average over the epoch.

    Raises:
        ValueError: if the loss becomes non-finite (NaN/Inf).
    """
    # finetune=True -> train(False) i.e. eval mode; otherwise normal training.
    model.train(not finetune)
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.7f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = math.ceil(len(data_loader) * 0.4)

    for samples, targets, _ in metric_logger.log_every(data_loader, print_freq, header, device):

        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        # Only the forward pass and loss computation run under autocast;
        # backward/step are performed OUTSIDE the context, as the PyTorch
        # AMP docs recommend (the original nested them inside).
        with torch.amp.autocast('cuda', enabled=amp):
            outputs = model(samples)
            loss = criterion(outputs, targets)

        loss_value = loss.item()
        if not math.isfinite(loss_value):  # guard against NaN/Inf divergence
            print("Loss is {}, stopping training".format(loss_value))
            raise ValueError("Loss is {}, stopping training".format(loss_value))

        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        if amp:
            # loss_scaler handles scaling, unscaling, clipping and stepping.
            loss_scaler(loss, optimizer, clip_grad=max_norm,
                        parameters=model.parameters(), create_graph=is_second_order)
        else:
            loss.backward(create_graph=is_second_order)  # second-order graph for adahessian
            if max_norm is not None and max_norm != 0.0:  # clip to avoid exploding gradients
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            optimizer.step()
        torch.cuda.synchronize()  # wait for pending CUDA work before logging

        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}



######################## Multi-class evaluation ########################
@torch.no_grad()
def evaluate(data_loader, model, criterion, device, distributed=True, amp=False,
             top_k=(1,), output_dir=None, is_eval=False):
    """Evaluate ``model`` over the whole loader and report loss/accuracy.

    Args:
        data_loader: yields ``(images, target, id_code)`` triples.
        model: network to evaluate (switched to eval mode here).
        criterion: ignored — deliberately replaced below (see NOTE).
        device: device the batches are moved to.
        distributed: all-gather per-batch tensors across processes when True.
        amp: run the forward pass under mixed-precision autocast.
        top_k: accuracies to report, e.g. ``(1,)`` or ``(1, 5)``.
        output_dir: pathlib.Path for plots / predictions.csv when ``is_eval``.
        is_eval: when True also dump ROC/confusion plots and a CSV of
            per-sample predictions (main process only).

    Returns:
        dict mapping meter name -> global average.
    """
    # criterion = torch.nn.CrossEntropyLoss()
    # NOTE(review): the caller-supplied criterion is intentionally overridden
    # by a class-weighted focal loss tuned for this task.
    criterion = FocalLoss(alpha=[0.0330, 0.0633, 0.1675, 0.1166, 0.2079, 0.4117], gamma=1.5,
                          reduction='mean')  # R2F-PD six-class setup

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    print_freq = math.ceil(len(data_loader) * 0.4)

    # switch to evaluation mode
    model.eval()

    id_codes_list = []  # per-sample identifiers (main process only)

    outputs = []
    targets = []
    inputs = [] if is_eval else None

    for images, target, id_code in metric_logger.log_every(data_loader, print_freq, header, device):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        # Fix: torch.cuda.amp.autocast is deprecated — use the torch.amp API,
        # consistent with train_one_epoch.
        with torch.amp.autocast('cuda', enabled=amp):
            output = model(images)  # final-layer logits

        # append each batch's predictions/targets (all-gathered when distributed)
        append_data(outputs, output, distributed, concat_all_gather)
        append_data(targets, target, distributed, concat_all_gather)

        if not distributed or utils.is_main_process():
            # collect id_codes only on the main process (or single-GPU mode)
            id_codes_list.extend(id_code)

        if is_eval:
            append_data(inputs, images, distributed, concat_all_gather)

        torch.cuda.synchronize()

    outputs = torch.cat(outputs, dim=0)
    targets = torch.cat(targets, dim=0)

    if is_eval:
        inputs = torch.cat(inputs, dim=0).reshape(outputs.shape[0], -1)

    # loss over the full, gathered prediction set
    real_loss = criterion(outputs, targets)

    metric_logger.synchronize_between_processes()
    metric_logger.update(loss=real_loss.item())

    if len(top_k) > 1:
        real_acc1, real_acc5 = accuracy(targets, outputs, topk=top_k)
        metric_logger.meters['acc1'].update(real_acc1.item())
        metric_logger.meters['acc5'].update(real_acc5.item())
        print('* Acc@1 {top1.global_avg:.2f}% Acc@5 {top5.global_avg:.2f}% loss {losses.global_avg:.3f}'
              .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    else:
        real_acc1, = accuracy(targets, outputs, topk=top_k)
        metric_logger.meters['acc1'].update(real_acc1.item())
        print('* Acc@1 {top1.global_avg:.2f}% loss {losses.global_avg:.3f}'
              .format(top1=metric_logger.acc1, losses=metric_logger.loss))

    if is_eval and utils.is_main_process():
        y_true, y_pred, y_score = process_model_output(targets, outputs)
        # y_score = y_score[:, 1]  # positive-class probability (binary case)
        evaluate_performance(y_true, y_pred, y_score)
        generate_plots2(y_true, y_pred, y_score, outputs, inputs, output_dir)

        # build the per-sample prediction table
        data_dict = {
            "id_code": id_codes_list,  # sample identifiers collected above
            "true_label": y_true,
            "pred_label": y_pred
        }
        num_classes = y_score.shape[1]  # number of classes in this task
        for c in range(num_classes):
            data_dict[f"prob_class_{c}"] = y_score[:, c]

        df = pd.DataFrame(data_dict)
        # save as predictions.csv under output_dir
        csv_path = output_dir / "predictions.csv"
        df.to_csv(csv_path, index=False)
        print(f"Predictions saved to {csv_path}")

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


def accuracy(y_true, y_pred, topk=(1,)):
    """Return accuracy percentages for binary or multi-class predictions.

    A 1-D (or single-column) ``y_pred`` is treated as binary logits:
    sigmoid + round, returning a one-element list. Otherwise standard
    top-k accuracy is computed, one tensor per entry in ``topk``.
    """
    is_binary = y_pred.dim() == 1 or (y_pred.dim() > 1 and y_pred.shape[1] == 1)
    if is_binary:
        probs = torch.sigmoid(y_pred.squeeze())
        hits = (torch.round(probs) == y_true).float()
        return [hits.sum() / len(hits) * 100]

    # multi-class: one top-k pass covering the largest requested k
    maxk = max(topk)
    n_samples = y_true.size(0)

    _, top_idx = y_pred.topk(maxk, 1, True, True)
    top_idx = top_idx.t()
    matches = top_idx.eq(y_true.view(1, -1).expand_as(top_idx))

    results = []
    for k in topk:
        n_correct = matches[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(n_correct * (100.0 / n_samples))
    return results


###############################################
def process_model_output(targets, outputs):
    """Convert raw logits into ``(y_true, y_pred, y_score)`` numpy arrays.

    A 1-D (or single-column) ``outputs`` is treated as binary logits and
    goes through a sigmoid; anything else gets a softmax over the class
    dimension, with ``y_pred`` taken as the argmax.
    """
    y_true = targets.int().cpu().numpy()

    is_binary = outputs.dim() == 1 or (outputs.dim() == 2 and outputs.shape[1] == 1)
    if is_binary:
        y_score = torch.sigmoid(outputs.squeeze()).cpu().numpy()
        y_pred = np.round(y_score)
    else:
        y_score = torch.softmax(outputs, dim=1).cpu().numpy()
        y_pred = y_score.argmax(axis=1)

    return y_true, y_pred, y_score


def evaluate_performance(y_true, y_pred, y_score):
    """Print classification metrics for binary or multi-class predictions.

    Args:
        y_true: ground-truth labels, shape (N,).
        y_pred: predicted labels, shape (N,).
        y_score: predicted probabilities — (N,) for binary, (N, C) otherwise.

    Returns:
        None; the metrics are printed.
    """
    n_classes = len(np.unique(y_true))  # number of distinct classes present
    if n_classes == 2:  # binary case

        auc = roc_auc_score(y_true, y_score)  # AUC
        # NOTE(review): label 0 is treated as the class of interest here —
        # confirm against the label encoding used upstream.
        f1 = f1_score(y_true, y_pred, pos_label=0)
        recall = recall_score(y_true, y_pred, pos_label=0)
        precision = precision_score(y_true, y_pred, pos_label=0)
        qwk = cohen_kappa_score(y_true, y_pred, weights='quadratic')
        print(
            f"Auc:{auc * 100:.2f} | F1:{f1 * 100:.2f}% | Recall:{recall * 100:.2f}% | Precision:{precision * 100:.2f}%| QWK:{qwk * 100:.2f}%")
    else:  # multi-class
        # Fix: the old flatten-and-binarize version actually computed the
        # MICRO-average AUC despite asking for 'macro' (flattening collapses
        # everything into one binary problem, where `average` is ignored),
        # and `label_binarize(classes=range(n_classes))` mislabelled
        # non-contiguous label sets. Use sklearn's one-vs-rest macro average
        # over all score columns instead.
        auc = roc_auc_score(y_true, y_score, multi_class='ovr', average='macro',
                            labels=np.arange(y_score.shape[1]))
        f1 = f1_score(y_true, y_pred, average='weighted')
        kappa = cohen_kappa_score(y_true, y_pred, weights='quadratic')
        qwk = kappa  # quadratic-weighted kappa, reported twice by design
        print(f"AUC:{auc * 100:.2f} | F1:{f1 * 100:.2f}% | Kappa:{kappa * 100:.2f}%| QWK:{qwk * 100:.2f}%")


#############################################################################
def generate_plots2(y_true, y_pred, y_score, outputs, inputs, output_dir):
    """Write the ROC-curve and confusion-matrix figures into ``output_dir``.

    ``outputs`` and ``inputs`` are accepted for interface compatibility but
    are not used by the current plots.
    """
    roc(y_true, y_score, output_dir)       # ROC curves (+ micro/macro averages)
    fu_matrix(y_true, y_pred, output_dir)  # confusion-matrix heatmap

    plt.close()

################ ROC curve ####################
def roc(y_true, y_score, output_dir):
    """Plot ROC curve(s) and save the figure to ``output_dir / 'roc_auc.png'``.

    Binary scores (1-D or single column) produce a single curve; multi-class
    scores additionally get per-class curves plus micro/macro averages.
    """
    # detect binary vs multi-class from the score shape
    if y_score.ndim == 1 or y_score.shape[1] == 1:  # binary
        fpr, tpr, _ = roc_curve(y_true, y_score)
        roc_auc = auc(fpr, tpr)
        plt.figure()
        plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
    else:  # multi-class
        n_classes = y_score.shape[1]
        y_true = label_binarize(y_true, classes=range(n_classes))

        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])

        # micro-average ROC/AUC: pool every (sample, class) decision
        fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_score.ravel())
        roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

        # macro-average ROC/AUC: interpolate and average the per-class curves
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(n_classes):
            mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
        mean_tpr /= n_classes
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

        # draw every curve
        plt.figure()
        colors = cycle(['blue', 'red', 'green', 'orange', 'purple', 'brown'])
        for i, color in zip(range(n_classes), colors):
            plt.plot(fpr[i], tpr[i], color=color, lw=2, label=f'ROC curve of class {i} (AUC = {roc_auc[i]:.2f})')

        plt.plot(fpr["micro"], tpr["micro"],
                 label=f'micro-average ROC curve (AUC = {roc_auc["micro"]:.2f})',
                 color='deeppink', linestyle=':', linewidth=4)

        plt.plot(fpr["macro"], tpr["macro"],
                 label=f'macro-average ROC curve (AUC = {roc_auc["macro"]:.2f})',
                 color='navy', linestyle=':', linewidth=4)

    plt.plot([0, 1], [0, 1], 'k--', lw=2)  # chance diagonal
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.savefig(str(output_dir / 'roc_auc.png'))
    # Fix: the old trailing plt.show() ran AFTER plt.close(), so it displayed
    # nothing and could block under interactive backends — removed.
    plt.close()




################# Confusion matrix ###################
def fu_matrix(y_true, y_pred, output_dir):
    """Render the confusion matrix and save it to ``output_dir``."""
    cm = confusion_matrix(y_true, y_pred)
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=plt.get_cmap('Blues'))
    plt.colorbar()

    # annotate every cell, flipping text colour on dark backgrounds
    midpoint = cm.max() / 2.0
    n_rows, n_cols = cm.shape
    for r in range(n_rows):
        for c in range(n_cols):
            cell_color = "white" if cm[r, c] > midpoint else "black"
            plt.text(c, r, format(cm[r, c], 'd'),
                     horizontalalignment="center",
                     color=cell_color)

    ticks = np.arange(len(cm))
    plt.xticks(ticks, ticks)
    plt.yticks(ticks, ticks)
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted Labels')
    plt.ylabel('True Labels')

    plt.savefig(output_dir / 'confusion_matrix.png')
    # close explicitly so figures don't accumulate in memory
    plt.close()



def feature_extractor(model, inputs, layers):
    """Forward ``inputs`` through ``model`` and return the flattened,
    concatenated outputs of the requested intermediate layer(s).

    Args:
        model: network to run.
        inputs: batch passed straight to ``model``.
        layers: a single ``nn.Module`` or an iterable (e.g. ``nn.ModuleList``)
            of modules whose forward outputs should be captured.

    Returns:
        Tensor of shape (batch, sum of flattened per-layer feature sizes).

    Raises:
        TypeError: if ``layers`` is neither a module nor an iterable of modules.
    """
    features = []

    def hook(module, inp, output):
        # capture each watched layer's output during the forward pass
        features.append(output)

    hook_handles = []

    # iterable of layers (nn.ModuleList or any iterable): watch each one
    if isinstance(layers, nn.ModuleList) or hasattr(layers, '__iter__'):
        for layer in layers:
            hook_handles.append(layer.register_forward_hook(hook))
    # single module: register one hook
    elif isinstance(layers, nn.Module):
        hook_handles.append(layers.register_forward_hook(hook))
    else:
        raise TypeError("'layers' must be a nn.ModuleList or a single nn.Module or an iterable of nn.Modules")

    try:
        # run the forward pass so the hooks fire
        model(inputs)
        flattened_outputs = [torch.flatten(output, start_dim=1) for output in features]
        final_output = torch.cat(flattened_outputs, dim=1)
    finally:
        # Fix: always remove the hooks — even if the forward pass raises —
        # so a failed call doesn't leave stale hooks on the model.
        for hook_handle in hook_handles:
            hook_handle.remove()

    return final_output


@torch.no_grad()
def append_data(data_list, data, distributed, concat_fn=None):
    """Append ``data`` to ``data_list``; in distributed mode the tensor is
    first gathered across processes via ``concat_fn`` (when provided)."""
    gathered = concat_fn(data) if (distributed and concat_fn is not None) else data
    data_list.append(gathered)


def concat_all_gather(tensor):
    """All-gather ``tensor`` from every process and concatenate along dim 0.

    NOTE: plain ``all_gather`` does not propagate gradients through the
    gathered copies.
    """
    world_size = torch.distributed.get_world_size()
    buffers = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(buffers, tensor.contiguous(), async_op=False)
    return torch.cat(buffers, dim=0)
