import torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader,Subset
import torchvision.models as models
from tqdm.notebook import tqdm
from torch.utils.data import Dataset
import numpy as np
import itertools

class MultiClassFocalLossWithAlpha(nn.Module):
    """Multi-class focal loss with a per-class alpha weighting term."""

    def __init__(self, device, alpha=(0.2, 0.3, 0.5), gamma=2.0, reduction='mean'):
        """
        :param device: device the per-class alpha tensor is moved to
        :param alpha: per-class weight coefficients; e.g. for three classes,
            class 0 -> 0.2, class 1 -> 0.3, class 2 -> 0.5
        :param gamma: focusing parameter for hard-example mining
        :param reduction: 'mean', 'sum', or anything else to get per-sample losses
        """
        super(MultiClassFocalLossWithAlpha, self).__init__()
        # Tuple default avoids the shared-mutable-default-argument pitfall
        # of the original list default; torch.tensor accepts both.
        self.alpha = torch.tensor(alpha).to(device)
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, pred, target):
        """
        :param pred: raw logits, shape (bs, num_classes)
        :param target: integer class labels, shape (bs,)
        :return: scalar loss ('mean'/'sum') or per-sample losses, shape (bs,)
        """
        alpha = self.alpha[target]  # per-sample class weight, shape (bs,)
        log_softmax = torch.log_softmax(pred, dim=1)  # shape (bs, num_classes)
        # Log-probability of each sample's target class, shape (bs,).
        logpt = torch.gather(log_softmax, dim=1, index=target.view(-1, 1))
        logpt = logpt.view(-1)
        ce_loss = -logpt  # negative log-likelihood is the cross-entropy term
        pt = torch.exp(logpt)  # softmax probability of the target class, shape (bs,)
        # Focal loss: (1 - pt) ** gamma down-weights easy (high-pt) examples.
        focal_loss = alpha * (1 - pt) ** self.gamma * ce_loss
        if self.reduction == "mean":
            return torch.mean(focal_loss)
        if self.reduction == "sum":
            return torch.sum(focal_loss)
        return focal_loss
    
class WeightedCrossEntropyLoss(nn.Module):
    """Mean-reduced cross-entropy whose class weights may change per call.

    The original implementation mutated the ``weight`` attribute of a stored
    ``nn.CrossEntropyLoss`` module on every forward, which is stateful and
    not safe under concurrent use. Calling the functional form directly is
    stateless and numerically identical (``nn.CrossEntropyLoss`` delegates to
    ``F.cross_entropy`` with its stored weight).
    """

    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, inputs, targets, weights=None):
        """
        :param inputs: raw logits, shape (bs, num_classes)
        :param targets: integer class labels, shape (bs,)
        :param weights: optional per-class weight tensor, shape (num_classes,)
        :return: scalar loss (weighted mean when ``weights`` is given)
        """
        return nn.functional.cross_entropy(
            inputs, targets, weight=weights, reduction='mean')

def update_class_weights(model, dataloaders, device, label_mapping_tensor, threshold=0.9, max_weight=9):
    """Compute per-class loss weights from validation accuracy.

    Classes whose validation accuracy falls below ``overall_accuracy * threshold``
    receive a weight above 1.0, linearly scaled up to ``1 + max_weight``; all
    other classes keep weight 1.0.

    :param model: network whose forward returns a ``(features, logits)`` tuple
    :param dataloaders: dict with 'train' and 'val' loaders; each loader's
        ``dataset.targets`` must expose the raw labels
    :param device: device used for evaluation
    :param label_mapping_tensor: 1-D tensor mapping raw label index -> mapped label
    :param threshold: fraction of overall accuracy below which a class is boosted
    :param max_weight: maximum extra weight added on top of the base 1.0
    :return: 1-D float tensor of class weights, indexed by mapped label
    """
    # Collect every unique (mapped) label present in train + val.
    all_labels = np.concatenate([dataloaders['train'].dataset.targets,
                                 dataloaders['val'].dataset.targets])
    unique_labels = np.unique(all_labels)
    # Move the mapping once, up front (the original re-moved it every batch).
    label_mapping_tensor = label_mapping_tensor.to(device)
    unique_labels = label_mapping_tensor[unique_labels].cpu().numpy()

    # Running per-class totals of correct predictions and sample counts.
    correct_counts = {label.item(): 0.0 for label in unique_labels}
    total_samples = {label.item(): 0.0 for label in unique_labels}

    model.eval()  # evaluation mode: no dropout / fixed batch-norm stats
    with torch.no_grad():
        for inputs, labels in dataloaders['val']:
            inputs, labels = inputs.to(device), labels.to(device)
            labels = label_mapping_tensor[labels]
            _, outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)
            predicted = label_mapping_tensor[predicted]
            correct = (predicted == labels)
            # Accumulate each class ONCE per batch. The original looped over
            # every sample, adding a class's batch statistics once per
            # occurrence, which skews the accumulated accuracy toward batches
            # where the class appears frequently.
            for label in torch.unique(labels):
                mask = (labels == label)
                correct_counts[label.item()] += correct[mask].sum().item()
                total_samples[label.item()] += mask.sum().item()

    # Convert counts to accuracies, guarding against division by zero for
    # classes absent from the validation loader.
    class_accuracies = {}
    for label in unique_labels:
        denom = max(total_samples[label], 1)
        class_accuracies[label] = correct_counts[label] / denom

    class_weights = {label: 1.0 for label in unique_labels}
    overall_accuracy = sum(class_accuracies.values()) / len(class_accuracies)
    min_accuracy = min(class_accuracies.values())

    # Boost only when at least one class clearly lags the overall accuracy.
    if min_accuracy < overall_accuracy * threshold:
        for label, acc in class_accuracies.items():
            if acc < overall_accuracy * threshold:
                # Linear boost proportional to the relative accuracy gap.
                class_weights[label] = 1.0 + (((overall_accuracy - acc) / overall_accuracy) * max_weight)

    # Dense weight tensor indexed by mapped label; unseen labels default to 1.0.
    max_label = max(class_weights.keys())
    class_weights_tensor = torch.ones(max_label + 1)
    for label, weight in class_weights.items():
        class_weights_tensor[label] = weight

    return class_weights_tensor


def get_class_stats(dataloader):
    """Tally per-class sample counts and frequencies from a DataLoader.

    Parameters:
    dataloader -- iterable yielding (inputs, labels) batches.

    Returns:
    dict mapping label -> {'count': samples of that class,
                           'freq': count / total samples}.
    """
    counts = {}
    total = 0
    for _, batch_labels in dataloader:
        for lab in batch_labels.cpu().numpy():
            counts[lab] = counts.get(lab, 0) + 1
            total += 1
    # Derive frequencies in a single pass over the tallied counts.
    return {lab: {'count': c, 'freq': c / total} for lab, c in counts.items()}

def sort_and_complete_alpha_by_label_mapping(class_stats, label_mapping, task_class_orders):
    """Build inverse-frequency alpha values, grouped per class and per task.

    Parameters:
    class_stats -- dict mapping label -> {'count': ..., 'freq': ...}.
    label_mapping -- dict mapping original label -> mapped label.
    task_class_orders -- list of lists; each sub-list holds the original
        labels that belong to one task.

    Returns:
    alpha_global -- 1-D numpy array of inverse class frequencies, indexed by
        mapped label (unseen entries default to 1.0).
    alpha_tasks -- 1-D numpy array of inverse task frequencies, indexed by
        task number (empty when no task overlaps class_stats).
    """
    epsilon = 1e-6  # avoids division by zero for zero-frequency entries
    total_samples = sum(stats['count'] for stats in class_stats.values())

    # Global alphas: inverse frequency per mapped class. Labels missing from
    # label_mapping are mapped to -1 and skipped below.
    mapped_classes = {label_mapping.get(label, -1) for label in class_stats}
    max_label = max(mapped_classes)
    alpha_global = np.ones(max_label + 1)
    for label, stats in class_stats.items():
        mapped_label = label_mapping.get(label, -1)
        if mapped_label != -1:
            alpha_global[mapped_label] = 1 / (stats['freq'] + epsilon)

    # Indices of tasks that have at least one class present in class_stats.
    existing_task_indices = [
        task_index
        for task_index, task_classes in enumerate(task_class_orders)
        if any(label in class_stats for label in task_classes)
    ]
    if not existing_task_indices:
        # No task overlaps the observed classes; the original implementation
        # raised IndexError here. Return an empty task-alpha array instead.
        return alpha_global, np.ones(0)

    # Size the task array up to the largest overlapping task index.
    alpha_tasks = np.ones(existing_task_indices[-1] + 1)
    for i in existing_task_indices:
        task_total_samples = sum(class_stats[label]['count']
                                 for label in task_class_orders[i]
                                 if label in class_stats)
        task_freq = task_total_samples / total_samples
        alpha_tasks[i] = 1 / (task_freq + epsilon)

    return alpha_global, alpha_tasks