import torch
from torch import nn
import numpy as np
from tqdm.notebook import tqdm
from .criterian import update_class_weights
def train_one_epoch(model, old_model, dataloader, criterion, criterion_label, weight_label,
                    optimizer, device, pbar, label_mapping_tensor, task_label_mapping_tensor,
                    class_weights=None, distill_weight=50.0):
    """Run one training epoch with gate-weight distillation from a frozen old model.

    Args:
        model: student network; forward returns (_, class_logits, task_logits, gate_weights).
        old_model: teacher network with the same forward signature; its gate weights
            are the distillation target for the first ``num_old_rows`` gate columns.
        dataloader: training loader; its dataset must expose ``targets`` and ``num_classes``.
        criterion: classification loss (called with ``weights=class_weights`` when given), or None.
        criterion_label: task-classification loss applied to task logits, or None.
        weight_label: scalar weight for the task-classification loss.
        optimizer: optimizer over the student's parameters.
        device: torch device for inputs/targets and mapping tensors.
        pbar: progress bar; only ``pbar.update(1)`` is called per batch.
        label_mapping_tensor: maps raw dataset labels -> contiguous class indices.
        task_label_mapping_tensor: maps raw dataset labels -> task indices.
        class_weights: optional per-class weights forwarded to ``criterion``.
        distill_weight: multiplier for the gate-distillation MSE (default 50.0,
            matching the previous hard-coded constant).

    Returns:
        (avg_train_loss, accuracy, class_corrects_tensor, class_samples_tensor)
    """
    model.train()
    running_loss = 0.0
    running_corrects = 0
    label_mapping_tensor = label_mapping_tensor.to(device)
    task_label_mapping_tensor = task_label_mapping_tensor.to(device)

    if class_weights is not None:
        class_weights = class_weights.to(device)

    # Collect all unique (mapped) labels in the training set to pre-seed the tallies.
    all_labels = dataloader.dataset.targets
    unique_labels = label_mapping_tensor[np.unique(all_labels)]
    class_corrects = {label.item(): 0 for label in unique_labels}
    class_samples = {label.item(): 0 for label in unique_labels}

    # Hoist the loss module out of the batch loop; it is stateless.
    mse_loss = nn.MSELoss()

    for inputs, raw_labels in dataloader:
        inputs, raw_labels = inputs.to(device), raw_labels.to(device)
        optimizer.zero_grad()

        _, outputs, tsk_outputs, all_gate_weights = model(inputs)

        # The teacher is a fixed distillation target: detaching it leaves the
        # student's gradients unchanged (MSE grad w.r.t. the input does not
        # depend on the target's grad status) and avoids building a graph /
        # accumulating stray grads on the teacher's parameters.
        with torch.no_grad():
            _, _, _, old_all_gate_weights = old_model(inputs)

        # Distill only the gate columns the old model also has.
        num_old_rows = old_all_gate_weights.size(1)
        distillation_loss = mse_loss(all_gate_weights[:, :num_old_rows], old_all_gate_weights)

        labels = label_mapping_tensor[raw_labels]
        task_labels = task_label_mapping_tensor[raw_labels]

        loss = distillation_loss * distill_weight
        if criterion is not None:
            if class_weights is not None:
                loss = loss + criterion(outputs, labels, weights=class_weights)
            else:
                loss = loss + criterion(outputs, labels)
        if criterion_label is not None:
            loss = loss + weight_label * criterion_label(tsk_outputs, task_labels)
        loss.backward()
        optimizer.step()

        # .item() detaches from the graph; accumulating the live tensor would
        # keep every batch's autograd graph alive for the whole epoch.
        running_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        running_corrects += (predicted == labels).sum().item()

        # Tally per-class stats once per DISTINCT label in the batch; iterating
        # every sample would multiply both counts by the label's batch frequency.
        for lbl in torch.unique(labels):
            mask = labels == lbl
            class_corrects[lbl.item()] += (predicted[mask] == lbl).sum().item()
            class_samples[lbl.item()] += mask.sum().item()

        pbar.update(1)

    avg_train_loss = running_loss / len(dataloader)
    accuracy = running_corrects / len(dataloader.dataset)

    # Convert the dicts to dense tensors indexed by class id for the caller.
    class_corrects_tensor = torch.tensor([class_corrects.get(i, 0) for i in range(dataloader.dataset.num_classes)])
    class_samples_tensor = torch.tensor([class_samples.get(i, 0) for i in range(dataloader.dataset.num_classes)])

    return avg_train_loss, accuracy, class_corrects_tensor, class_samples_tensor

def validate_one_epoch(model, dataloader, criterion, label_mapping_tensor, task_label_mapping_tensor, device):
    """Evaluate one epoch, returning overall and per-class statistics.

    Args:
        model: network whose forward returns (_, class_logits, task_logits, _).
        dataloader: validation loader; its dataset must expose ``targets``.
        criterion: classification loss; defaults to ``nn.CrossEntropyLoss`` when None.
        label_mapping_tensor: maps raw dataset labels -> contiguous class indices.
        task_label_mapping_tensor: maps raw dataset labels -> task indices.
        device: torch device for inputs/targets and mapping tensors.

    Returns:
        (avg_val_loss, avg_val_accuracy, class_corrects_tensor, class_total_tensor)

    Side effects: prints per-task accuracy.
    """
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    # Move mapping tensors once, before the loop.
    label_mapping_tensor = label_mapping_tensor.to(device)
    task_label_mapping_tensor = task_label_mapping_tensor.to(device)

    # Pre-seed the per-class / per-task tallies from the full dataset.
    all_labels = dataloader.dataset.targets
    unique_labels = label_mapping_tensor[np.unique(all_labels)]
    unique_tasks = torch.unique(task_label_mapping_tensor[all_labels])

    class_corrects = {label.item(): 0 for label in unique_labels}
    class_total = {label.item(): 0 for label in unique_labels}

    task_corrects = {label.item(): 0 for label in unique_tasks}
    task_total = {label.item(): 0 for label in unique_tasks}

    if criterion is None:
        criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for inputs, raw_labels in dataloader:
            inputs, raw_labels = inputs.to(device), raw_labels.to(device)

            _, outputs, tsk_outputs, _ = model(inputs)
            # Map all raw labels in one indexing operation.
            labels = label_mapping_tensor[raw_labels]
            task_labels = task_label_mapping_tensor[raw_labels]
            loss = criterion(outputs, labels)
            # .item() keeps the accumulator a plain float.
            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            _, tsk_predicted = torch.max(tsk_outputs, 1)
            running_corrects += (predicted == labels).sum().item()

            # Tally once per DISTINCT label in the batch; iterating every
            # sample would inflate both counts by the label's batch frequency.
            for lbl in torch.unique(labels):
                mask = labels == lbl
                class_corrects[lbl.item()] += (predicted[mask] == lbl).sum().item()
                class_total[lbl.item()] += mask.sum().item()

            for tsk in torch.unique(task_labels):
                task_mask = task_labels == tsk
                task_corrects[tsk.item()] += (tsk_predicted[task_mask] == tsk).sum().item()
                task_total[tsk.item()] += task_mask.sum().item()

    task_accuracy = {t.item(): task_corrects[t.item()] / task_total[t.item()] for t in unique_tasks}
    print(f"Per Task Accuracy: {task_accuracy}")
    avg_val_loss = running_loss / len(dataloader)
    avg_val_accuracy = running_corrects / len(dataloader.dataset)

    # Convert the dicts to dense tensors indexed by class id for the caller.
    class_corrects_tensor = torch.tensor([class_corrects.get(i, 0) for i in range(max(class_total.keys()) + 1)])
    class_total_tensor = torch.tensor([class_total.get(i, 0) for i in range(max(class_total.keys()) + 1)])

    return avg_val_loss, avg_val_accuracy, class_corrects_tensor, class_total_tensor





def train_model(device, model, old_model, task_class_orders, dataloaders, criterion, criterion_label,
                weight_label, optimizer, scheduler, label_mapping, num_epochs=25, stop_accuracy=None,
                use_weighted_loss=False, weighted_loss_momentum=0.9):
    """Train ``model`` for up to ``num_epochs`` epochs and return the best checkpoint.

    Builds label/task mapping tensors from ``label_mapping`` and ``task_class_orders``,
    alternates train/validate epochs, optionally maintains momentum-smoothed class
    weights, keeps the state dict with the best validation accuracy, and stops early
    when ``stop_accuracy`` is reached.

    Args:
        device: torch device used for training and validation.
        model: student network to train.
        old_model: teacher network used for gate-weight distillation.
        task_class_orders: list of per-task class-index lists; defines the
            raw-label -> task-index mapping.
        dataloaders: dict with 'train' and 'val' DataLoaders.
        criterion / criterion_label / weight_label: losses and task-loss weight
            forwarded to ``train_one_epoch``.
        optimizer: optimizer over the student's parameters.
        scheduler: LR scheduler; stepped ONCE per call (see note below).
        label_mapping: dict raw label -> contiguous class index.
        num_epochs: maximum number of epochs.
        stop_accuracy: optional validation-accuracy early-stop threshold.
        use_weighted_loss: enable momentum-smoothed per-class loss weights.
        weighted_loss_momentum: EMA momentum for the class weights.

    Returns:
        ``model`` with the best-validation-accuracy weights loaded.
    """
    total_steps = len(dataloaders['train']) * num_epochs
    pbar = tqdm(total=total_steps, desc='Total Training Progress')

    best_model = model.state_dict()
    best_accuracy = 0

    class_weights = None

    # Build the raw-label -> class-index mapping tensor.
    max_label = max(label_mapping.keys())
    label_mapping_array = np.zeros(max_label + 1, dtype=int)
    for original_label, new_label in label_mapping.items():
        label_mapping_array[original_label] = new_label
    label_mapping_tensor = torch.from_numpy(label_mapping_array)

    # Build the raw-label -> task-index mapping tensor from the per-task class lists.
    max_label = max(max(task) for task in task_class_orders)
    task_label_mapping_array = np.zeros(max_label + 1, dtype=int)
    for task_idx, task_classes in enumerate(task_class_orders):
        for class_idx in task_classes:
            task_label_mapping_array[class_idx] = task_idx
    task_label_mapping_tensor = torch.from_numpy(task_label_mapping_array)

    for epoch in range(num_epochs):
        print('-' * 15)
        print(f'Epoch {epoch+1}/{num_epochs}')
        print('-' * 15)

        train_loss, _, _, _ = train_one_epoch(model, old_model, dataloaders['train'], criterion,
                                              criterion_label, weight_label, optimizer, device, pbar,
                                              label_mapping_tensor, task_label_mapping_tensor, class_weights)
        print(f'Epoch {epoch+1}/{num_epochs} - Train Loss: {train_loss:.4f}')
        val_loss, val_accuracy, _, _ = validate_one_epoch(model, dataloaders['val'], criterion,
                                                          label_mapping_tensor, task_label_mapping_tensor, device)
        print(f'Validation Loss: {val_loss:.4f} - Validation Accuracy: {val_accuracy:.4f}')

        if use_weighted_loss:
            this_class_weights = update_class_weights(model, dataloaders, device,
                                                      label_mapping_tensor=label_mapping_tensor)
            # Tensor.to() is not in-place: the result must be rebound, otherwise
            # the weights silently stay on their original device.
            this_class_weights = this_class_weights.to(device)
            if class_weights is not None:
                class_weights = weighted_loss_momentum * class_weights + (1 - weighted_loss_momentum) * this_class_weights
            else:
                class_weights = this_class_weights
            print(f'Class Weights: {class_weights}')

        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            print('Accuracy increased, updating the best accuracy...')
            best_model = model.state_dict()

        if stop_accuracy is not None and val_accuracy >= stop_accuracy:
            print(f'Validation accuracy reached {stop_accuracy}, stopping training...')
            break

    # NOTE(review): the scheduler is stepped once per train_model call, not per
    # epoch — presumably deliberate for task-level scheduling; confirm.
    scheduler.step()
    pbar.close()
    model.load_state_dict(best_model)
    return model

    #-----------数据集/测试相关
    
from torch.optim import lr_scheduler

def create_cyclic_lr_scheduler(optimizer, base_lr=0.00001, max_lr=0.1, num_epochs=100, cycle_momentum=True):
    """Create a CyclicLR scheduler using the 'triangular2' policy (5 cycles over training).

    Args:
        optimizer: the optimizer the scheduler is attached to.
        base_lr: lower learning-rate bound of each cycle.
        max_lr: upper learning-rate bound of each cycle.
        num_epochs: total number of training epochs; each of the 5 cycles
            spans ``num_epochs // 5`` steps up and the same down.
        cycle_momentum: whether to cycle momentum inversely to the LR
            (requires the optimizer to have a momentum parameter).

    Returns:
        A configured ``lr_scheduler.CyclicLR`` instance.
    """
    # Steps per half-cycle: five full cycles across training. Clamp to at
    # least 1 so that num_epochs < 5 does not yield a degenerate zero-length
    # cycle (integer division would give 0).
    half_cycle = max(1, num_epochs // 5)

    scheduler = lr_scheduler.CyclicLR(optimizer, mode='triangular2', base_lr=base_lr, max_lr=max_lr,
                                      step_size_up=half_cycle, step_size_down=half_cycle,
                                      cycle_momentum=cycle_momentum)

    return scheduler

# 在完整数据集上评估模型
def evaluate_model(device, label_mapping, model, dataloaders, criterion=None):
    """Evaluate ``model`` on the full validation set.

    Args:
        device: torch device to run evaluation on.
        label_mapping: dict raw dataset label -> contiguous class index.
        model: network whose forward returns (_, class_logits, _, _).
        dataloaders: dict containing a 'val' DataLoader.
        criterion: classification loss; defaults to ``nn.CrossEntropyLoss``.

    Returns:
        (mean_loss, accuracy) over the validation dataset.
    """
    model.to(device)
    model.eval()

    # Dense lookup table: index = raw label, value = mapped class index.
    table = np.zeros(max(label_mapping) + 1, dtype=int)
    for raw, mapped in label_mapping.items():
        table[raw] = mapped
    mapping = torch.from_numpy(table).to(device)

    if criterion is None:
        criterion = nn.CrossEntropyLoss()

    loss_sum = 0.0
    correct = 0
    with torch.no_grad():
        for batch_inputs, batch_labels in dataloaders['val']:
            # Keep data on the evaluation device.
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            _, logits, _, _ = model(batch_inputs)
            targets = mapping[batch_labels]
            batch_loss = criterion(logits, targets)
            loss_sum += batch_loss.item() * batch_inputs.size(0)
            correct += torch.sum(torch.argmax(logits, 1) == targets.data)

    n = len(dataloaders['val'].dataset)
    return loss_sum / n, correct.double() / n