# Data loading + model import + loss + optimizer + train/validate + learning curves
from functools import total_ordering
import math
import os
from re import I, L
from sqlite3 import Row
import time
from tkinter.messagebox import YES
from tkinter.ttk import Style
import matplotlib.pyplot as plt
import torch
from torch import nn
import torchvision
from torchvision import transforms
import torch.functional as F
from sklearn.metrics import roc_curve, auc
import numpy as np
from torch.utils.data import DataLoader, Subset
from data_read import mf_data_seg, mf_data_seg_A
from u_net_mf import *
from loss_function_mf2 import *
from matplotlib import rcParams
import pandas as pd
import albumentations as A
from albumentations.augmentations.transforms import Normalize
from albumentations.pytorch.transforms import ToTensorV2
import cv2
import numpy as np
# Use the SimHei font so CJK characters render correctly in matplotlib figures
rcParams['font.sans-serif'] = ['SimHei']
from torch.utils.tensorboard import SummaryWriter

#%% Device selection and data-augmentation pipelines

# Use the second GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# Training-time augmentation pipeline.
# NOTE(review): A.Flip already flips horizontally/vertically at random, so the
# extra A.HorizontalFlip below stacks a second horizontal-flip chance on top —
# confirm this doubling is intentional.
# NOTE(review): Normalize is imported at the top of the file but never applied
# in any pipeline — confirm the dataset normalises images itself.
data_trans_train = A.Compose([
    A.Resize(height=512, width=512),
    A.Flip(p=0.5),
    A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=180, p=0.5, border_mode=cv2.BORDER_CONSTANT),
    A.HorizontalFlip(p=0.5),
    ToTensorV2()
])

# Validation pipeline: resize only, no random augmentation.
data_trans_val = A.Compose([
    A.Resize(height=512, width=512),
    ToTensorV2()
])

# Test pipeline: no augmentation, preprocessing only.
data_trans_test = A.Compose([
    A.Resize(height=512, width=512),
    ToTensorV2()
])

# Dataset root (output of the earlier train/val/test split step).
root_path = "/data/zhouhai/fuxian/视/处理数据集/新数据"  # folder produced by the split
# Build the three splits. Each split gets its matching transform pipeline.
# FIX: the validation set previously reused data_trans_train (random flips /
# shifts / rotations), which makes validation loss noisy and unrepresentative;
# it now uses the augmentation-free data_trans_val defined above.
train_data = mf_data_seg_A(root_path=os.path.join(root_path, 'train'), flag='train', transforms=data_trans_train)
val_data = mf_data_seg_A(root_path=os.path.join(root_path, 'val'), flag='val', transforms=data_trans_val)
test_data = mf_data_seg_A(root_path=os.path.join(root_path, 'test'), flag='test', transforms=data_trans_test)

# Split sizes.
train_data_size = len(train_data)
val_data_size = len(val_data)
test_data_size = len(test_data)

print('训练集长度: {}'.format(train_data_size))
print('验证集长度: {}'.format(val_data_size))
print('测试集长度: {}'.format(test_data_size))

# DataLoaders: only the training split is shuffled.
batch_size = 5
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=False)
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

# Batches per split (ceil: the last batch may be partial).
train_batch_num = math.ceil(train_data_size / batch_size)
val_batch_num = math.ceil(val_data_size / batch_size)
test_batch_num = math.ceil(test_data_size / batch_size)

print(f"训练集批次数量: {train_batch_num}")
print(f"验证集批次数量: {val_batch_num}")
print(f"测试集批次数量: {test_batch_num}")


#损失函数
patience = 15# 最多允许验证损失不下降的轮数
min_delta = 1e-4  # 损失下降的最小幅度
best_val_loss = float('inf')  # 记录验证集上的最佳损失
patience_counter = 0  # 记录验证损失未下降的轮数
epoch=100
#优化器
learning_rate =  1e-5

#设置训练网络的一些参数
image_size = 512
center_point = (256, 256)  # 辅助点生成的中心点
target_point = (230, 135)  # 新的目标点
target_pixel = target_point  # 已经是像素坐标，不需要转换
max_layers =9# 最大扩展层数
all_results_df = pd.DataFrame(columns=["Run", "Layer", "Sensitivity", "Specificity", "Average"])
sensitivity_matrix=[]
writer = SummaryWriter('/data/zhouhai/fuxian/视/普通训练/experiment_1')  # 创建一个新的日志文件夹

# Outer experiment loop: the full train -> evaluate pipeline is repeated 5 times
# with freshly initialised weights so run-to-run variance can be measured.
for run in range(5):
    model = U_Net().to(device)
    # Re-initialise every parameter-bearing submodule so each run starts fresh.
    model.apply(lambda m: m.reset_parameters() if hasattr(m, 'reset_parameters') else None)
    # Make sure the checkpoint directory exists, then create a new optimizer
    # bound to the freshly initialised parameters.
    os.makedirs('/data/zhouhai/fuxian/视/普通训练/模型', exist_ok=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    
    # Train one checkpoint per auxiliary-point layer.
    # NOTE(review): only layers 8 and 9 are trained here, yet the evaluation
    # below loads checkpoints 0..8 — those earlier files must already exist
    # from a previous experiment; verify before running.
    for layer in  [8,9]:
        model_dir='/data/zhouhai/fuxian/视/普通训练/模型/'
        # NOTE(review): model_path is computed but never used (dead code).
        model_path = f"{model_dir}{layer-1}best_model.pth"
        train_loss_list, val_loss_list = [], []
        patience_counter = 0
    
        step = 2**layer-1 # auxiliary-point expansion step size (not used below)
        layer_center_accuracies = []
        center_val_loss_values = []
        sampled_val_loss_values = []
        # Best validation loss for this layer.
        # NOTE(review): initialised to 10 rather than +inf — a model whose
        # validation loss never drops below 10 would never be checkpointed.
        best_val_loss=10
        for epochs in range(epoch):
            model.train()
            epoch_train_loss = 0
            ob=0  # counts batches without positive auxiliary labels (only used by the commented-out branch below)
            for data in train_dataloader:
                optimizer.zero_grad()
                img, label = data[0].to(device), data[1].to(device)
                # Per-batch target coordinates supplied by the dataset.
                # NOTE(review): this overwrites the module-level target_pixel;
                # the validation loop below reuses whatever the LAST training
                # batch set — confirm that is intended.
                target_pixel=data[2]
                #print("Label range:", label.min().item(), label.max().item())
                pred = model(img)

                # Binary cross-entropy (with logits) at the target pixel only.
                target_loss = nn.functional.binary_cross_entropy_with_logits( 
                    pred[:, :, target_pixel[0], target_pixel[1]],
                    label[:, :, target_pixel[0], target_pixel[1]].float()
                )

                if layer== 0:
                    # Layer 0: optimise the target-point loss alone.
                    # NOTE(review): unreachable here — `layer` iterates over [8, 9].
                    total_loss = target_loss  # only the target-point loss
                    total_loss.backward()
                    optimizer.step()# update the model parameters
            

                elif layer != 0:
                    # Sample auxiliary supervision points around the image centre
                    # for the current expansion layer (helper brought in by the
                    # star imports at the top of the file).
                    auxiliary_points = generate_auxiliary_points(
                    center_x=center_point[0],
                    center_y=center_point[1],
                    current_layer=layer,
                    image_size=image_size,
                    exclude_point=target_pixel,
                    image=img,
                    threshold=0.1
                )
                    # Split the (N, 2) point array into row/column index tensors.
                    aux_y = auxiliary_points[:, 0].long()
                    aux_x = auxiliary_points[:, 1].long()

                    # Gather predictions and labels at every auxiliary point.
                    pred_aux = pred[:, :, aux_y, aux_x].contiguous().to(device)
                    label_aux = label[:, :, aux_y, aux_x].float().contiguous().to(device)
                    count_ones = torch.sum(label_aux)
                    count_ones = torch.sum(label_aux)  # total positive labels (duplicate of the line above)
                    total_aux_points = label_aux.numel()  # total number of auxiliary points
                    
                    # Auxiliary BCE loss over all sampled points.
                    aux_loss = nn.functional.binary_cross_entropy_with_logits(pred_aux, label_aux)
                    target_coord = np.array([target_pixel[0], target_pixel[1]])

                    total_loss = target_loss + aux_loss
                    # Main-task loss handling.
                    # (target_loss is already computed above.)
                    # Disabled alternative kept for reference: when the auxiliary
                    # task has no positive samples, optimise on the main-task
                    # loss only.
                    # if count_ones == 0:
                    #     ob+=1
                    #     del pred_aux, label_aux  # free the GPU tensors
                    #     torch.cuda.empty_cache()  # release cached GPU memory
                    #     optimizer.zero_grad()
                    #     target_loss.backward()  # back-propagate the main loss only
                    #     optimizer.step()
                    # else:
                    #     # with positive auxiliary samples, optimise the combined loss
                    #     total_loss = target_loss + aux_loss
                    total_loss.backward()
                    optimizer.step()
                
                epoch_train_loss += total_loss.item()

            longer= len(train_dataloader)

            train_loss_list.append(epoch_train_loss /(longer))
            # NOTE(review): the step argument is the constant `epoch` (=100), not
            # the loop variable `epochs` — every TensorBoard point lands on the
            # same x value; likely should be `epochs`.
            writer.add_scalar(f'Loss/train/layer_{layer}', train_loss_list[-1], epoch)
            model.eval()
            epoch_val_loss = 0
            # NOTE(review): results_df is re-created every epoch and never used.
            results_df = pd.DataFrame(columns=["Layer", "Best Validation Loss"])
            with torch.no_grad():
                for data in val_dataloader:
                    img, label = data[0].to(device), data[1].to(device)
                    pred = model(img)
                    # Target-point loss, using the coordinates left over from the
                    # last training batch (see NOTE above).
                    target_loss = nn.functional.binary_cross_entropy_with_logits(
                        pred[:, :, target_pixel[0], target_pixel[1]],
                        label[:, :, target_pixel[0], target_pixel[1]].float()
                    )
                        # Validation loss is the target-point loss alone.
                    total_val_loss = target_loss 
                    epoch_val_loss += total_val_loss.item()
                val_loss_list.append(epoch_val_loss / len(val_dataloader))
                # NOTE(review): same constant-step issue as the train scalar above.
                writer.add_scalar(f'Loss/val/layer_{layer}', val_loss_list[-1], epoch)
                # Checkpoint whenever validation loss improves by at least min_delta.
                if val_loss_list[-1]< best_val_loss - min_delta:
                    best_val_loss = val_loss_list[-1]
                    patience_counter = 0  # reset the patience counter
                    torch.save(model.state_dict(), '/data/zhouhai/fuxian/视/普通训练/模型/{}best_model.pth'.format(layer))
    #             print(f"Validation loss improved to {best_val_loss:.4f}.  {layer}Model saved.")
                else:
                    patience_counter += 1  # increment the patience counter
        #        print(f"No improvement in validation loss for {patience_counter}/{patience} epochs.")
                # # Early-stopping check (currently disabled):
                # if patience_counter >= patience:
                #     print(f"Early stopping triggered. Best validation loss: {best_val_loss:.4f}stop at{epochs}.")
                #     break  # exit the epoch loop
                # print(f"{layer}Epoch {epochs+1}/{epoch} - Train Loss: {train_loss_list[-1]:.4f}, Val Loss: {val_loss_list[-1]:.4f},patience:{patience_counter}")
                # Log weight/gradient histograms. Gradients come from the last
                # training step; also logged at the constant step `epoch`.
                for name, param in model.named_parameters():
                    writer.add_histogram(f'{name}_weights', param, epoch)
                    writer.add_histogram(f'{name}_gradients', param.grad, epoch)

                # Redraw the learning curves onto the same figure every epoch.
                plt.plot(val_loss_list, label='Validation Loss')
                plt.plot(train_loss_list, label='train Loss')
                plt.xlabel('Epoch')
                plt.ylabel('Loss')
                plt.title('{}Validation Loss vs. {}Epochs'.format(layer,epochs))
                plt.savefig("{}检查.png".format(layer))
            # (the best loss would be recorded into a table here)

        # Clear the figure/axes before the next layer's curves.
        plt.clf()
        plt.cla()
        print(best_val_loss)
    #%% Evaluation: reload the per-layer checkpoints and measure test performance.

    model_dir='/data/zhouhai/fuxian/视/普通训练/模型/'
    models = []
    # Load the checkpoints for layers 0..8.
    # NOTE(review): this run only wrote checkpoints 8 and 9, so files 0..7 must
    # pre-exist — torch.load raises FileNotFoundError otherwise.
    for i in range(0, 9):
        model = U_Net().to(device)
        model_path = f"{model_dir}{i}best_model.pth"
        model.load_state_dict(torch.load(model_path))  # load the saved parameters
        model = model.to(device)  # ensure the model lives on the chosen device
        models.append(model)
        print(f"Model type: {type(model)}")  # print the model type
    # Evaluation helper focused on the single target point.
    def evaluate_model(model, dataloader):
        """Return (sensitivity, specificity) at the target pixel, taken at the
        ROC point where sensitivity and specificity are closest to each other."""
        all_labels = []
        all_predictions = []
        model.eval()
        with torch.no_grad():
            for data in dataloader:
                    if len(data) == 3:  # test batches carry a third element (ignored)
                        img, label, _ = data  # unpack three values, drop the extra one
                    else:
                        img, label = data  # train/val batches carry two values
                    img, label = img.to(device), label.to(device)  # move to the device
                    pred = model(img)
                    pred_np = pred.detach().cpu().numpy()  # convert to a NumPy array
                    label_np = label.detach().cpu().numpy()  # convert to a NumPy array
                    pred_target = pred_np[:, :, target_pixel[0], target_pixel[1]]
                    label_target = label_np[:, :, target_pixel[0], target_pixel[1]]
                    #print(label_target.sum())
                    from sklearn.preprocessing import MinMaxScaler

                    # Flatten the per-batch target predictions.
                    pred_target_flattened = pred_target.flatten()

                    # NOTE(review): the scaler is fitted per batch, so scores from
                    # different batches are not on a comparable scale — confirm this
                    # is intended before trusting the pooled ROC curve below.
                    scaler = MinMaxScaler()

                    # Rescale this batch's scores into [0, 1].
                    pred_target_normalized = scaler.fit_transform(pred_target_flattened.reshape(-1, 1)).flatten()

                    # Accumulate scores and binarised ground truth.
                    all_predictions.extend(pred_target_normalized)
                    all_labels.extend((label_target > 0.5).flatten().astype(int))  # target-point ground truth
            # Sensitivity/specificity from the pooled ROC curve.
            fpr, tpr, thresholds = roc_curve(all_labels, all_predictions)
            sensitivities = tpr
            specificities = 1 - fpr

            # Choose the threshold where sensitivity and specificity are closest.
            avg_diff =abs (sensitivities - specificities)
            best_index = avg_diff.argmin()
            best_sensitivity = sensitivities[best_index]
            best_specificity = specificities[best_index]
            return best_sensitivity, best_specificity

    # Evaluate every loaded model in turn.


    # (`models` is the list of per-layer models loaded above.)


    # Write all result files into the results directory.
    os.chdir('/data/zhouhai/fuxian/视/普通训练/结果')

    # Reporting constants: number of layers and their display names.
    num_layers = 10  # assumes 10 layers per model family — TODO confirm
    layer_names = [f"Layer_{i}" for i in range(num_layers)]

    # (all_results_df, created before the run loop, accumulates every run's rows.)
    

    # Per-run averages used later for the summary plot.
    average_values_per_run = []
    # DataFrame holding the current run's per-model results.
    run_results_df = pd.DataFrame(columns=["Run", "Layer", "Sensitivity", "Specificity", "Average"])
    # NOTE(review): despite the name, model_filename is a model object (not a
    # path), and `index` is 1-based while checkpoints are numbered from 0.
    for index, model_filename in enumerate(models, start=1):
    # Label each row with the (1-based) position in the models list.
        layer_name = f"Layer_{index}"

        # Evaluate this model on the test set.

        # evaluate_model returns (sensitivity, specificity) at the balanced ROC point.
        sensitivity, specificity = evaluate_model(model_filename, test_dataloader)
        avg_value = (sensitivity + specificity) / 2

        # Append this model's row to the run DataFrame.
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this line
        # requires pandas < 2.0 (use pd.concat on newer versions).
        run_results_df = run_results_df.append({
            "Run": run,
            "Layer": layer_name,
            "Sensitivity": sensitivity,
            "Specificity": specificity,
            "Average": avg_value
        }, ignore_index=True)

     # Persist this run's results as a CSV file named after the run index.
    run_results_csv = os.path.join('/data/zhouhai/fuxian/视/普通训练/结果/', f"{run}.csv")
    run_results_df.to_csv(run_results_csv, index=False)
    print(f"Run {run}: All models results saved to {run_results_csv}")

    # Merge this run's rows into the overall results DataFrame.
    all_results_df = pd.concat([all_results_df, run_results_df], ignore_index=True)

    # Collect this run's sensitivities for the cross-run summary.
    sensitivity_list = run_results_df['Sensitivity'].tolist()
    
    # One row per run; converted to a NumPy matrix after the loop.
    sensitivity_matrix.append(sensitivity_list)

# Summarise the runs: stack the per-run sensitivity rows into a matrix and
# average across layers, giving one value per run.
sensitivity_array = np.array(sensitivity_matrix)
average_values_per_run = sensitivity_array.mean(axis=1)
print(average_values_per_run)

# Plot how the run-wise average evolves, then persist the figure.
plt.plot(average_values_per_run, marker='o', label="Average Sensitivity and Specificity")
plt.title("Run-wise Average Sensitivity and Specificity")
plt.xlabel("Run (Model)")
plt.ylabel("Average Sensitivity and Specificity")
plt.grid()
plt.legend()
plt.savefig("Run_Average_Comparison.png")
# Flush and close the TensorBoard writer now that all runs are finished.
writer.close()