import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils import data
from torchvision import datasets
import torchvision.models as models
from sklearn.metrics import roc_auc_score, confusion_matrix
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import os
from torch.optim import lr_scheduler


# ---------------- Load model ----------------
model = models.googlenet(pretrained=True)
print(model)
# Register extra head layers (Linear 1000->2, ReLU, Dropout).
# NOTE(review): add_module only registers these as child modules; GoogLeNet's
# forward() never calls them, so the network still outputs 1000 logits. To
# actually apply this head, model.fc would have to be replaced with an
# nn.Sequential — confirm the intended architecture.
model.add_module('fc1', nn.Linear(1000, 2))   # first new fully connected layer
model.add_module('relu1', nn.ReLU(inplace=True))
model.add_module('dropout1', nn.Dropout(0.5))
print(model)

# FIX: removed a stray bare identifier `aaa` that raised NameError at runtime.
# Freeze everything except the fully connected layers so only they are trained.
# ('fc' matches both the pretrained model.fc and the newly added fc1.)
for name, param in model.named_parameters():
    param.requires_grad = 'fc' in name

# ---------------- Data locations ----------------
data_path = 'D:/DeepModel/le_MultJpg'
train_path = os.path.join(data_path, 'train')
test_path = os.path.join(data_path, 'test')

# Run on GPU if available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
# SummaryWriter streams training metrics (loss, accuracy, ...) to a TensorBoard
# log directory so the run can be visualised and analysed later.
writer = SummaryWriter('D:/DeepModel/model train')
# Preprocessing pipelines. Both ends normalise with the ImageNet statistics;
# Normalize is stateless, so a single instance is shared between them.
_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])

# Training: random 224x224 resized crop + random horizontal flip (augmentation).
trans_train = transforms.Compose([
    transforms.RandomResizedCrop((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    _normalize,
])

# Evaluation: deterministic resize to 256 then centre crop to 224.
trans_test = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    _normalize,
])

def custom_target_transform(target):
    """Swap ImageFolder's class indices 0 and 1.

    ImageFolder assigns indices by folder order; here the malignant class
    should be 1 and the benign class 0, so the two indices are exchanged.
    Any other value is returned unchanged.
    """
    return {0: 1, 1: 0}.get(target, target)


# ---------------- Datasets, loaders, loss, optimizer ----------------
# Training set: (path, image transform, label-swapping target transform).
train_dataset = datasets.ImageFolder(train_path, trans_train,
                                     target_transform=custom_target_transform)
# Test set.
test_dataset = datasets.ImageFolder(test_path, trans_test,
                                    target_transform=custom_target_transform)

# Mini-batch size of 32 for both loaders.
train_batch_size = 32
test_batch_size = 32
# drop_last=True keeps every training batch full (the per-batch accuracy in the
# training loop divides by train_batch_size).
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size,
                                          shuffle=True, num_workers=0, drop_last=True)
# FIX: the test loader previously used drop_last=True, silently discarding up
# to batch_size-1 samples while accuracy was still divided by
# len(test_dataset), biasing the reported test metrics downward.
testloader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch_size,
                                         shuffle=True, num_workers=0, drop_last=False)

model.to(device)  # move the model to GPU/CPU

# Loss and optimizer. Only parameters with requires_grad=True receive
# gradients, so passing just those avoids carrying frozen parameters in the
# optimizer state; the training result is unchanged.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD((p for p in model.parameters() if p.requires_grad),
                            lr=0.001, weight_decay=0.001, momentum=0.9)
# Alternative: Adam
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.001)

# Learning-rate schedule: halve the LR every 5 epochs.
# NOTE(review): this assignment shadows the imported lr_scheduler module name.
lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

# ---------------------- model training ----------------------
EPOCHS = 100  # number of training epochs

# Per-epoch histories of loss and accuracy, kept for later plotting/analysis.
train_epoch_loss, test_epoch_loss = [], []
train_epoch_acc, test_epoch_acc = [], []

# -------------- running maxima of every tracked metric --------------
mx_train_acc = mx_test_acc = 0
mx_train_auc = mx_test_auc = 0
mx_train_spe = mx_test_spe = 0
mx_train_sen = mx_test_sen = 0
for epoch in range(EPOCHS):  # iterate over training epochs
    # -------------- train --------------
    model.train()
    train_loss = 0
    train_correct = 0
    for step, (train_img, train_label) in enumerate(trainloader):  # iterate over training batches
        train_img, train_label = train_img.to(device), train_label.to(device)  # move batch to device
        output = model(train_img)  # forward pass
        loss = criterion(output, train_label)  # compute loss
        optimizer.zero_grad()  # clear accumulated gradients
        # FIX: removed loss.requires_grad_(True) — the loss already requires
        # grad (the fc parameters are trainable), so the call was at best a
        # no-op and could only mask an accidentally frozen graph.
        loss.backward()  # backpropagate
        optimizer.step()  # update parameters

        correct_num = torch.sum(torch.argmax(output, dim=1) == train_label)  # correct predictions in this batch
        train_correct += correct_num  # accumulate correct count
        # FIX: accumulate a detached copy so the autograd graph of every batch
        # is not kept alive for the whole epoch.
        train_loss += loss.detach()
        # FIX: use a globally increasing step so TensorBoard batch curves from
        # later epochs do not overwrite those of earlier epochs.
        global_step = epoch * len(trainloader) + step
        writer.add_scalar('train_loss_batch', loss.item(), global_step)
        # drop_last=True on the train loader guarantees full batches, so
        # dividing by train_batch_size is exact here.
        accurate_rate = correct_num / train_batch_size
        writer.add_scalar('train_accurate_batch', accurate_rate.item(), global_step)

    train_epoch_loss.append(train_loss / len(trainloader))
    train_epoch_acc.append(train_correct / len(train_dataset))
    writer.add_scalar('train_loss_epoch', train_loss / len(trainloader), epoch)
    writer.add_scalar('train_accurate_epoch', train_correct / len(train_dataset), epoch)

    # -------------- validation --------------
    model.eval()
    test_loss, test_correct = 0, 0
    with torch.no_grad():  # FIX: no gradients needed during evaluation
        for test_img, test_label in testloader:
            test_img, test_label = test_img.to(device), test_label.to(device)
            output = model(test_img)
            loss = criterion(output, test_label)

            correct_num = torch.sum(torch.argmax(output, dim=1) == test_label)
            test_correct += correct_num
            test_loss += loss

    test_epoch_loss.append(test_loss / len(testloader))
    test_epoch_acc.append(test_correct / len(test_dataset))
    # FIX: these two scalars previously logged the *train* loss/accuracy under
    # the test tags.
    writer.add_scalar('test_loss_epoch', test_loss / len(testloader), epoch)
    writer.add_scalar('test_accurate_epoch', test_correct / len(test_dataset), epoch)

    print('epoch{}, train_loss={}, train_acc={}'.format(epoch, train_loss / len(trainloader),
                                                        train_correct / len(train_dataset)))
    if train_correct / len(train_dataset) > mx_train_acc:
        mx_train_acc = train_correct / len(train_dataset)
    print(
        'epoch{}, valid_loss={}, valid_acc={}'.format(epoch, test_loss / len(testloader),
                                                      test_correct / len(test_dataset)))
    if test_correct / len(test_dataset) > mx_test_acc:
        mx_test_acc = test_correct / len(test_dataset)

    # -------------- AUC / specificity / sensitivity --------------
    y_true_train, y_pred_train = [], []
    y_true_test, y_pred_test = [], []
    model.eval()
    with torch.no_grad():  # FIX: metric passes do not need autograd either
        for train_img, train_label in trainloader:
            train_img, train_label = train_img.to(device), train_label.to(device)
            output = model(train_img)
            output_prob = torch.nn.functional.softmax(output, dim=1)

            y_true_train.extend(train_label.cpu().numpy())
            y_pred_train.extend(output_prob[:, 1].cpu().numpy())  # positive-class probability

        for test_img, test_label in testloader:
            test_img, test_label = test_img.to(device), test_label.to(device)
            output = model(test_img)
            output_prob = torch.nn.functional.softmax(output, dim=1)

            y_true_test.extend(test_label.cpu().numpy())
            y_pred_test.extend(output_prob[:, 1].cpu().numpy())  # positive-class probability

    y_true_train = np.array(y_true_train)
    y_pred_train = np.array(y_pred_train)
    y_true_test = np.array(y_true_test)
    y_pred_test = np.array(y_pred_test)

    # AUC from the positive-class probabilities.
    auc_train = roc_auc_score(y_true_train, y_pred_train)
    auc_test = roc_auc_score(y_true_test, y_pred_test)
    writer.add_scalar('train_auc_epoch', auc_train, epoch)
    writer.add_scalar('test_auc_epoch', auc_test, epoch)

    # Confusion matrix at a 0.5 threshold -> specificity / sensitivity.
    train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_true_train, y_pred_train > 0.5).ravel()
    train_specificity = train_tn / (train_tn + train_fp)
    train_sensitivity = train_tp / (train_tp + train_fn)

    tn, fp, fn, tp = confusion_matrix(y_true_test, y_pred_test > 0.5).ravel()
    specificity = tn / (tn + fp)
    sensitivity = tp / (tp + fn)

    writer.add_scalar('train_specificity_epoch', train_specificity, epoch)
    writer.add_scalar('train_sensitivity_epoch', train_sensitivity, epoch)
    writer.add_scalar('test_specificity_epoch', specificity, epoch)
    writer.add_scalar('test_sensitivity_epoch', sensitivity, epoch)

    print('epoch{}, train_auc={}, train_spe={}, train_sen={}'.format(epoch, auc_train, train_specificity,
                                                                     train_sensitivity))
    mx_train_auc = max(mx_train_auc, auc_train)
    mx_train_spe = max(mx_train_spe, train_specificity)
    mx_train_sen = max(mx_train_sen, train_sensitivity)
    print('epoch{}, test_auc={}, test_spe={}, test_sen={}'.format(epoch, auc_test, specificity, sensitivity))
    mx_test_auc = max(mx_test_auc, auc_test)
    mx_test_spe = max(mx_test_spe, specificity)
    mx_test_sen = max(mx_test_sen, sensitivity)
    print('\n')

    # FIX: step the LR schedule once per epoch — the StepLR created above was
    # never stepped, so the learning rate never actually decayed.
    lr_scheduler.step()
print('max_train_auc={}, max_train_acc={}, max_train_spe={}, max_train_sen={}'.format(mx_train_auc, mx_train_acc,
                                                                                      mx_train_spe, mx_train_sen))
print('max_test_auc={}, max_test_acc={}, max_test_spe={}, max_test_sen={}'.format(mx_test_auc, mx_test_acc, mx_test_spe,
                                                                                  mx_test_sen))

'''
    Specificity（SPE）：衡量的是模型正确识别负样本（即非目标类别）的能力。
    SPE越高，说明模型将非目标类别误判为目标类别的比例越低，即模型对非目标类别的识别能力越强。
    在需要严格控制误报率（即将非目标类别误判为目标类别）的场景中，高SPE是好的。

    Sensitivity（SEN）：衡量的是模型正确识别正样本（即目标类别）的能力。
    SEN越高，说明模型将目标类别误判为非目标类别的比例越低，即模型对目标类别的识别能力越强。
    在需要尽可能多地捕获目标类别（即使可能增加误报率）的场景中，高SEN是好的。
'''

# ---------------- Save the trained model ----------------
save_path = 'D:/DeepModel/model_train/data/googlenet_lung.pth'
# FIX: exist_ok=True replaces the racy check-then-create pattern.
os.makedirs(os.path.dirname(save_path), exist_ok=True)
# Persist the weights so later feature extraction can load them without retraining.
torch.save(model.state_dict(), save_path)

# Convert the per-epoch tensor metrics into plain Python floats for plotting.
train_loss_array = [loss.item() for loss in train_epoch_loss]
train_acc_array = [acc.item() for acc in train_epoch_acc]
test_loss_array = [loss.item() for loss in test_epoch_loss]
test_acc_array = [acc.item() for acc in test_epoch_acc]

# Bundle the curves under descriptive keys.
result_dict = {'train_loss_array': train_loss_array,
               'train_acc_array': train_acc_array,
               'test_loss_array': test_loss_array,
               'test_acc_array': test_acc_array}



# -------------  1000-dim feature extraction  -------------
# Run feature extraction on CPU.
device = torch.device('cpu')
model = model.to(device)
# FIX: load the checkpoint this script just saved; the previous hard-coded
# 'googlenet_100_32.pth' path did not match the file written above.
model.load_state_dict(torch.load(save_path))
# Remove the extra head modules so the forward output is the pretrained fc
# layer's 1000-dim vector.
print(model._modules.keys())
del model.fc1
del model.relu1
del model.dropout1
print(model._modules.keys())

# Evaluation mode: disables dropout and uses running BatchNorm statistics.
model.eval()

features_list = []  # extracted feature vectors
labels_list = []  # matching labels

all_path = 'D:/DeepModel/le-mulpng1'
# Deterministic eval preprocessing (same recipe as trans_test).
trans_all = transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
all_dataset = datasets.ImageFolder(all_path, trans_all, target_transform=custom_target_transform)
# shuffle=False keeps the saved features aligned with the dataset order.
allloader = torch.utils.data.DataLoader(all_dataset, batch_size=test_batch_size, shuffle=False, num_workers=0)
# Extract features batch by batch.
with torch.no_grad():  # no gradients needed for inference
    for images, labels in allloader:
        images = images.to(device)
        labels = labels.to(device)
        features = model(images)  # network output used as the feature vector
        features_list.extend(features.cpu().numpy())  # collect per-sample numpy rows
        labels_list.extend(labels.cpu().numpy())  # collect matching labels

# Stack the per-sample features into one numpy array.
features_np = np.array(features_list)

# Save features (plus labels, if collected) to CSV without a row index.
df = pd.DataFrame(features_np)

if labels_list:
    df['label'] = labels_list

df.to_csv('D:/DeepModel/googlenet-features.csv', index=False)

