import csv
import time
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from torchvision import datasets
import torchvision.models as models
from sklearn.metrics import roc_auc_score, confusion_matrix
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import os
import re

import shutil
import cv2
from PIL import Image
from torch.optim import lr_scheduler
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from torchvision.datasets import DatasetFolder, ImageFolder
from torchvision.models import vgg16
from torchvision.transforms import CenterCrop
from tqdm import tqdm
import pandas as pd
import wandb
from torchvision.models import alexnet

# Load AlexNet pre-trained on ImageNet.
model = models.alexnet(pretrained=True)
print(model)

# Drop the last three classifier modules (the original output head), so the
# remaining stack ends in a 4096-dim activation.
model.classifier = nn.Sequential(*list(model.classifier.children())[:-3])

# Attach a fresh two-layer head for binary classification.
model.classifier.add_module('linear1', nn.Linear(4096, 2048))
model.classifier.add_module('relu', nn.ReLU(inplace=True))
model.classifier.add_module('linear2', nn.Linear(2048, 2))

print(model)

# Freeze everything except the newly added 'linear*' layers, so only the new
# head is trained.
for name, param in model.named_parameters():
    param.requires_grad = 'linear' in name

# ---------------------- data / logging configuration ----------------------
# Raw strings avoid the invalid "\l" / "\s" escape sequences of the original
# literals (a SyntaxWarning on modern Python, and a latent bug if a path ever
# contained a valid escape such as "\t"). The byte values are unchanged.
data_path = r'E:\le-mulpng'
train_path = os.path.join(data_path, 'train')
test_path = os.path.join(data_path, 'test')
all_path = r'E:\le-mulpng1'
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# SummaryWriter streams training metrics (loss, accuracy, ...) to a
# TensorBoard log directory for later visualisation and analysis.
writer = SummaryWriter(r'E:\lung\model train\shengdu/alexnet_data')
# Training-set preprocessing: random 224x224 crop, random horizontal flip,
# tensor conversion, ImageNet mean/std normalisation.
trans_train = transforms.Compose([transforms.RandomResizedCrop((224, 224)),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# Test-time preprocessing: deterministic resize + center crop, same
# normalisation as training (no augmentation).
trans_test = transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Same deterministic pipeline for the feature-extraction dataset.
trans_all = transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])


# ImageFolder assigns class indices by folder order. We want the malignant
# class to be index 1 and the benign class index 0, so the two indices are swapped.
def custom_target_transform(target):
    """Swap the two ImageFolder class indices (0 <-> 1).

    The first folder's index becomes 1 and the second folder's index
    becomes 0; any other value is passed through unchanged.
    """
    swap = {0: 1, 1: 0}
    return swap.get(target, target)


# ---------------------- datasets and loaders ----------------------
# ImageFolder(root, transform, target_transform): folder names define the
# classes; custom_target_transform swaps the 0/1 indices.
train_dataset = datasets.ImageFolder(train_path, trans_train, target_transform=custom_target_transform)
test_dataset = datasets.ImageFolder(test_path, trans_test, target_transform=custom_target_transform)
all_dataset = datasets.ImageFolder(all_path, trans_all, target_transform=custom_target_transform)

# Mini-batch sizes for training and evaluation.
train_batch_size = 32
test_batch_size = 32
# `data` is the torch.utils.data module imported at the top of the file.
# Shuffle the train/test loaders; keep the feature-extraction loader in
# deterministic order so rows line up with the dataset.
trainloader = data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=0)
testloader = data.DataLoader(test_dataset, batch_size=test_batch_size, shuffle=True, num_workers=0)
allloader = data.DataLoader(all_dataset, batch_size=test_batch_size, shuffle=False, num_workers=0)

model.to(device)  # move the model onto the selected device (GPU or CPU)

# Cross-entropy loss over the two classes. Only the classifier's parameters
# are handed to SGD (the convolutional features are not optimised at all).
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
    model.classifier.parameters(),
    lr=0.001,
    momentum=0.9,
    weight_decay=0.001,
)

# Halve the learning rate every 5 scheduler steps.
# NOTE(review): this assignment shadows the imported `lr_scheduler` module;
# consider renaming the variable (e.g. `scheduler`) in a follow-up.
lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

# ---------------------- model training ----------------------
# Number of training epochs.
EPOCHS = 100
# Per-epoch history (train/test loss and accuracy) kept for later plotting.
train_epoch_loss, test_epoch_loss, train_epoch_acc, test_epoch_acc = [], [], [], []

# -------------- training --------------
for epoch in range(EPOCHS):
    model.train()
    train_loss = 0
    train_correct = 0
    for step, (train_img, train_label) in enumerate(trainloader):
        train_img, train_label = train_img.to(device), train_label.to(device)
        output = model(train_img)                 # forward pass
        loss = criterion(output, train_label)     # compute loss
        optimizer.zero_grad()
        loss.backward()                           # backprop
        optimizer.step()                          # update head parameters

        # Correct predictions in this batch.
        correct_num = torch.sum(torch.argmax(output, dim=1) == train_label)
        train_correct += correct_num
        # detach() so the running total does not keep every batch's autograd
        # graph alive (the original `+= loss` leaked graph memory).
        train_loss += loss.detach()

        # BUG FIX: use a monotonically increasing global step; the original
        # passed `step`, which resets each epoch, so per-batch curves from
        # different epochs overwrote each other in TensorBoard.
        global_step = epoch * len(trainloader) + step
        writer.add_scalar('train_loss_batch', loss.item(), global_step)
        # BUG FIX: divide by the actual batch size, not the nominal one, so
        # the last (possibly smaller) batch is not under-counted.
        accurat_rate = correct_num / train_label.size(0)
        writer.add_scalar('train_accurate_batch', accurat_rate.item(), global_step)

    train_epoch_loss.append(train_loss / len(trainloader))
    train_epoch_acc.append(train_correct / len(train_dataset))
    writer.add_scalar('train_loss_epoch', train_loss / len(trainloader), epoch)
    writer.add_scalar('train_accurate_epoch', train_correct / len(train_dataset), epoch)

    # -------------- validation --------------
    model.eval()
    test_loss, test_correct = 0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for test_img, test_label in testloader:
            test_img, test_label = test_img.to(device), test_label.to(device)
            output = model(test_img)
            loss = criterion(output, test_label)

            correct_num = torch.sum(torch.argmax(output, dim=1) == test_label)
            test_correct += correct_num
            test_loss += loss

    test_epoch_loss.append(test_loss / len(testloader))
    test_epoch_acc.append(test_correct / len(test_dataset))
    # BUG FIX: these two scalars previously logged the TRAIN metrics under
    # the test tags, making the TensorBoard test curves meaningless.
    writer.add_scalar('test_loss_epoch', test_loss / len(testloader), epoch)
    writer.add_scalar('test_accurate_epoch', test_correct / len(test_dataset), epoch)

    # BUG FIX: advance the LR schedule once per epoch — the StepLR instance
    # was created but never stepped, so the learning rate never decayed.
    lr_scheduler.step()

    print('epoch{}, train_loss={}, train_acc={}'.format(epoch, train_loss / len(trainloader),
                                                        train_correct / len(train_dataset)))
    print(
        'epoch{}, valid_loss={}, valid_acc={}'.format(epoch, test_loss / len(testloader),
                                                      test_correct / len(test_dataset)))
    print('\n')

# ---------------------- save model & collect metrics ----------------------
save_path = 'E:/lung/model train/shengdu/alexnet_lung.pth'
# Target directory for the checkpoint.
dir_path = os.path.dirname(save_path)
# exist_ok=True replaces the original exists()/makedirs() pair, which had a
# check-then-create race.
os.makedirs(dir_path, exist_ok=True)
# Persist the trained weights so feature extraction can reload them later
# instead of retraining.
torch.save(model.state_dict(), save_path)

# Convert the per-epoch values to plain Python floats for plotting/serialising.
# float(x) handles both 0-dim tensors and already-plain numbers.
train_loss_array = [float(loss) for loss in train_epoch_loss]
train_acc_array = [float(acc) for acc in train_epoch_acc]
test_loss_array = [float(loss) for loss in test_epoch_loss]
test_acc_array = [float(acc) for acc in test_epoch_acc]

result_dict = {'train_loss_array': train_loss_array,
               'train_acc_array': train_acc_array,
               'test_loss_array': test_loss_array,
               'test_acc_array': test_acc_array}



# -------------  feature extraction  -------------
# Reload the trained weights and strip the classifier down to 'linear1' so a
# forward pass emits one 2048-dim feature vector per image.

model = model.to(torch.device('cpu'))
# BUG FIX: load from the same path the checkpoint was saved to (save_path);
# the original loaded a bare relative filename that does not exist.
model.load_state_dict(torch.load(save_path, map_location=torch.device('cpu')))
# Remove the trailing ReLU and Linear(2048, 2) so the output of 'linear1'
# (2048 values, not 1000 as the old comment claimed) becomes the feature.
model.classifier = nn.Sequential(*list(model.classifier.children())[:-2])
print(model)


# Evaluation mode: disable dropout for deterministic features.
model.eval()
features_list = []  # one 2048-dim feature vector per image
labels_list = []    # matching class label for each row

with torch.no_grad():  # no gradients needed for inference
    for images, labels in allloader:
        # BUG FIX: the model now lives on the CPU, so the inputs must stay on
        # the CPU as well. The original moved them to `device`, which crashed
        # with a device mismatch whenever CUDA was available.
        features = model(images)  # (batch, 2048) features from 'linear1'
        features_list.extend(features.cpu().numpy())
        labels_list.extend(labels.cpu().numpy())

# Stack all per-image feature vectors into a single (N, 2048) array.
features_np = np.array(features_list)

# One CSV row per image; the final 'label' column holds the class index.
df = pd.DataFrame(features_np)

if labels_list:
    df['label'] = labels_list

# Raw string keeps the same bytes while avoiding invalid escape sequences.
df.to_csv(r'E:\lung\model train\shengdu/alexnet-features.csv', index=False)





