import os

from torchvision.models import SqueezeNet1_1_Weights

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from skimage import io
import numpy as np
import torchvision
import torchvision.transforms as transforms
import warnings
import torchvision.models as models
import torch.optim as optim
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import time

# Prefer the GPU when one is present; otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
warnings.filterwarnings('ignore')

# Channel-wise normalisation stats: (x - 0.5) / 0.5 maps pixels into [-1, 1].
mean = [0.5] * 3
std = [0.5] * 3
transform = transforms.Compose([
    transforms.Resize((224, 224)),      # SqueezeNet's expected input size
    transforms.RandomHorizontalFlip(),  # cheap train-time augmentation
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])


class butterfly_dataset(Dataset):
    """Dataset of butterfly images described by a CSV annotation file.

    The CSV is expected to carry the image filename in its first column and
    a string class name in a 'label' column. Labels are integer-encoded once
    at construction time into a new 'label_encoded' column.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file: path to the annotation CSV (filename + 'label' column).
            root_dir: directory containing the image files.
            transform: optional callable applied to each PIL image.
        """
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
        # Fit the encoder once over the whole label column so the
        # string -> int mapping is stable across __getitem__ calls.
        self.label_encoder = LabelEncoder()
        self.annotations['label_encoded'] = self.label_encoder.fit_transform(self.annotations['label'])

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        # First CSV column holds the image filename relative to root_dir.
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = io.imread(img_path)
        # Force 3 channels: a grayscale or RGBA file would otherwise break
        # the 3-channel Normalize further down the pipeline.
        image = Image.fromarray(image).convert('RGB')
        # Look the encoded label up by column name instead of the previous
        # hard-coded positional index (2), which silently breaks if the
        # CSV gains or reorders columns.
        y_label = torch.tensor(int(self.annotations['label_encoded'].iloc[index]))
        if self.transform:
            image = self.transform(image)

        return (image, y_label)


# Training annotations and image folder (Windows-style relative paths).
train_pth = r'archive\Training_set.csv'
train_root_dir = r'archive\train'

batch_size = 32
train_dataset = butterfly_dataset(train_pth, train_root_dir, transform=transform)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# Load SqueezeNet 1.1 with pretrained ImageNet weights. `weights=True` is
# not a valid value for the torchvision `weights` API — it expects the
# weights enum (imported at the top of the file, previously unused).
model = models.squeezenet1_1(weights=SqueezeNet1_1_Weights.DEFAULT)

# Freeze the backbone: only the replacement classifier head is trained.
for params in model.parameters():
    params.requires_grad = False

# SqueezeNet classifies with a 1x1 convolution rather than a Linear layer,
# so swap in a fresh conv mapping the same input channels to our classes.
num_features = model.classifier[1].in_channels
num_classes = 75
model.classifier[1] = nn.Conv2d(num_features, num_classes, kernel_size=(1, 1), stride=(1, 1))
model.num_classes = num_classes

# Cross-entropy over raw logits.
criterion = nn.CrossEntropyLoss()
lr = 0.0005
momentum = 0.9
# Hand the optimizer only the trainable parameters; the rest are frozen above.
optimizer = optim.SGD((p for p in model.parameters() if p.requires_grad), lr=lr, momentum=momentum)
# optimizer = optim.Adam(model.parameters(), lr=lr)

num_epochs = 30
model.to(device)
train_metrics = {'loss': [], 'accuracy': []}
start_time = time.time()
print("Begin")
for epoch in range(num_epochs):
    # Sum of per-sample losses over the epoch (scaled per batch below).
    running_loss = 0.0
    # Correctly classified samples / total samples seen this epoch.
    correct = 0
    total = 0

    model.train()
    for images, labels in train_dataloader:

        images = images.to(device)      # (batch, 3, 224, 224)
        labels = labels.to(device)      # (batch,)

        # Forward pass -> per-class logits of shape (batch, num_classes).
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Standard backprop step: clear stale grads, compute new, update.
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()

        # Accumulate statistics. `loss` is the batch mean, so multiply by
        # the batch size to get a sum that divides cleanly by dataset size.
        running_loss += loss.item() * images.size(0)
        # argmax on the detached tensor replaces the deprecated `.data`
        # attribute access, which bypasses autograd's safety checks.
        predicted = outputs.detach().argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    # Per-epoch averages.
    train_loss = running_loss / len(train_dataset)
    train_accuracy = 100 * correct / total

    train_metrics['loss'].append(train_loss)
    train_metrics['accuracy'].append(train_accuracy)

    print(f"Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.2f}%")

end_time = time.time()
elapsed_time = end_time - start_time  # total wall-clock training time
print(f"代码执行时间：{elapsed_time} 秒")


# Encode the run's hyper-parameters in the artifact name,
# e.g. "_30_32_0.0005_0.9".
parameter = f"_{num_epochs}_{batch_size}_{lr}_{momentum}"
torch.save(model.state_dict(), r'models\butterfly_SqueezeNet' + parameter + '.pth')
print("Model saved successfully!")


# 创建一个包含 'Train Loss' 和 'Train Accuracy' 列名的新DataFrame
data_to_save = pd.DataFrame({
    'Train Loss': train_metrics['loss'],
    'Train Accuracy': train_metrics['accuracy']
})

# 将数据保存为CSV文件
data_to_save.to_csv(r'train_metrics\SqueezeNet_train_metrics'+ parameter +'.csv', index=False)


# Append this run's wall-clock time to a shared log file.
txt_file_path = 'train_metrics/elapsed_time.txt'
try:
    # Opening in 'a' mode already creates the file when it does not exist,
    # so the previous exists-check + create-empty-file step was redundant.
    with open(txt_file_path, 'a') as txtfile:
        txtfile.write(f"SqueezeNet{parameter}: {elapsed_time}\n")

    print(f"Elapsed time added to {txt_file_path}")
except Exception as e:
    # Best-effort logging only; a failure here must not abort the run.
    print(f"Error: {e}")

# Render loss and accuracy on a single learning-curve figure and save it.
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(train_metrics['loss'], label='Train Loss')
ax.plot(train_metrics['accuracy'], label='Train Accuracy')
ax.set_xlabel('Epoch')
ax.set_ylabel('Value')
ax.set_title('Learning Curve')
ax.legend()
fig.savefig('train_metrics/SqueezeNet' + parameter + '.png')
plt.show()