import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from torchvision import datasets, transforms
from datetime import datetime
import matplotlib.pyplot as plt


def data_preproc_load(data_improve, b_s, train_path, test_path):
    """Build train/test DataLoaders from two ImageFolder directory trees.

    Args:
        data_improve: if truthy, apply random augmentation to the TRAINING set.
        b_s: batch size for both loaders.
        train_path: root directory of the training images (ImageFolder layout).
        test_path: root directory of the test images (ImageFolder layout).

    Returns:
        (train_loader, test_loader)
    """
    # Deterministic pipeline: always used for the test set, and for the
    # training set when augmentation is disabled.
    eval_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.float())
    ])

    if data_improve:
        # Augmentation for training only. BUG FIX: the original applied these
        # random transforms to the test set as well, which makes evaluation
        # non-deterministic and systematically pessimistic.
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),        # random horizontal flip
            transforms.RandomVerticalFlip(),          # random vertical flip
            transforms.RandomRotation(30),            # random rotation up to 30 degrees
            transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),  # random crop + resize
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.float())
        ])
    else:
        train_transform = eval_transform

    train_data = datasets.ImageFolder(root=train_path, transform=train_transform)
    test_data = datasets.ImageFolder(root=test_path, transform=eval_transform)
    train_loader = DataLoader(dataset=train_data, batch_size=b_s, shuffle=True)
    test_loader = DataLoader(dataset=test_data, batch_size=b_s)

    return train_loader, test_loader


def train_model(model, criterion, optimizer, train_loader, device, epoch):
    """Run one training epoch.

    Args:
        model: the network to train (switched to train mode here).
        criterion: loss function; expects raw logits and int64 class targets.
        optimizer: optimizer whose parameters belong to `model`.
        train_loader: iterable of (images, labels) batches.
        device: device the batches are moved to ('cpu', 'cuda', ...).
        epoch: zero-based epoch index, used only for progress display.

    Returns:
        (mean_loss, accuracy_percent) averaged/accumulated over the epoch.
    """
    model.train()  # enable training mode (dropout, batch-norm updates)
    progress_bar = tqdm(enumerate(train_loader), total=len(train_loader))
    curr_loss = 0.0  # running sum of per-batch losses
    correct = 0      # number of correct predictions so far
    total = 0        # number of samples seen so far
    for i, (images, labels) in progress_bar:
        images = images.to(device)
        labels = labels.to(device).long()  # CrossEntropyLoss requires int64 targets

        optimizer.zero_grad()  # clear stale gradients before the new backward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        curr_loss += loss.item()
        # IDIOM FIX: argmax replaces the discouraged `outputs.data` access;
        # argmax tracks no gradient, so no detach is needed.
        predicted = outputs.argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

        progress_bar.set_description(f'Epoch: {epoch + 1}, Step: {i + 1}, Loss: {loss.item()}')

    curr_loss /= len(train_loader)        # mean loss over batches
    curr_train_acc = 100 * correct / total  # accuracy in percent
    print(f'Epoch: {epoch + 1}, Loss: {curr_loss}, Accuracy: {curr_train_acc}%')
    return curr_loss, curr_train_acc


def validate_model(model, criterion, test_loader, device, num_classes, classes):
    """Evaluate `model` on `test_loader` and print overall + per-class accuracy.

    Args:
        model: network to evaluate (switched to eval mode here).
        criterion: loss function over (logits, targets).
        test_loader: iterable of (images, labels) batches.
        device: device the batches are moved to.
        num_classes: number of classes; sizes the per-class counters.
        classes: sequence of class display names, indexed by label.

    Returns:
        (accuracy_percent, avg_loss) where avg_loss is the sample-weighted
        mean loss over the whole dataset.
    """
    model.eval()  # evaluation mode: disable dropout, freeze batch-norm stats
    correct = 0
    total = 0
    total_loss = 0.0
    class_correct = [0.0] * num_classes  # correct predictions per class
    class_total = [0.0] * num_classes    # samples seen per class
    with torch.no_grad():  # no gradients needed during evaluation
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)

            # BUG FIX: the original computed the loss once AFTER the loop, so
            # avg_loss reflected only the final batch. Accumulate per batch,
            # weighted by batch size so partial last batches count correctly.
            loss = criterion(outputs, labels)
            total_loss += loss.item() * images.size(0)

            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            batch_hits = (predicted == labels)
            correct += batch_hits.sum().item()

            # Per-class bookkeeping. BUG FIX: the original used `.squeeze()`,
            # which turns a batch of size 1 into a 0-d tensor and makes the
            # subsequent indexing raise; zip over the 1-d mask instead.
            for hit, label in zip(batch_hits, labels):
                idx = label.item()
                class_correct[idx] += hit.item()
                class_total[idx] += 1

    curr_val_acc = 100 * correct / total
    avg_loss = total_loss / total
    print(f'Validation Accuracy: {curr_val_acc}%')

    for i in range(num_classes):
        if class_total[i] > 0:
            print('Accuracy of %5s : %2d %%' % (
                classes[i], 100 * class_correct[i] / class_total[i]))
        else:
            print('Accuracy of %5s : N/A (no training examples)' % (classes[i]))

    return curr_val_acc, avg_loss


def save_model_and_log(best_model_params, model_name, num_classes, model_val_acc, model_train_acc, b_s, num_epoch, model_epoch,
                       data_improve, optimizer, lr, w_d, scheduler, scheduler_mode, scheduler_factor, min_lr):
    """Persist the best model weights and append a training-run record.

    Saves `best_model_params` (if not None) under Trained_models/ and appends
    the hyperparameters and results of this run to Train_Record/<name>TrainLog.txt.
    """
    current_time = datetime.now().strftime('%y-%m-%d_%H:%M')
    # NOTE(review): ':' in the timestamp makes these filenames invalid on
    # Windows — confirm the target platform before relying on this scheme.
    model_filename = f'{model_name}_CL:{num_classes}_VA:{model_val_acc:.2f}_TA:{model_train_acc:.2f}_{current_time}.pth'
    if best_model_params is not None:
        torch.save(best_model_params, f'Trained_models/{model_filename}')

    with open(f'Train_Record/{model_name}TrainLog.txt', 'a') as f:
        f.write(f'Time:{current_time}\n'
                f'model_name:{model_name} classes:{num_classes} batch_size:{b_s} num_epoch:{num_epoch} stop_epoch:{model_epoch} data_improve:{data_improve}\n'
                f'optimizer:{type(optimizer).__name__}\nlr:{lr}\nweight_decay:{w_d}\n'
                f'scheduler:{type(scheduler).__name__ if scheduler is not None else None} mode={scheduler_mode} factor={scheduler_factor} min_lr={min_lr}\n'
                f'val_acc:{model_val_acc:.2f} train_acc:{model_train_acc:.2f}\n'
                # BUG FIX: the log previously hard-coded 'ST_RNRs50' instead of
                # the actual model_name, recording a wrong checkpoint filename.
                f'model_name:{model_filename}\n\n')


def plot_and_save_fig(train_losses, val_losses, train_accuracies, val_accuracies):
    """Plot loss and accuracy curves side by side and save as a timestamped PNG.

    Args:
        train_losses / val_losses: per-epoch loss values.
        train_accuracies / val_accuracies: per-epoch accuracy values.

    The image is written to Train_Record/<timestamp>.png.
    """
    current_time = datetime.now().strftime('%y-%m-%d_%H:%M')
    fig = plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(range(1, len(train_losses) + 1), train_losses, label='Train')
    plt.plot(range(1, len(val_losses) + 1), val_losses, label='Validation')
    plt.title('Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(1, len(train_accuracies) + 1), train_accuracies, label='Train')
    plt.plot(range(1, len(val_accuracies) + 1), val_accuracies, label='Validation')
    plt.title('Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.tight_layout()  # avoid the two subplots clipping each other's labels
    plt.savefig(f'Train_Record/{current_time}.png')
    # BUG FIX: close the figure so repeated calls across a long run don't
    # accumulate open matplotlib figures and leak memory.
    plt.close(fig)