import os
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from torchvision.utils import make_grid
import torchvision.models as models
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
import random
import warnings
import time


def train(net, device, train_loader, optimizer, loss_function, epochs):
    """Run one training epoch and return the mean loss over its batches.

    Args:
        net: model to train; caller is responsible for moving it to `device`.
        device: torch.device the input batches are sent to.
        train_loader: iterable of (images, labels) batches with a len().
        optimizer: optimizer updating `net`'s parameters.
        loss_function: callable(outputs, labels) -> scalar loss tensor.
        epochs: index of the current epoch (used only in the log line).

    Returns:
        float: average training loss across all batches of this epoch.
    """
    net.train()  # enable Dropout / BatchNorm training behaviour
    running_loss = 0.0  # reset every epoch
    time_start = time.perf_counter()  # time one epoch

    num_batches = len(train_loader)
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()  # clear gradients from the previous step

        outputs = net(images.to(device))  # forward pass
        loss = loss_function(outputs, labels.to(device))

        loss.backward()  # backward pass
        optimizer.step()  # parameter update
        running_loss += loss.item()

        # Text progress bar: current progress = finished steps / total steps.
        rate = (step + 1) / num_batches
        a = "*" * int(rate * 50)
        b = "." * int((1 - rate) * 50)
        # Bug fix: format the detached float, not the live tensor.
        print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss.item()), end="")
    print()
    print('%f s,epoch %s' % (time.perf_counter() - time_start, epochs))
    # Bug fix: running_loss was accumulated but never used — report the epoch mean.
    # max(..., 1) keeps an empty loader from dividing by zero.
    return running_loss / max(num_batches, 1)


def test(model, device, test_dl, loss_function):
    """Evaluate `model` on `test_dl`, printing mean loss and mean batch accuracy."""
    model.eval()  # disable Dropout / use BatchNorm running stats
    total_loss = 0.0
    total_acc = 0.0
    with torch.no_grad():  # inference only — no autograd bookkeeping
        for inputs, targets in test_dl:
            inputs, targets = inputs.to(device), targets.to(device)
            preds = model(inputs)
            total_loss += loss_function(preds, targets).item()
            total_acc += accuracy(preds, targets)  # per-batch accuracy
        print('val loss: {:.3f}, val acc: {:.3f}'.format(total_loss / len(test_dl), total_acc / len(test_dl)))


warnings.filterwarnings('ignore')
# matplotlib inline

# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Image preprocessing pipelines, one per split.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),  # random crop, then rescale to 224x224
                                 transforms.RandomHorizontalFlip(p=0.5),  # flip horizontally with probability 0.5
                                 transforms.ToTensor(),  # H,W,C -> C,H,W and scales pixels to [0, 1]
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),

    "val": transforms.Compose([transforms.Resize((224, 224)),  # cannot 224, must (224, 224)
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

# Root of the dataset, laid out ImageFolder-style with train/ and val/ subdirs.
image_path = r'D:\share\python\data\dog-dataset\imagewoof2-160'

BATCH_SIZE = 32
# Epoch: one full pass over the training samples.
# Batch: the training set is split into batches of BATCH_SIZE samples each.
# Batch size affects optimization quality, speed, and GPU memory usage;
# lower it when GPU memory is tight.
# The loaders below yield the data in BATCH_SIZE chunks.

# Validation set is loaded further down.

# Load the training set with the training-time augmentations applied.
train_dataset = datasets.ImageFolder(root=image_path + "/train",
                                     transform=data_transform["train"])
train_dl = torch.utils.data.DataLoader(train_dataset,  # training set
                                       batch_size=BATCH_SIZE,  # samples per batch
                                       shuffle=True,  # shuffle between epochs
                                       num_workers=0)  # worker count; keep 0 on Windows

validate_dataset = datasets.ImageFolder(root=image_path + "/val",
                                        transform=data_transform["val"])
# Validation loader (deterministic transforms, still shuffled).
valid_dl = torch.utils.data.DataLoader(validate_dataset,  # validation set
                                       batch_size=BATCH_SIZE,
                                       shuffle=True,
                                       num_workers=0)

print(f'训练数据集长度 = {len(train_dataset)}')
print(f'验证数据集长度 = {len(validate_dataset)}')


# 基于批量归一化的预训练 ResNet 模型的定义
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))


class Resnet50(nn.Module):
    def __init__(self):
        super().__init__()
        # Use a pretrained model
        self.network = models.resnet50(weights=models.resnet.ResNet50_Weights.IMAGENET1K_V1)
        # Replace last layer
        num_ftrs = self.network.fc.in_features
        self.network.fc = nn.Linear(num_ftrs, 10)

    def forward(self, xb):
        return torch.sigmoid(self.network(xb))

    def freeze(self):
        # To freeze the residual layers
        for param in self.network.parameters():
            param.require_grad = False
        for param in self.network.fc.parameters():
            param.require_grad = True

    def unfreeze(self):
        # Unfreeze all layers
        for param in self.network.parameters():
            param.require_grad = True


model = Resnet50().to(device=device)

history = []  # NOTE(review): never appended to below — presumably meant for loss/acc curves
# Training configuration


epochs = 10
max_lr = 0.0001
grad_clip = 0.1  # NOTE(review): defined but never applied inside train()
weight_decay = 1e-4
opt_func = torch.optim.Adam  # NOTE(review): unused; the optimizer is constructed directly below

optimizer = torch.optim.Adam(model.parameters(), max_lr, weight_decay=weight_decay)
loss_function = F.cross_entropy
# Training loop (commented out: pre-trained weights are loaded from disk instead)
# for epoch in range(epochs):
#     train(model, device, train_dl, optimizer, loss_function, epoch)
# torch.save(model.state_dict(), 'RES.pth')

# Evaluation: restore the saved checkpoint and score it on the validation set.
model.load_state_dict(torch.load('RES.pth'))

test(model, device, valid_dl, loss_function)

# Visualize predictions on 9 randomly chosen validation images.
val_num = len(validate_dataset)
indices = list(range(val_num))
random.shuffle(indices)
i = 0
fig = plt.figure(figsize=(25, 4))
# Set gives O(1) membership tests instead of an O(9) list scan per sample.
indices_ = set(indices[0: 9])
with torch.no_grad():  # display-only inference — no gradient bookkeeping needed
    for idx, (data, label) in enumerate(validate_dataset):
        if idx in indices_:
            view = data.float().view(1, 3, 224, 224)  # add a batch dimension
            output = model(view.to(device))
            _, result = torch.max(output, dim=1)  # predicted class index
            ax = fig.add_subplot(1, 9, i + 1, xticks=[], yticks=[])
            # NOTE(review): data[0] shows only channel 0 of the normalized tensor,
            # not a true-color image — confirm whether that is intended.
            ax.imshow(data[0])
            # Bug fix: title said 'ped:' instead of 'pred:'.
            ax.set_title('real:' + str(label) + 'pred:' + str(result.item()))
            i = i + 1
plt.show()
