
from detect import img_detective
# import detect
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import random_split
import math
import time
from PIL import Image
from torchvision import transforms
import torchvision.models as models

import sys
import os
sys.path.append("./")
sys.path.append('./')


batch_size = 8  # mini-batch size for both training and validation loaders
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # device object
num_classes = 10  # number of image categories the classifier head outputs
basic_dir = 'D:/BaiduNetdiskDownload/'  # root directory for data and model artifacts
data_dir = basic_dir + 'img_classes/'  # images in ImageFolder layout: one subfolder per class
val_dir = basic_dir + 'val_classes/'  # NOTE(review): declared but unused here — validation comes from random_split
model_dir = basic_dir + 'models/'  # where the trained state_dict ('resnet.pt') is saved/loaded
train_epochs = 2  # number of training epochs


def load_data():
    """Load the ResNet training/validation datasets and loaders.

    Images under ``data_dir`` are split 80/20 into train/validation with a
    fixed seed. Two ``ImageFolder`` views of the same directory are created
    so the validation subset uses the deterministic (non-augmented)
    transform — previously ``transforms_val`` was defined but never used,
    and the validation split was fed augmented (flipped) images.

    Returns:
        DataLoader: training data loader
        DataLoader: validation data loader
        Dataset: training subset
        Dataset: validation subset
        Dataset: full ImageFolder dataset (for ``.classes`` etc.)
    """

    transforms_train = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),  # data augmentation
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet statistics
    ])

    transforms_val = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Two views of the same folder so train/val can use different transforms.
    all_datasets = datasets.ImageFolder(os.path.join(data_dir), transforms_train)
    eval_view = datasets.ImageFolder(os.path.join(data_dir), transforms_val)

    total = len(all_datasets)
    n_train = math.ceil(total * .8)
    n_val = math.floor(total * .2)

    # Identical seeded generators produce the same index permutation, so
    # the two splits are consistent across the two dataset views.
    train_datasets, _ = random_split(
        dataset=all_datasets,
        lengths=[n_train, n_val],
        generator=torch.Generator().manual_seed(0)
    )
    _, val_datasets = random_split(
        dataset=eval_view,
        lengths=[n_train, n_val],
        generator=torch.Generator().manual_seed(0)
    )

    train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True, num_workers=4)
    val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True, num_workers=4)

    print('Train dataset size:', len(train_datasets))
    print('Validation dataset size:', len(val_datasets))

    class_names = all_datasets.classes
    print('Class names:', class_names)
    return train_dataloader, val_dataloader, train_datasets, val_datasets, all_datasets


def imshow(input, title):
    """Display a normalized (C, H, W) image tensor with its title."""
    # torch.Tensor (C, H, W) -> numpy (H, W, C)
    img = input.numpy().transpose((1, 2, 0))
    # undo the ImageNet Normalize step so colors look natural
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = np.clip(img * std + mean, 0, 1)
    # render with matplotlib
    plt.imshow(img)
    plt.title(title)
    plt.show()


def plot_img(train_dataloader, class_names):
    """Show the first four images of one training batch with their labels."""
    plt.rcParams['figure.figsize'] = [12, 8]
    plt.rcParams['figure.dpi'] = 60
    plt.rcParams.update({'font.size': 20})

    # grab a single batch from the loader and visualize the first 4 images
    inputs, classes = next(iter(train_dataloader))
    grid = torchvision.utils.make_grid(inputs[:4])
    labels = [class_names[x] for x in classes[:4]]
    imshow(grid, title=labels)


def create_model():
    """Create a ResNet152 classifier with a replaced final layer.

    Loads an ImageNet-pretrained ResNet152 backbone and swaps the final
    fully-connected layer for one with ``num_classes`` outputs.

    Returns:
        ResNet: the ResNet152 model, moved to ``device``
        CrossEntropyLoss: the loss function
        SGD: the model optimizer (lr=0.001, momentum=0.9)
    """
    model = models.resnet152(pretrained=True)  # NOTE(review): `pretrained=` is deprecated in newer torchvision; `weights=` is the replacement
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, num_classes)  # replace head for num_classes-way classification
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    return model, criterion, optimizer


def train(model, optimizer, criterion, val_datasets,
          train_loader=None, val_loader=None, train_data=None):
    """Train the model for ``train_epochs`` epochs, validating after each.

    Saves the model state_dict to ``model_dir + 'resnet.pt'`` every epoch.

    Args:
        model: the model to train
        optimizer: the model optimizer
        criterion: the loss function
        val_datasets: validation dataset (used to average validation metrics)
        train_loader: training DataLoader; falls back to the module-level
            ``train_dataloader`` global when None (backward compatible)
        val_loader: validation DataLoader; falls back to ``val_dataloader``
        train_data: training dataset; falls back to ``train_datasets``
    """
    # Backward-compatible fallback to the module-level globals the original
    # version relied on implicitly (NameError if neither is provided).
    if train_loader is None:
        train_loader = train_dataloader
    if val_loader is None:
        val_loader = val_dataloader
    if train_data is None:
        train_data = train_datasets

    num_epochs = train_epochs
    start_time = time.time()

    for epoch in range(num_epochs):
        model.train()

        running_loss = 0.
        running_corrects = 0

        # load a batch data of images
        for i, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            # get loss value and update the network weights
            loss.backward()
            optimizer.step()

            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)

        epoch_loss = running_loss / len(train_data)
        epoch_acc = running_corrects / len(train_data) * 100.
        print('[Train #{}] Loss: {:.4f} Acc: {:.4f}% Time: {:.4f}s'.format(
            epoch, epoch_loss, epoch_acc, time.time() - start_time))

        # makedirs creates intermediate directories too (os.mkdir would
        # fail if the parent of model_dir did not exist)
        os.makedirs(model_dir, exist_ok=True)
        torch.save(model.state_dict(), model_dir + 'resnet.pt')

        """ Validation"""
        model.eval()

        with torch.no_grad():
            running_loss = 0.
            running_corrects = 0

            for inputs, labels in val_loader:
                inputs = inputs.to(device)
                labels = labels.to(device)

                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(val_datasets)
            epoch_acc = running_corrects / len(val_datasets) * 100.
            print('[Validation #{}] Loss: {:.4f} Acc: {:.4f}% Time: {:.4f}s'.format(
                epoch, epoch_loss, epoch_acc, time.time() - start_time))


def predict(class_names, val_loader=None):
    """Evaluate the saved model on the validation set and show sample predictions.

    Loads ``model_dir + 'resnet.pt'``, runs one pass over the validation
    loader, displays the first 8 images with their ground-truth labels, and
    prints the overall loss/accuracy.

    Args:
        class_names: list of class names indexed by label id
        val_loader: validation DataLoader; falls back to the module-level
            ``val_dataloader`` global when None (backward compatible)
    """
    if val_loader is None:
        val_loader = val_dataloader  # original behavior: module-level global

    model = models.resnet152()
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, num_classes)
    # FIX: `criterion` was an undefined global inside this function; build
    # the loss locally so predict() works when imported from other modules.
    criterion = nn.CrossEntropyLoss()
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    model.load_state_dict(torch.load(model_dir + 'resnet.pt'))
    model.to(device)

    model.eval()
    start_time = time.time()

    with torch.no_grad():
        running_loss = 0.
        running_corrects = 0
        total = 0  # count samples locally instead of relying on a global dataset

        for i, (inputs, labels) in enumerate(val_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            total += inputs.size(0)

            if i == 0:
                print('[Prediction Result Examples]')
                images = torchvision.utils.make_grid(inputs[:4])
                imshow(images.cpu(), title=[class_names[x] for x in labels[:4]])
                images = torchvision.utils.make_grid(inputs[4:8])
                imshow(images.cpu(), title=[class_names[x] for x in labels[4:8]])

        # Summary metrics (the original left this commented out and broken)
        if total:
            print('[Prediction] Loss: {:.4f} Acc: {:.4f}% Time: {:.4f}s'.format(
                running_loss / total, running_corrects / total * 100.,
                time.time() - start_time))


def img_classify(img_path, all_classes):
    """Classify a single image with the saved ResNet model.

    Args:
        img_path: path of the image to classify
        all_classes: list of all class names (kept for interface
            compatibility; the return value is indexed by class id)

    Returns:
        Tensor: per-class probabilities in percent, shape (num_classes,);
        the predicted class id is ``result.argmax()``.
    """

    model = models.resnet152()
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, num_classes)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    model.load_state_dict(torch.load(model_dir + 'resnet.pt'))
    model.to(device)
    model.eval()

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # FIX: force 3 channels — grayscale/RGBA/palette images would otherwise
    # break the 3-channel Normalize above.
    img = Image.open(img_path).convert('RGB')

    batch_t = transform(img).unsqueeze(0).to(device)
    # no_grad: pure inference, no autograd graph needed
    with torch.no_grad():
        out = model(batch_t)

    # Percent probability for each class (index = class id).
    return torch.nn.functional.softmax(out, dim=1)[0] * 100


if __name__ == "__main__":
    # Sample image to run detection on.
    img_path = 'E:/py_projs/pcb_detect/yolov5/data/1.jpg'

    # Build loaders/datasets and the model; the training / prediction /
    # classification calls are left commented out and can be re-enabled.
    train_dataloader, val_dataloader, train_datasets, val_datasets, all_datasets = load_data()
    # plot_img(train_dataloader, train_datasets.classes)
    model, criterion, optimizer = create_model()
    # train(model, optimizer, criterion, val_datasets)
    # predict(all_datasets.classes, )
    # img_class = img_classify(img_path, all_datasets.classes)

    # Run detection via the project-local detect.img_detective helper.
    img_detective(img_path=img_path, model_num='01')
