from __future__ import print_function, division
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torchvision import models
import cv2
import numpy as np
import os
from lib.network import Network
import os


def del_dir(path):
    """Recursively delete everything inside *path*.

    The directory *path* itself is kept (just emptied) so callers such as
    rm_mkdir() can reuse it without re-creating it.

    Args:
        path: directory whose contents should be removed.
    """
    for entry in os.listdir(path):
        path_file = os.path.join(path, entry)  # full path of this entry
        if os.path.isfile(path_file):
            os.remove(path_file)
        else:
            del_dir(path_file)
            # Bug fix: the original emptied subdirectories but left the
            # empty folders behind; remove them so the tree is truly clean.
            os.rmdir(path_file)


def rm_mkdir(dir_path):
    """Ensure *dir_path* exists and is empty.

    A missing directory is created; an existing one has its contents wiped
    via del_dir(). A status line is printed in either case.

    Args:
        dir_path: directory to (re)initialize.
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
        print('Create path - %s' % dir_path)
    else:
        del_dir(dir_path)
        print('Clean path - %s' % dir_path)


def load_model(test_dir, model_path=r".\mnist_net.pth"):
    """Build the test dataset and a Network with trained weights loaded.

    Args:
        test_dir: root directory laid out for torchvision's ImageFolder
            (one subdirectory per class).
        model_path: checkpoint produced by torch.save(net.state_dict()).
            Defaults to the original hard-coded location for backward
            compatibility.

    Returns:
        (net, test_datasets): the model (wrapped in DataParallel and moved
        to GPU when CUDA is available) and the ImageFolder dataset.
    """
    val_transforms = transforms.Compose([
        transforms.Grayscale(),
        transforms.ToTensor()
    ])
    test_datasets = datasets.ImageFolder(test_dir, transform=val_transforms)

    net = Network()
    if torch.cuda.is_available():
        net = nn.DataParallel(net)
        net.cuda()
    # map_location keeps the load working on CPU-only machines even when the
    # checkpoint was saved from a GPU run (otherwise torch.load raises).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    state_dict = torch.load(model_path, map_location=device)
    # NOTE(review): load_state_dict assumes the checkpoint's key prefixes
    # ("module." or not) match the DataParallel wrapping chosen here —
    # confirm the checkpoint was saved under the same CUDA availability.
    net.load_state_dict(state_dict)
    return net, test_datasets


def run():
    """Evaluate the trained network on the test ImageFolder.

    Prints a running accuracy, saves every misclassified image to ./result
    as <idx>_<true>_<pred>.jpg, and writes the confusion matrix (rows =
    true class, cols = predicted class) to conf_mat.txt.
    """
    test_dir = r'H:\yuanbaoxi\ybx_gitee\non_local\mnist\testimgs'
    net, test_datasets = load_model(test_dir)
    test_dataloader = torch.utils.data.DataLoader(test_datasets, batch_size=8000, shuffle=False)

    rm_mkdir("./result")
    # Bug fix: the original took class names from os.listdir(test_dir),
    # whose order is arbitrary, while ImageFolder assigns label indices
    # from its *sorted* classes list — the two can disagree and mislabel
    # the saved images and confusion-matrix headers. Use the dataset's
    # own class list so index -> name is always consistent.
    class_names = test_datasets.classes
    class_num = len(class_names)
    # Run on GPU only when available (the original .cuda() calls crashed
    # on CPU-only machines).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net.eval()  # set once; no need to repeat per batch
    with torch.no_grad():
        correct = 0
        total = 0
        err_idx = 0
        conf_mat = np.zeros((class_num, class_num), np.uint32)
        for inputs_ori, labels_ori in test_dataloader:
            inputs = inputs_ori.to(device)
            labels = labels_ori.to(device)
            outputs = net(inputs)
            # Index of the highest score is the predicted class.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            print("total[%d],correct[%d]" % (total, correct))
            predicted_np = predicted.cpu().numpy()
            labels_np = labels_ori.numpy()

            for i in range(0, len(labels_ori)):
                pred = int(predicted_np[i])
                lbl = int(labels_np[i])
                conf_mat[lbl, pred] += 1
                if pred != lbl:
                    save_path = "./result/%d_%s_%s.jpg" % (err_idx, class_names[lbl], class_names[pred])
                    img_np = inputs_ori[i].numpy()
                    # CHW tensor -> HWC image for OpenCV.
                    img_np = np.transpose(img_np, (1, 2, 0))
                    # Undo a (mean=0.5, std=0.5) normalization back to 0..255.
                    # NOTE(review): the visible transform pipeline only applies
                    # ToTensor (0..1 range) — confirm whether a Normalize step
                    # exists elsewhere, otherwise this shifts the pixel values.
                    img_np = (img_np * 0.5 + 0.5) * 255
                    img_np = img_np.astype("uint8")
                    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
                    cv2.imwrite(save_path, img_np)
                    err_idx += 1

        print('测试分类准确率为：%.3f%%' % (100 * correct / total))
        print(conf_mat)
        # 'with' guarantees the file is closed even if a write fails.
        with open("conf_mat.txt", "w") as fw:
            fw.write("\t")
            for name in class_names:
                fw.write("%s\t" % name)
            fw.write("\n")
            for i in range(0, class_num):
                fw.write("%s\t" % class_names[i])
                for j in range(0, class_num):
                    fw.write("%d\t" % conf_mat[i, j])
                fw.write("\n")


if __name__ == "__main__":
    # Script entry point: run evaluation only (training is done elsewhere).
    run()
    # train()