import os
from tqdm import tqdm
from torch.utils.data import DataLoader
from gen_dataset import MyData, transform, fun, GenData
from model import ResNet18_PRO
from DTs import *
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import cv2
import random
import time
# index = list(range(0, 270))
def Gen_testData(path, h, w):
    """
    Load every picture under *path* into one array and pair it with labels.

    :param path: directory containing the raw pictures; only files ending in
        ``jpg`` are read — any other file leaves an all-zero entry at its index
    :param h: target image height in pixels
    :param w: target image width in pixels
    :return: ``(data, label)`` where ``data`` is a ``uint8`` array of shape
        ``(num_pics, h, w, 3)`` and ``label`` is a ``torch.Tensor`` built from
        ``fun(i, 89, 179)`` for each file index ``i``; both are shuffled with
        the same random permutation.
    """
    # List the directory once so the label indices and the image loop below
    # are guaranteed to agree on file order.
    filenames = os.listdir(path)
    num_pics = len(filenames)
    data = np.zeros((num_pics, h, w, 3), dtype=np.uint8)
    label = torch.Tensor([fun(i, 89, 179) for i in range(num_pics)])

    for i, filename in enumerate(filenames):
        if filename.endswith("jpg"):
            img = cv2.imread(os.path.join(path, filename))
            # cv2.resize takes dsize as (width, height); the original passed
            # (h, w), which transposed the target size whenever h != w.
            img = cv2.resize(img, (w, h))
            data[i] = img

    # Shuffle images and labels with the same random permutation.
    perm = list(range(num_pics))
    random.shuffle(perm)
    data = data[perm]
    label = label[perm]
    return data, label





def test(Model, device, test_dataloader):
    """
    Evaluate *Model* on *test_dataloader* and collect per-class metrics.

    :param Model: trained classifier, already placed on *device*
    :param device: torch device used for inference
    :param test_dataloader: DataLoader yielding ``(images, labels)`` batches
    :return: four dicts (accuracy, precision, recall, f1), each mapping
        "total"/"hole"/"crack"/"normal" to a one-element list holding that
        metric averaged over all batches.

    Note: per-class entries use ``precision/recall/f1`` restricted to the
    corresponding label ("hole"->0, "crack"->1, "normal"->2); the "total"
    entry uses a macro average over labels [0, 1, 2].  Batch-level metrics
    are averaged over batches, not weighted by batch size.
    """
    data_name = ["total", "hole", "crack", "normal"]
    test_acc_dict = {name: [] for name in data_name}
    test_precision_dict = {name: [] for name in data_name}
    test_recall_dict = {name: [] for name in data_name}
    test_f1_dict = {name: [] for name in data_name}
    # Order must match the metric keys below: accuracy, precision, recall, f1.
    metric_fns = [accuracy_score, precision_score, recall_score, f1_score]
    m = len(test_dataloader)
    model = Model
    model.eval()  # switch to inference mode once, not per batch
    with torch.no_grad():
        # One running-sum dict per entry in data_name.
        metric_result_list = [
            {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
            for _ in data_name
        ]
        # NOTE: the original also accumulated a CrossEntropy loss into a
        # local list that was never returned; that dead code was removed —
        # the returned metric dicts are unaffected.
        with tqdm(total=m, desc='Val', leave=True, ncols=100, unit='batch', unit_scale=True) as pbar:
            for images, labels in test_dataloader:
                images, labels = images.to(device), labels.to(device)
                labels = labels.long()
                outputs = model(images)
                _, predicted = torch.max(outputs.data, dim=1)
                # sklearn metrics need CPU tensors / array-likes.
                labels = labels.cpu()
                predicted = predicted.cpu()

                for c, metric_result in enumerate(metric_result_list):
                    for idx, key in enumerate(metric_result.keys()):
                        if idx == 0:
                            # accuracy_score has no `labels`/`average` args.
                            metric_result[key] += metric_fns[idx](labels, predicted)
                        else:
                            # c == 0 -> "total": macro over all classes;
                            # otherwise restrict to the single class c-1.
                            labels_pos = [0, 1, 2] if c == 0 else [c - 1]
                            metric_result[key] += metric_fns[idx](
                                labels, predicted, labels=labels_pos, average="macro")
                pbar.update()
        for idx, metric_result in enumerate(metric_result_list):
            test_acc_dict[data_name[idx]].append(metric_result["accuracy"] / m)
            test_precision_dict[data_name[idx]].append(metric_result["precision"] / m)
            test_recall_dict[data_name[idx]].append(metric_result["recall"] / m)
            test_f1_dict[data_name[idx]].append(metric_result["f1"] / m)
        return test_acc_dict, test_precision_dict, test_recall_dict, test_f1_dict


def main():
    """
    Evaluate every trained model under ``result/var_dpi/`` on the test set
    and print the metrics averaged over several repeated runs.
    """
    # 1. load dataset
    path = "test_data"  # directory with the test pictures
    # Directory holding variable-resolution training results; each
    # sub-directory name must end with "_<dpi>" (e.g. "_416").
    packpath = "result/var_dpi/"
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 32
    num_class = 3  # number of defect classes
    count = 10  # number of test repetitions; the printed result is the mean
    for filename in os.listdir(packpath):
        if filename.startswith("result"):
            continue
        acc, hole, crack, normal, total = [], [], [], [], []
        # The original printed a literal "(unknown)" here — the f-string had
        # lost its placeholder; report the directory actually being tested.
        print(f"Testing {filename} model , please waiting.....")
        modelpath = os.path.join(packpath, filename)
        model = None
        for file in os.listdir(modelpath):
            if file.endswith(".pt"):
                pthpath = os.path.join(modelpath, file)
                model = ResNet18_PRO(device, num_class, test_pth_path=pthpath, Trans_Learning=False, Freeze=False)
        if model is None:
            # Without this guard the original crashed with a NameError when a
            # directory contained no .pt checkpoint.
            print(f"No .pt checkpoint found in {modelpath}, skipping.")
            continue

        # h, w: height and width of the test pictures, parsed from the
        # "_<dpi>" suffix of the directory name.
        h = int(filename.split('_')[-1])
        w = h
        x_test, y_test = GenData(path, h, w, a=89, b=179)
        test_dataset = MyData(x_test, y_test, h, w, transform=transform)
        test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)

        for _ in range(count):
            test_acc_dict, test_precision_dict, test_recall_dict, test_f1_dict = test(model, device, test_dataloader)
            # NOTE(review): 'acc' is taken from the "hole" entry of the
            # accuracy dict — confirm this is intended and not a typo for
            # test_acc_dict["total"].
            acc.append(test_acc_dict["hole"])
            total.append(test_f1_dict["total"])
            hole.append(test_f1_dict["hole"])
            crack.append(test_f1_dict["crack"])
            normal.append(test_f1_dict["normal"])
        acc = np.array(acc)
        hole = np.array(hole)
        total = np.array(total)
        crack = np.array(crack)
        normal = np.array(normal)
        # ".4f" = four decimal places; the original "04f" was a typo that
        # set a zero-padded minimum field width instead of the precision.
        print(f"acc:{acc.mean():.4f} hole:{hole.mean():.4f} crack:{crack.mean():.4f} normal:{normal.mean():.4f} "
              f"total:{total.mean():.4f}")


# Script entry point: run the evaluation only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
