import torch
import torch.nn as nn
from tqdm import tqdm
from PIL import Image

import numpy as np

import config
import os

# Test-set directory and shared runtime configuration, resolved once at
# import time from the project's config module.
app_config = config.get_app_config()
train_path = app_config.train_path          # gallery (training) image root
test_path = app_config.test_path            # query (test) image root
model_file_path = app_config.model_file_path  # where the trained model is stored
device = config.device                      # torch device (CPU/GPU) chosen by config
# NOTE(review): imported late so that config is fully initialised before
# train's module level runs — presumably train reads config on import; verify.
from train import train


def traverse_files(folder):
    """Recursively collect the paths of all files under *folder*.

    Args:
        folder: Root directory to walk.

    Returns:
        list[str]: Full (root-joined) paths of every file found, in
        ``os.walk`` order. Empty list if *folder* has no files or does
        not exist (``os.walk`` yields nothing for a missing path).
    """
    image_files = []
    for root, _dirs, files in os.walk(folder):
        # os.walk already visits every directory; just join each file
        # name onto its containing directory. No need to guard against
        # empty `files` — extending by an empty sequence is a no-op.
        image_files.extend(os.path.join(root, f) for f in files)
    return image_files

def _class_of(path):
    """Return the class label of an image path: its parent directory name.

    Uses os.path so the result is correct on every OS — the original code
    split on a hard-coded "\\" for queries but "/" for gallery paths, so
    at most one side could ever match on a given platform.
    """
    return os.path.basename(os.path.dirname(path))


if __name__ == '__main__':

    image_files = traverse_files(os.path.join("data", "test"))
    print(len(image_files))

    # train_path / test_path / device are already bound at module level;
    # no need to re-fetch the config here.
    transform = config.transform

    # Train (or load) the model and get the gallery feature index
    # {gallery_path: feature_vector}.
    model, features = train()
    model.eval()

    top1_acc = 0
    top5_acc = 0

    # Context manager guarantees the miss-log is closed even if an
    # exception interrupts the evaluation loop (the original leaked the
    # handle on error).
    with open("filename.txt", "w") as miss_log:
        for file_path in tqdm(image_files):
            real_clazz = _class_of(file_path)

            # Extract the query image's feature vector.
            query_image = Image.open(file_path).convert('RGB')
            query_image_tensor = torch.unsqueeze(transform(query_image), 0).to(device)

            with torch.no_grad():
                query_feature_tensor = model(query_image_tensor)
                query_feature_vector = np.squeeze(
                    torch.flatten(query_feature_tensor, start_dim=1).cpu().numpy())

            # The query norm is loop-invariant: compute it once instead of
            # once per gallery image.
            query_norm = np.linalg.norm(query_feature_vector)

            # Cosine similarity between the query and every gallery feature.
            similarities = {}
            for filename, feature_vector in features.items():
                feature_vector = np.squeeze(feature_vector)
                similarity = np.dot(query_feature_vector, feature_vector) / (
                        query_norm * np.linalg.norm(feature_vector))
                similarities[filename] = similarity

            # Keep the five most similar gallery images.
            sorted_similarities = sorted(
                similarities.items(), key=lambda x: x[1], reverse=True)[:5]
            query_clazz_list = [_class_of(path) for path, _score in sorted_similarities]

            # Guard against an empty gallery (original would IndexError).
            if query_clazz_list and query_clazz_list[0] == real_clazz:
                top1_acc += 1

            if real_clazz in query_clazz_list:
                top5_acc += 1
            else:
                # write() with an explicit newline: the original used
                # writelines() on a plain string, which concatenates all
                # missed paths into one unbroken line.
                miss_log.write(file_path + "\n")

    # Avoid ZeroDivisionError when no test images were found.
    total = len(image_files)
    if total:
        # Top-1 accuracy
        top1_accuracy = top1_acc / total
        print('Top1正确率:', top1_accuracy)

        # Top-5 accuracy
        top5_accuracy = top5_acc / total
        print('Top5正确率:', top5_accuracy)
    else:
        print('No test images found under', os.path.join("data", "test"))