import time
import torch
import clip
from PIL import Image
import os
from torchvision import transforms
import numpy as np
def get_img_name(img_dir, format="jpg"):
    """Return the names of files in *img_dir* whose name ends with *format*.

    :param img_dir: directory to scan (str)
    :param format: filename suffix to keep, default "jpg" (str)
    :return: list of matching file names
    :raises ValueError: if no file with the given suffix is found
    """
    matches = [name for name in os.listdir(img_dir) if name.endswith(format)]
    if not matches:
        raise ValueError("{}下找不到{}格式数据".format(img_dir, format))
    return matches


def img_transform(img_rgb, transform=None):
    """Apply *transform* to an image and return the result.

    :param img_rgb: PIL Image (RGB)
    :param transform: torchvision transform pipeline; required
    :return: the transformed tensor
    :raises ValueError: if *transform* is None
    """
    if transform is None:
        raise ValueError("找不到transform！必须有transform对img进行处理")
    return transform(img_rgb)

if __name__ == "__main__":
    # Evaluate a fine-tuned CLIP model on a directory of same-class images:
    # every image under img_dir is assumed to belong to class `text_idx`,
    # and we count how many the model predicts correctly.
    img_dir = "/media/lihongsen/home/luoluoluo/dataset/clip_train_3/test/0"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # 1. data
    img_names = get_img_name(img_dir)
    num_img = len(img_names)
    text_idx = 0  # ground-truth class index shared by all images in img_dir
    idx_to_class = {
        0: "a photo without person",
        1: "a photo of one person",
        2: "a photo of two person",
    }
    label_token = clip.tokenize(["a photo without person","a photo of one person", "a photo of two person"]).to(device)

    # 2. model
    clip_model, preprocess = clip.load("ViT-B/32", device=device)
    # map_location keeps a GPU-saved checkpoint loadable on a CPU-only machine
    checkpoint = torch.load("clip_epoch_90.pth", map_location=device)
    clip_model.load_state_dict(checkpoint)
    clip_model.eval()

    cnt = 0  # number of correctly classified images
    with torch.no_grad():
        for img_name in img_names:
            path_img = os.path.join(img_dir, img_name)

            # path --> preprocessed tensor with batch dimension
            img_tensor = preprocess(Image.open(path_img)).unsqueeze(0).to(device)

            # forward pass: similarity logits of the image against each label.
            # (The model's forward encodes both image and text internally, so
            # separate encode_image/encode_text calls are unnecessary.)
            logits_per_image, logits_per_text = clip_model(img_tensor, label_token)
            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
            print("Label probs:", probs)  # e.g. [[0.9927937  0.00421068 0.00299572]]

            pred_int = probs[0].argmax(axis=0)
            if text_idx == pred_int:
                cnt += 1

    print(f"真实预测标签为{idx_to_class[text_idx]}")
    print(f"总的预测数量为{num_img}")
    print(f"预测对的数量为{cnt}")
    print(f"预测错的数量为{num_img - cnt}")
