from PIL import Image
import torch
import glob
import numpy as np
from model_text import CLIPText
from model_vision_vit import CLIPViT
from torchvision import transforms
from tokenizer import tokenize


def load_and_transform_vision_data(image_paths):
    """Load images from disk and preprocess them for the CLIP vision tower.

    Args:
        image_paths: iterable of image file paths, or None.

    Returns:
        A (N, 3, 256, 256) float tensor of normalized images, or None when
        image_paths is None or empty.
    """
    if image_paths is None:
        return None

    # The transform is loop-invariant; build it once instead of per image.
    # Mean/std are the standard OpenAI CLIP normalization constants.
    data_transform = transforms.Compose(
        [
            transforms.Resize(
                256, interpolation=transforms.InterpolationMode.BICUBIC
            ),
            transforms.CenterCrop(256),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=(0.48145466, 0.4578275, 0.40821073),
                std=(0.26862954, 0.26130258, 0.27577711),
            ),
        ]
    )

    image_outputs = []
    for image_path in image_paths:
        # Force RGB so grayscale/RGBA files also yield 3-channel tensors.
        with open(image_path, "rb") as fopen:
            image = Image.open(fopen).convert("RGB")
        image_outputs.append(data_transform(image))

    if not image_outputs:
        # torch.stack raises on an empty list; treat an empty path list
        # the same as image_paths=None.
        return None
    return torch.stack(image_outputs, dim=0)

# Extract image features
def get_visions_norm_features(visions_model, images_path_list=("./images/CLIP.png",)):
    """Encode images with the CLIP vision model and L2-normalize the features.

    Args:
        visions_model: model exposing ``encode_image``.
        images_path_list: image paths to encode. Immutable tuple default to
            avoid the shared-mutable-default-argument pitfall.

    Returns:
        A (N, D) tensor of unit-norm image features.
    """
    with torch.no_grad(), torch.cuda.amp.autocast():
        image = load_and_transform_vision_data(images_path_list)
        image_features = visions_model.encode_image(image)
        # Normalize so that dot products become cosine similarities.
        image_features /= image_features.norm(dim=-1, keepdim=True)
        return image_features



# Candidate text prompt sets for different datasets/experiments. Only the
# set passed to get_text_norm_features() below is actually used; the
# commented-out variants are alternatives the author toggled between.
texts_list_tmp = ['A white truck', 'A camouflage truck', 'A white truck on the land', 'A red truck', 'red truck','red car','A black car with a silver round','A black vehicle with a silver round','A black car with a silver round on the land']

texts_list_vehicle = ['A picture of yellow bus', 'A picture of white car', 'A picture of red truck']

texts_list_h20t = ['A photo of white vehicle with six wheels', 'A photo of red vehicle with four wheels', 'A photo of black vehicle with four wheels']

texts_list_tuzhuang = ['A photo of black vehicle', 'A photo of yellow vehicle', 'A photo of green vehicle']

# texts_list_sim = ['A photo of grey truck with a 3:1 aspect ratio', 'A photo of grey and red truck with a 4:1 aspect ratio', 'A photo of grey and black jeep with a 2:1 aspect ratio', 
#                   'A photo of white bus', 'A photo of blue car', 'A photo of yellow pick-up truck', 
#                   'A photo of dark green vehicle', 'A photo of grey and white truck with white headstock', 'A photo of light green and dark green tent in irregular rectangle']

# texts_list_sim = ['A photo of grey truck with black dashed lines', 'A photo of grey truck with eight red circles', 'A photo of grey camouflage jeep with a black square', 
#                   'A photo of white rectangle bus with a small white rectangle', 'A photo of blue car', 'A photo of yellow pick-up truck', 
#                   'A photo of dark green rectangle vehicle', 'A photo of grey truck with eight dark grey squares', 'A photo of light green and dark green tent in irregular rectangle']

# One full-sentence prompt per class (9 classes, matching name_list below).
texts_list_sim = ['A photo of greyish green long truck with black dashed lines', 'A photo of grey truck with eight red circles', 'A photo of grey camouflage jeep with a black square', 
                  'A photo of white long rectangle bus with a small white rectangle', 'A photo of blue car', 'A photo of yellow pick-up truck', 
                  'A photo of dark green rectangle vehicle', 'A photo of grey truck with eight dark grey squares','A photo of light green campsite and dark green tent roof in irregular rectangle'] # best for crop2000

# texts_list_sim = ['A photo of a greyish green long truck with black dashed lines and two small black rectangles', 'A photo of a grey truck with eight red circles', 'A photo of a grey camouflage jeep with a black square', 
#                   'A photo of a white long rectangle bus with a small white rectangle', 'A photo of a blue car', 'A photo of a yellow pick-up truck', 
#                   'A photo of a dark green rectangle vehicle', 'A photo of a grey truck with eight dark grey squares','A photo of a light green and dark green tent in irregular rectangle']  # best for crop800

texts_list_zhuanpan = ['A photo of black and dark green stuff', 'A photo of red stuff', 'A photo of green bottle', 'A photo of light pink and yellow stuff']

# texts_list_mix = ['0greyish green', '1long truck', '2black dashed lines', '3grey', '4eight red circles', '5grey camouflage', '6jeep', '7a black square', '8white' , '9long rectangle bus', '10a small white rectangle on big long rectangle', '11blue', '12car', '13front and rear windows', '14yellow', '15pick-up truck', '16front and rear bucket', '17dark green', '18rectangle suv', '19eight dark grey squares', '20light green and dark green', '21campsite', '22tent roof', '23irregular rectangle']

# texts_list_mix = ['A photo of greyish green', 'A photo of long truck', 'A photo of black dashed lines', 'A photo of grey', 'A photo of eight red circles', 'A photo of grey camouflage', 'A photo of jeep', 'A photo of a black square', 'A photo of white' , 'A photo of long rectangle bus', 'A photo of a small white rectangle on big long rectangle', 'A photo of blue', 'A photo of car', 'A photo of front and rear windows', 'A photo of yellow', 'A photo of pick-up truck', 'A photo of front and rear bucket', 'A photo of dark green', 'A photo of rectangle suv', 'A photo of eight dark grey squares', 'A photo of light green and dark green', 'A photo of campsite', 'A photo of tent roof', 'A photo of irregular rectangle']

# 24 per-attribute phrases; the evaluation loop below averages groups of
# these attribute probabilities into one score per class.
texts_list_mix = ['greyish green', 'long truck', 'black dashed lines', 'grey', 'eight red circles', 'grey camouflage', 'jeep', 'a black square', 'white' , 'long rectangle bus', 'a small white rectangle on big long rectangle', 'blue', 'car', 'front and rear windows', 'yellow', 'pick-up truck', 'front and rear bucket', 'dark green', 'rectangle suv', 'eight dark grey squares', 'light green and dark green', 'campsite', 'tent roof', 'irregular rectangle']



# Extract text features
def get_text_norm_features(texts_model, texts_list=()):
    """Tokenize prompts and encode them with the CLIP text model.

    Args:
        texts_model: model exposing ``encode_text``.
        texts_list: sequence of prompt strings. Immutable tuple default to
            avoid the mutable-default-argument pitfall.

    Returns:
        A (N, D) tensor of unit-norm text features.
    """
    with torch.no_grad(), torch.cuda.amp.autocast():
        text = tokenize(texts_list)
        text_features = texts_model.encode_text(text)
        # Normalize so that dot products become cosine similarities.
        text_features /= text_features.norm(dim=-1, keepdim=True)
        return text_features


# Compare features directly; with a randomly initialized network the
# feature values (and hence these probabilities) vary from run to run.
def check_text_images_similarity(image_features, text_features):
    """Return per-image softmax probabilities over the text prompts.

    Both inputs are expected to be L2-normalized, so the scaled dot
    product is a temperature-scaled cosine similarity.
    """
    scaled_image_features = 100.0 * image_features
    logits = scaled_image_features @ text_features.T
    return logits.softmax(dim=-1)

# image_features, text_features = get_visions_norm_features(), get_text_norm_features()
# print('text_probs', check_text_images_similarity(image_features, text_features))


# Load a pretrained checkpoint to compare features; with a fixed model the
# feature values are fixed.
state = torch.load('/home/ycy/project/codes_1116_21/codes_vit/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K/open_clip_pytorch_model.bin')

texts_model = CLIPText()
visions_model = CLIPViT()

# Split the open_clip checkpoint into vision and text sub-state-dicts.
# NOTE(review): the original condition ended in bare string literals
# `("transformer") or ("ln_final")`, which are always truthy, so in
# practice every non-"visual" key landed in txt_state. The `else` below
# preserves that exact behavior and makes it explicit — it also keeps
# shared keys (e.g. positional_embedding, logit_scale) available to the
# text tower, which a substring filter would have dropped.
txt_state = {}
img_state = {}
for k, v in state.items():
    if "visual" in k:
        img_state[k] = v
    else:
        txt_state[k] = v

# strict=False: each sub-model consumes only the keys it defines.
texts_model.load_state_dict(txt_state, strict=False)
visions_model.load_state_dict(img_state, strict=False)
# Class names; each must match a sub-directory name under the dataset root
# globbed into `imgs` below (order defines the class ids used by the
# per-class score grouping in the evaluation loop).
name_list = ['hms2000', 'hms_dan2000','jeep2000','civil_bus2000','civil_car2000','civil_pika2000','civil_suv2000','civil_truck2000','command_post2000']
# name_list = ['hms800', 'hms_dan800','jeep800','civil_bus800','civil_car800','civil_pika800','civil_suv800','civil_truck800','command_post800']
# name_list = ['black', 'red', 'green', 'yellow']
# 
# imgs = glob.glob("/home/ycy/project/h20t_v0.1/visible/choice_imags/*")
# print('imgs----', imgs)

# name_list = ['bus', 'car', 'truck']

# imgs = glob.glob("/home/ycy/project/DroneVehicle/color_selected/*")
# imgs = glob.glob("/home/ycy/project/tuzhuang/aug_imgs/*")
# imgs = glob.glob("/home/ycy/project/tuzhuang/blur_imgs/*")
# One directory per class; alternatives above are other datasets.
imgs = glob.glob("/home/ycy/project/simulation_crop/crop_2000/*")
# imgs = glob.glob("/home/ycy/project/simulation_crop/crop_132/*")


# print('imgs: ', imgs)

# Text features are computed once for the attribute prompt set and reused
# for every image below.
text_features = get_text_norm_features(texts_model, texts_list=texts_list_mix)
# lyy: if you want to save the text_features, you should open the comment
# for j in range(len(text_features)):
#     emb_str = str(text_features[j].tolist()).replace('[', '').replace(']', '')  
#     with open(f'DroneVehicles/color/vit_text_{j}.txt', 'w', encoding='utf-8') as f:
#         f.write(emb_str)

# Per-class recall evaluation: each sub-directory of `imgs` is one class.
mean_acc = 0
for img in imgs:
    # Directory name (after the dataset root prefix) is the class label.
    cls_str = img.split('/home/ycy/project/simulation_crop/crop_2000/')[1]
    class_id = name_list.index(cls_str)

    images_path_list = glob.glob(img + '/*')

    if not images_path_list:
        # Empty class directory: report 0 accuracy instead of crashing
        # with a ZeroDivisionError below.
        print('class: ', name_list[class_id], '------ACC:', 0.0)
        continue

    right_count = 0
    for i in images_path_list:
        image_features = get_visions_norm_features(visions_model, images_path_list=[i])
        similarity_res = np.array(check_text_images_similarity(image_features, text_features).squeeze()).tolist()

        # Average the per-attribute probabilities (indices into
        # texts_list_mix) into one score per class. Indices 1 and 3 are
        # deliberately shared between several classes.
        res_group = [
            (similarity_res[0] + similarity_res[1] + similarity_res[2]) / 3,
            (similarity_res[1] + similarity_res[3] + similarity_res[4]) / 3,
            (similarity_res[5] + similarity_res[6] + similarity_res[7] + similarity_res[18]) / 4,
            (similarity_res[8] + similarity_res[9] + similarity_res[10]) / 3,
            (similarity_res[11] + similarity_res[12] + similarity_res[13]) / 3,
            (similarity_res[14] + similarity_res[15] + similarity_res[16]) / 3,
            (similarity_res[17] + similarity_res[18]) / 2,
            (similarity_res[1] + similarity_res[3] + similarity_res[19]) / 3,
            (similarity_res[20] + similarity_res[21] + similarity_res[22] + similarity_res[23]) / 4,
        ]

        # Class ids ranked by descending aggregated score (computed once,
        # not once per comparison).
        ranking = np.argsort(-np.array(res_group)).tolist()

        # recall@2: hit when the true class is among the top-2 predictions.
        # Use ranking[:1] for recall@1 or ranking[:3] for recall@3.
        if class_id in ranking[:2]:
            right_count += 1

    acc = right_count / len(images_path_list)
    mean_acc += acc

    print('class: ', name_list[class_id], '------ACC:', acc)

print('mean_acc:', mean_acc/(len(name_list)))
    