# Script to generate image embeddings with several vision backbones and
# run similarity searches over them.
import os
os.environ['CUDA_VISIBLE_DEVICES']='3'
# CUDA_VISIBLE_DEVICES must be set BEFORE torch is imported, otherwise
# torch enumerates GPUs using the pre-existing environment.

import pandas as pd
import torch
from transformers import AutoImageProcessor, EfficientNetModel, ViTModel, AutoModel, CLIPProcessor, CLIPModel, Blip2Processor, Blip2Model
from torchvision import models, transforms
import numpy as np
import os  # NOTE(review): duplicate of the `import os` above; harmless but removable
from PIL import Image


# Source table; the driver at the bottom expects a 'filename' column whose
# entries are image file names under IMAGE_DIR.
flags_df = pd.read_csv('viton.csv')
IMAGE_DIR = "/data/shengjie/VITON-HD_ori/train/cloth"

'''
google/vit-large-patch16-224-in21k
google/efficientnet-b7
facebook/dinov2-base
openai/clip-vit-base-patch32
Salesforce/blip2-opt-2.7b
'''
# Local checkpoint directories mirroring the Hugging Face model ids listed above.
VIT_PATH = '/home/shengjie/ckp/vit-large-patch16-224-in21k'
EFFICIENT_PATH = '/home/shengjie/ckp/efficientnet-b7'
DINO_V2 = '/home/shengjie/ckp/dinov2-base'
CLIP_PATH = '/home/shengjie/ckp/clip-vit-base-patch32'
BLIP_PATH = '/home/shengjie/ckp/blip2-opt-2.7b'

def load_local_image(filename_name):
    """Open an image from IMAGE_DIR and return it as an RGB PIL image.

    Parameters
    ----------
    filename_name : str
        Image file name (with extension), joined directly onto IMAGE_DIR.

    Returns
    -------
    PIL.Image.Image or None
        The image converted to RGB, or None (after printing a notice)
        when the file does not exist.
    """
    image_path = os.path.join(IMAGE_DIR, filename_name)

    # Guard clause: bail out early when the file is missing.
    if not os.path.exists(image_path):
        print(f"Image for {filename_name} not found.")
        return None

    img = Image.open(image_path)

    # Normalize palette/grayscale/CMYK images so every backbone sees 3 channels.
    if img.mode != 'RGB':
        img = img.convert('RGB')

    print(f'extracting {image_path}')  # fixed typo: was 'extrating'
    return img
#ViT

def extract_features_vit(filename, image_processor=None, model=None):
    """Return the ViT [CLS] embedding of one image as a (1, hidden) numpy array.

    Parameters
    ----------
    filename : str
        Image file name under IMAGE_DIR (resolved by load_local_image).
    image_processor, model : optional
        Pre-loaded AutoImageProcessor / ViTModel. Loading the checkpoint is
        expensive, so pass them in (as the DINO/CLIP/BLIP helpers do) when
        embedding many images; omitted arguments are loaded on demand,
        which preserves the original single-call behavior.
    """
    if image_processor is None:
        image_processor = AutoImageProcessor.from_pretrained(VIT_PATH)
    if model is None:
        model = ViTModel.from_pretrained(VIT_PATH)

    # Prepare input image.
    img = load_local_image(filename)
    inputs = image_processor(img, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)
    # Keep only the [CLS] token (position 0); shape is (1, hidden_size).
    # The original `.squeeze(1)` after this slice was a no-op and is dropped.
    embedding = outputs.last_hidden_state[:, 0, :]
    return embedding.numpy()
#EfficientNet

def extract_features_efficientNet(filename, image_processor=None, model=None):
    """Return an EfficientNet-B7 embedding of one image as a (1, C) numpy array.

    The embedding is the spatial mean of the last hidden feature map
    (global average pooling over H and W).

    Parameters
    ----------
    filename : str
        Image file name under IMAGE_DIR (resolved by load_local_image).
    image_processor, model : optional
        Pre-loaded AutoImageProcessor / EfficientNetModel; pass them in to
        avoid reloading the checkpoint on every call. Omitted arguments are
        loaded on demand, preserving the original single-call behavior.
    """
    if image_processor is None:
        image_processor = AutoImageProcessor.from_pretrained(EFFICIENT_PATH)
    if model is None:
        model = EfficientNetModel.from_pretrained(EFFICIENT_PATH)

    # Prepare input image.
    img = load_local_image(filename)
    inputs = image_processor(img, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)

    # Last hidden state is (1, C, H, W); average over the spatial dims.
    embedding = outputs.hidden_states[-1]
    embedding = torch.mean(embedding, dim=[2, 3])
    return embedding.numpy()

#DINO-v2
def get_dino():
    """Load the DINOv2 image processor and model; the model is moved to GPU."""
    processor = AutoImageProcessor.from_pretrained(DINO_V2)
    dino_model = AutoModel.from_pretrained(DINO_V2).cuda()
    return processor, dino_model
def extract_features_DINO_v2(filename, image_processor, model):
    """Embed one image with DINOv2 and return the CLS token as a numpy array.

    Expects the processor/model pair produced by get_dino() (model on GPU).
    """
    img = load_local_image(filename)
    batch = image_processor(img, return_tensors='pt')
    # Move every input tensor onto the GPU alongside the model.
    batch = {name: tensor.cuda() for name, tensor in batch.items()}
    with torch.no_grad():
        hidden = model(**batch).last_hidden_state
    cls_embedding = hidden[:, 0, :].squeeze(1)
    return cls_embedding.cpu().numpy()

#CLIP
def get_clip():
    """Load the CLIP processor and model; the model is moved to GPU."""
    processor = CLIPProcessor.from_pretrained(CLIP_PATH)
    clip_model = CLIPModel.from_pretrained(CLIP_PATH).cuda()
    return processor, clip_model
def extract_features_clip(filename, image_processor, model):
    """Embed one image with CLIP's image tower and return it as a numpy array.

    Expects the processor/model pair produced by get_clip() (model on GPU).
    """
    img = load_local_image(filename)
    batch = image_processor(images=img, return_tensors='pt', padding=True)
    # Move every input tensor onto the GPU alongside the model.
    batch = {name: tensor.cuda() for name, tensor in batch.items()}
    with torch.no_grad():
        image_features = model.get_image_features(**batch)
    return image_features.cpu().numpy()

#Blip 2
def get_blip():
    """Load the BLIP-2 processor and model (fp16 weights); model moved to GPU."""
    processor = Blip2Processor.from_pretrained(BLIP_PATH)
    blip_model = Blip2Model.from_pretrained(BLIP_PATH, torch_dtype=torch.float16).cuda()
    return processor, blip_model
def extract_features_blip(filename, image_processor, model):
    """Embed one image with BLIP-2's Q-Former; returns the first query token.

    Expects the processor/model pair produced by get_blip() (fp16 model on GPU).
    """
    img = load_local_image(filename)
    batch = image_processor(images=img, return_tensors='pt', padding=True)
    # Move every input tensor onto the GPU alongside the model.
    # NOTE(review): the model is loaded in float16 while the processor emits
    # float32 pixel values — confirm the forward pass tolerates this mix.
    batch = {name: tensor.cuda() for name, tensor in batch.items()}
    with torch.no_grad():
        qformer_out = model.get_qformer_features(**batch)
    first_query_token = qformer_out.last_hidden_state[:, 0, :].squeeze(1)
    return first_query_token.cpu().numpy()

#vgg16

def extract_features_vgg16(filename, model=None):
    """Return VGG16's ImageNet output for one image as a (1, 1000) numpy array.

    NOTE: this returns the full classifier output (1000 class logits), not a
    penultimate-layer feature vector; truncate `model.classifier` if true
    features are wanted.

    Parameters
    ----------
    filename : str
        Image file name under IMAGE_DIR (resolved by load_local_image).
    model : optional
        A pre-built eval-mode VGG16; pass it in to avoid reloading the
        pretrained weights on every call. When omitted, the weights are
        loaded on demand, preserving the original behavior.
    """
    if model is None:
        # TODO(review): `pretrained=True` is deprecated in newer torchvision;
        # migrate to `weights=models.VGG16_Weights.IMAGENET1K_V1` once the
        # installed version supports it.
        model = models.vgg16(pretrained=True)
        model.eval()  # evaluation mode: disables dropout

    # Standard ImageNet preprocessing: resize, center crop, normalize.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    img = load_local_image(filename)
    img_t = preprocess(img)
    batch_t = torch.unsqueeze(img_t, 0)  # add batch dimension -> (1, 3, 224, 224)

    with torch.no_grad():
        embedding = model(batch_t)
    return embedding.numpy()

# --- Driver: embed every image with BLIP-2 and export to CSV -------------
image_processor, model = get_blip()
flags_df['features'] = flags_df['filename'].apply(
    lambda name: extract_features_blip(name, image_processor, model)
)
# Export embeddings to CSV. NOTE(review): to_csv stringifies the numpy
# arrays — confirm the downstream FAISS step re-parses them correctly.
flags_df.to_csv('viton_embeddings_blip.csv', index=False)
# Cosine similarity with FAISS