from .load_model import build_transformer
from .clip import _transform
import torch
from torch import nn
from torchvision import transforms
from PIL import Image

class TransformFeatureExtractor(nn.Module):
    """Inference-only wrapper around the project's transformer re-ID model.

    Builds the model via ``build_transformer()``, loads weights from
    ``weight_path``, and keeps the model in eval mode.
    """

    def __init__(self, weight_path):
        super().__init__()
        # build_transformer() is project-local; assumes it returns an
        # nn.Module exposing load_param(path) -- see .load_model.
        self.model = build_transformer().eval()
        self.model.load_param(weight_path)

    @torch.no_grad()
    def forward(self, x):
        """Return the model's feature embedding for a preprocessed batch ``x``.

        Defined as ``forward`` rather than overriding ``__call__``: the
        original override bypassed nn.Module's call machinery (hooks, etc.).
        Calling the instance still dispatches here, with gradients disabled.
        """
        return self.model(x)
    

def preprocess_image(image_path, from_array=True):
    """Load and preprocess a single image into a (1, C, H, W) tensor.

    Args:
        image_path: despite the name, this is an ndarray (H, W, C) when
            ``from_array`` is True; otherwise a filesystem path to an image.
        from_array: selects between the two input forms above.

    Returns:
        The preprocessed image tensor with a leading batch dimension.
    """
    # Project-local CLIP-style pipeline at the re-ID resolution 256x128;
    # presumably resize + to-tensor + normalize -- see .clip._transform.
    preprocess = _transform(n_px=(256, 128))
    if from_array:
        image = Image.fromarray(image_path)
    else:
        image = Image.open(image_path).convert('RGB')
    image = preprocess(image)
    return image.unsqueeze(0)  # add batch dimension
    
def extract_features(model, image_path, device, from_array=False, trans=False):
    """Run ``model`` on one image and return its feature vector as a numpy array.

    ``image_path`` is a path (or, when ``from_array`` is True, an ndarray)
    forwarded to ``preprocess_image``.
    NOTE(review): ``trans`` is accepted but never used -- confirm intent.
    """
    batch = preprocess_image(image_path, from_array)
    batch = batch.to(device)
    with torch.no_grad():
        output = model(batch)
    output = output.squeeze()
    return output.cpu().numpy()