import argparse
import warnings

import gradio as gr
import torch
from PIL import Image

from src.open_clip import create_model_and_transforms, get_tokenizer

warnings.filterwarnings("ignore", category=UserWarning)

# Parse the path to the model checkpoint from the command line
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='HPS_v2.pt',
                    help='Path to the model checkpoint')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Build the ViT-H-14 CLIP backbone initialized from the LAION-2B pretrained weights
model, preprocess_train, preprocess_val = create_model_and_transforms(
    'ViT-H-14',
    'laion2B-s32B-b79K',
    precision='amp',
    device=device,
    jit=False,
    force_quick_gelu=False,
    force_custom_text=False,
    force_patch_dropout=False,
    force_image_size=None,
    pretrained_image=False,
    image_mean=None,
    image_std=None,
    light_augmentation=True,
    aug_cfg={},
    output_dict=True,
    with_score_predictor=False,
    with_region_predictor=False
)

# Load the fine-tuned HPS v2 weights; map_location keeps this working on CPU-only machines
checkpoint = torch.load(args.checkpoint, map_location=device)
model.load_state_dict(checkpoint['state_dict'])
tokenizer = get_tokenizer('ViT-H-14')
model.eval()

intro = """
<h1>HPS v2</h1>
<p>evaluating human preference for generated images</p>
<p>GitHub | ArXiv</p>
""" def inference(image, prompt): # Load your image and prompt with torch.no_grad(): # Process the image image = preprocess_val(image).unsqueeze(0).to(device=device, non_blocking=True) # Process the prompt text = tokenizer([prompt]).to(device=device, non_blocking=True) # Calculate the HPS with torch.cuda.amp.autocast(): outputs = model(image, text) image_features, text_features = outputs["image_features"], outputs["text_features"] logits_per_image = image_features @ text_features.T hps_score = torch.diagonal(logits_per_image).cpu().numpy() output = 'HPSv2 score: ' + str(hps_score[0]) return output with gr.Blocks(css="style.css") as demo: gr.HTML(intro) with gr.Column(): image = gr.Image(label="Image", type="pil") prompt = gr.Textbox(lines=1, label="Prompt") button = gr.Button("Compute HPS v2") score = gr.Textbox(label="output", lines=1, interactive=False, elem_id="output") button.click(inference, inputs=[image, prompt], outputs=score) demo.queue(concurrency_count=1) demo.launch()