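# HPS v2 demo: given an image and the prompt used to generate it, this Gradio
# app reports the Human Preference Score v2 predicted by a ViT-H-14 CLIP model
# fine-tuned on human preference data.
#
# Typical invocation (the script name is illustrative; the checkpoint path
# defaults to HPS_v2.pt as declared below):
#   python app.py --checkpoint HPS_v2.pt
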
import gradio as gr
import torch
from PIL import Image
from src.open_clip import create_model_and_transforms, get_tokenizer
import warnings
import argparse

warnings.filterwarnings("ignore", category=UserWarning)

# Create an argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='HPS_v2.pt', help='Path to the model checkpoint')

args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model, preprocess_train, preprocess_val = create_model_and_transforms(
    'ViT-H-14',
    'laion2B-s32B-b79K',
    precision='amp',
    device=device,
    jit=False,
    force_quick_gelu=False,
    force_custom_text=False,
    force_patch_dropout=False,
    force_image_size=None,
    pretrained_image=False,
    image_mean=None,
    image_std=None,
    light_augmentation=True,
    aug_cfg={},
    output_dict=True,
    with_score_predictor=False,
    with_region_predictor=False
)

checkpoint = torch.load(args.checkpoint, map_location=device)
model.load_state_dict(checkpoint['state_dict'])
tokenizer = get_tokenizer('ViT-H-14')
model.eval()
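
# At this point the ViT-H-14 model carries the HPS v2 weights (fine-tuned on
# human preference data, see the linked paper) and runs in eval mode on `device`.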

intro = """
<h1 style="font-weight: 1400; text-align: center; margin-bottom: 7px;">
   HPS v2
</h1>
<h3 style="font-weight: 600; text-align: center;">
    Evaluating human preference for generated images
</h3>
<h4 style="text-align: center; margin-bottom: 7px;">
    <a href="https://github.com/tgxs002/HPSv2" style="text-decoration: underline;" target="_blank">GitHub</a> | <a href="https://arxiv.org/abs/2306.09341" style="text-decoration: underline;" target="_blank">ArXiv</a>
</h4>
<p style="font-size: 0.9rem; margin: 0rem; line-height: 1.2em; margin-top:1em">
</p>"""

def inference(image, prompt):
    # Score the (image, prompt) pair; no gradients are needed for inference
    with torch.no_grad():
        # Preprocess the image and move it to the model's device
        image = preprocess_val(image).unsqueeze(0).to(device=device, non_blocking=True)
        # Tokenize the prompt
        text = tokenizer([prompt]).to(device=device, non_blocking=True)
        # The HPS v2 score is the similarity between the image and text embeddings
        with torch.cuda.amp.autocast(enabled=(device == 'cuda')):
            outputs = model(image, text)
            image_features, text_features = outputs["image_features"], outputs["text_features"]
            logits_per_image = image_features @ text_features.T
            hps_score = torch.diagonal(logits_per_image).cpu().numpy()
    return f'HPSv2 score: {hps_score[0]}'
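
# A minimal sketch of calling the scorer directly (bypassing the UI); the image
# path and prompt below are hypothetical placeholders:
#   print(inference(Image.open('example.png'), 'a photo of a cat wearing a red hat'))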

with gr.Blocks(css="style.css") as demo:
    gr.HTML(intro)
    with gr.Column():
        image = gr.Image(label="Image", type="pil")
        prompt = gr.Textbox(lines=1, label="Prompt")
        button = gr.Button("Compute HPS v2")
        score = gr.Textbox(label="HPS v2 score", lines=1, interactive=False, elem_id="output")
    button.click(inference, inputs=[image, prompt], outputs=score)

demo.queue(concurrency_count=1)
demo.launch()