Update README.md
README.md CHANGED
@@ -53,11 +53,9 @@ from PIL import Image
 import numpy as np
 
 model_args = ModelArguments(
-    model_name='
+    model_name='TIGER-Lab/VLM2Vec-Full',
     pooling='last',
-    normalize=True
-    lora=True,
-    checkpoint_path='TIGER-Lab/VLM2Vec-LoRA')
+    normalize=True)
 
 model = MMEBModel.load(model_args)
 model.eval()

@@ -99,6 +97,17 @@ inputs = {key: value.to('cuda') for key, value in inputs.items()}
 tgt_output = model(tgt=inputs)["tgt_reps"]
 print(string, '=', model.compute_similarity(qry_output, tgt_output))
 ## <|image_1|> Represent the given image. = tensor([[0.3105]], device='cuda:0', dtype=torch.bfloat16)
+
+inputs = processor('Find me an everyday image that matches the given caption: A cat and a tiger.',)
+inputs = {key: value.to('cuda') for key, value in inputs.items()}
+qry_output = model(qry=inputs)["qry_reps"]
+
+string = '<|image_1|> Represent the given image.'
+inputs = processor(string, [Image.open('figures/example.jpg')])
+inputs = {key: value.to('cuda') for key, value in inputs.items()}
+tgt_output = model(tgt=inputs)["tgt_reps"]
+print(string, '=', model.compute_similarity(qry_output, tgt_output))
+## <|image_1|> Represent the given image. = tensor([[0.2158]], device='cuda:0', dtype=torch.bfloat16)
 ```
 
 ## Citation
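
For readers applying this change by hand, the sketch below stitches the two hunks into the full post-change snippet. It is only a sketch: the import paths for `MMEBModel` and `ModelArguments`, the construction of `processor`, and moving the model to CUDA/bfloat16 are not part of this diff and are assumed from the surrounding README.

```python
# Consolidated post-change example, assembled from the two hunks above.
# Assumptions (not shown in this diff): where MMEBModel, ModelArguments and
# `processor` are defined, and that the model has been moved to CUDA in bfloat16.
from PIL import Image
import numpy as np

# MMEBModel, ModelArguments and processor are set up earlier in the README;
# the placeholders below stand in for those lines and are not taken from the diff.
# from <vlm2vec package> import MMEBModel, ModelArguments
# processor = <the VLM2Vec processor built earlier in the README>

model_args = ModelArguments(
    model_name='TIGER-Lab/VLM2Vec-Full',  # full checkpoint; replaces lora=True + checkpoint_path='TIGER-Lab/VLM2Vec-LoRA'
    pooling='last',
    normalize=True)

model = MMEBModel.load(model_args)
model.eval()

# Text query -> image target: embed a caption, embed an image, then score them.
inputs = processor('Find me an everyday image that matches the given caption: A cat and a tiger.',)
inputs = {key: value.to('cuda') for key, value in inputs.items()}
qry_output = model(qry=inputs)["qry_reps"]

string = '<|image_1|> Represent the given image.'
inputs = processor(string, [Image.open('figures/example.jpg')])
inputs = {key: value.to('cuda') for key, value in inputs.items()}
tgt_output = model(tgt=inputs)["tgt_reps"]
print(string, '=', model.compute_similarity(qry_output, tgt_output))
# The committed README reports: tensor([[0.2158]], device='cuda:0', dtype=torch.bfloat16)
```

The point of the commit is visible in the first few lines: loading the merged `TIGER-Lab/VLM2Vec-Full` checkpoint directly removes the need for the `lora=True` / `checkpoint_path='TIGER-Lab/VLM2Vec-LoRA'` pair used by the LoRA variant.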