merve (HF staff) committed
Commit 6901a99 • 1 parent: fd70df6

Update app.py

Files changed (1)
  1. app.py +4 -1
app.py CHANGED
@@ -1,4 +1,4 @@
-from transformers import AutoTokenizer, CLIPImageProcessorProcessor, AutoProcessor, pipeline, CLIPTokenizer
+from transformers import CLIPImageProcessor, pipeline, CLIPTokenizer
 import torchvision.transforms as T
 import torch.nn.functional as F
 from PIL import Image, ImageFile
@@ -60,6 +60,9 @@ def infer(image, labels):
 
 with gr.Blocks() as demo:
     gr.Markdown("# EVACLIP vs CLIP 💥 ")
+    gr.Markdown("[EVACLIP](https://huggingface.co/BAAI/EVA-CLIP-8B) is CLIP scaled to the moon! 🔥")
+    gr.Markdown("It's a state-of-the-art zero-shot image classification model, which also outperforms its predecessors on text-image retrieval and linear probing.")
+    gr.Markdown("In this demo, compare EVACLIP outputs to CLIP outputs ✨")
     with gr.Row():
         with gr.Column():
             image_input = gr.Image(type="pil")
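
For context: the removed line imported CLIPImageProcessorProcessor, a class that does not exist in transformers, so the app would fail with an ImportError at startup; the replacement keeps only the names the app actually uses. Below is a minimal sketch, not the app's actual infer() code, of how the surviving pipeline import supports zero-shot classification. The model ID, image path, and candidate labels are illustrative assumptions, not taken from the commit.

```python
# Minimal sketch of zero-shot image classification with the corrected imports.
# The model ID, image path, and labels below are illustrative assumptions.
from transformers import pipeline
from PIL import Image

# Plain CLIP through the zero-shot image classification pipeline; an
# EVA-CLIP checkpoint from the Hub could be loaded the same way for the
# side-by-side comparison the demo describes.
clip = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)

image = Image.open("example.jpg")  # any RGB image
labels = ["a photo of a cat", "a photo of a dog"]

# The pipeline returns one {"label", "score"} dict per candidate label,
# sorted by descending score.
for result in clip(image, candidate_labels=labels):
    print(f"{result['label']}: {result['score']:.3f}")
```

The pipeline wraps the tokenizer and image processor internally, which is why the corrected import list can stay this small; the explicit CLIPImageProcessor and CLIPTokenizer imports the commit keeps would only be needed where the app drives the models manually.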