JulianPhillips committed
Commit 9dcf517 · verified · 1 Parent(s): 1675686

Update Dockerfile

Files changed (1): Dockerfile +4 -4
Dockerfile CHANGED
@@ -49,12 +49,12 @@ import tensorflow_hub as hub\n\n\
  movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\n\
  # Download BLIP model and tokenizer using huggingface_hub\n\
  from transformers import BlipForConditionalGeneration, BlipProcessor\n\
- BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
- BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\n\
+ BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')\n\
+ BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')\n\n\
  # Download CLIP model and processor using huggingface_hub\n\
  from transformers import CLIPModel, CLIPProcessor\n\
- CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\
- CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" > download_models.py
+ CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')\n\
+ CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')" > download_models.py
 
  # Run the script to download models
  RUN python download_models.py
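
Because the script body is written through an escaped echo string inside the Dockerfile, the \n\ continuations make the change hard to read. Unescaping only the lines visible in this hunk, the affected part of download_models.py after this commit looks roughly like the sketch below; the tensorflow_hub import is taken from the hunk header, and any lines of the script outside the hunk are not shown.

import tensorflow_hub as hub

# Download the MoveNet single-pose model from TF Hub
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')

# Download BLIP model and tokenizer using huggingface_hub
from transformers import BlipForConditionalGeneration, BlipProcessor
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')

# Download CLIP model and processor using huggingface_hub
from transformers import CLIPModel, CLIPProcessor
CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')
CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')

The commit swaps the 'large' checkpoints (blip-image-captioning-large, clip-vit-large-patch14) for the 'base' ones (blip-image-captioning-base, clip-vit-base-patch32), which are considerably smaller downloads, while keeping the same save_pretrained layout under /models/blip and /models/clip.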