nielsr HF staff committed on
Commit
7cbdb7e
1 Parent(s): 1ba429d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -6
README.md CHANGED
@@ -33,16 +33,16 @@ fine-tuned versions on a task that interests you.
33
  Here is how to use this model in PyTorch:
34
 
35
  ```python
36
- from transformers import ViTFeatureExtractor, ViTModel
37
  from PIL import Image
38
  import requests
39
 
40
  url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
41
  image = Image.open(requests.get(url, stream=True).raw)
42
 
43
- feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
44
  model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
45
- inputs = feature_extractor(images=image, return_tensors="pt")
46
 
47
  outputs = model(**inputs)
48
  last_hidden_states = outputs.last_hidden_state
@@ -51,17 +51,17 @@ last_hidden_states = outputs.last_hidden_state
51
  Here is how to use this model in JAX/Flax:
52
 
53
  ```python
54
- from transformers import ViTFeatureExtractor, FlaxViTModel
55
  from PIL import Image
56
  import requests
57
 
58
  url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
59
  image = Image.open(requests.get(url, stream=True).raw)
60
 
61
- feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
62
  model = FlaxViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
63
 
64
- inputs = feature_extractor(images=image, return_tensors="np")
65
  outputs = model(**inputs)
66
  last_hidden_states = outputs.last_hidden_state
67
  ```
33
  Here is how to use this model in PyTorch:
34
 
35
  ```python
36
+ from transformers import ViTImageProcessor, ViTModel
37
  from PIL import Image
38
  import requests
39
 
40
  url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
41
  image = Image.open(requests.get(url, stream=True).raw)
42
 
43
+ processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
44
  model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
45
+ inputs = processor(images=image, return_tensors="pt")
46
 
47
  outputs = model(**inputs)
48
  last_hidden_states = outputs.last_hidden_state
51
  Here is how to use this model in JAX/Flax:
52
 
53
  ```python
54
+ from transformers import ViTImageProcessor, FlaxViTModel
55
  from PIL import Image
56
  import requests
57
 
58
  url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
59
  image = Image.open(requests.get(url, stream=True).raw)
60
 
61
+ processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
62
  model = FlaxViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
63
 
64
+ inputs = processor(images=image, return_tensors="np")
65
  outputs = model(**inputs)
66
  last_hidden_states = outputs.last_hidden_state
67
  ```