nielsr (HF staff) committed
Commit cdaa4f1
1 Parent(s): 6542b2d

Update README.md

Files changed (1):
  1. README.md +6 -5
README.md CHANGED
@@ -34,14 +34,15 @@ fine-tuned versions on a task that interests you.
 Here is how to use this model:
 
 ```python
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
+from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
 from PIL import Image
 import requests
 
 url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
-feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade")
-inputs = feature_extractor(images=image, return_tensors="pt")
+
+processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-large-ade")
+inputs = processor(images=image, return_tensors="pt")
 
 model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-large-ade")
 outputs = model(**inputs)
@@ -50,9 +51,9 @@ outputs = model(**inputs)
 class_queries_logits = outputs.class_queries_logits
 masks_queries_logits = outputs.masks_queries_logits
 
-# you can pass them to feature_extractor for postprocessing
+# you can pass them to processor for postprocessing
 # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
-predicted_semantic_map = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
 ```
 
 For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/maskformer).
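
After this change, `processor.post_process_semantic_segmentation` returns one `(height, width)` tensor of class indices per entry in `target_sizes`. Below is a minimal sketch (not part of the commit) of how the result could be inspected; it assumes the updated snippet above has already run, so `processor`, `model`, `image`, and `outputs` are in scope, and it uses the standard `model.config.id2label` mapping to turn class indices into ADE20K label names.

```python
import torch

# One (height, width) tensor of class indices is returned per image
# passed in target_sizes; take the first (and only) one here.
predicted_semantic_map = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]

# Map the class indices that actually occur in the prediction to their
# label names via the model config (assumes id2label is populated, as it
# is for facebook/maskformer-swin-large-ade).
label_ids = torch.unique(predicted_semantic_map).tolist()
print([model.config.id2label[i] for i in label_ids])
```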