mjspeck committed
Commit 9c1497a
Parent: 54652f1

Update README.md


Fixed typo in README

Files changed (1)
README.md (+1 -1)
README.md CHANGED
@@ -133,7 +133,7 @@ text_embeddings = outputs.text_embeddings # Batch size X (Text sequence length +
 multimodal_embeddings = outputs.multimodal_embeddings # Batch size X (Number of image patches + Text Sequence Length + 3) X Hidden size => 2 X 275 x 768

 # Loss
-loss = output.loss # probably NaN due to missing labels
+loss = outputs.loss # probably NaN due to missing labels

 # Global contrastive loss logits
 image_contrastive_logits = outputs.contrastive_logits_per_image
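For context, the corrected line lives inside the README's FLAVA pretraining example, where the forward pass returns a single output object and the loss is read from that object. The following is a minimal sketch of that flow, not the repo's exact README code: it assumes the facebook/flava-full checkpoint, the transformers FlavaProcessor and FlavaForPreTraining classes, and an illustrative COCO image URL.

import requests
from PIL import Image
from transformers import FlavaProcessor, FlavaForPreTraining

# Load the pretraining model and its processor (assumed checkpoint name)
model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
processor = FlavaProcessor.from_pretrained("facebook/flava-full")

# Illustrative test image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Prepare paired image-text inputs
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"],
    images=[image, image],
    return_tensors="pt",
    padding="max_length",
    max_length=77,
)

# Single forward pass; all fields below are attributes of this one output object
outputs = model(**inputs)

multimodal_embeddings = outputs.multimodal_embeddings  # Batch size X (patches + text length + 3) X hidden size
loss = outputs.loss  # read from `outputs`, not `output`; probably NaN here since no pretraining labels are provided
image_contrastive_logits = outputs.contrastive_logits_per_image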