sgugger committed on
Commit
cf1c7eb
1 Parent(s): 04457bf

Update with commit 8a5e8a9c2a4dc54b4485994311de6b839976828c

Browse files

See: https://github.com/huggingface/transformers/commit/8a5e8a9c2a4dc54b4485994311de6b839976828c

Files changed (2) hide show
  1. frameworks.json +1 -0
  2. pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -175,6 +175,7 @@
175
  {"model_type":"vit_hybrid","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
176
  {"model_type":"vit_mae","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
177
  {"model_type":"vit_msn","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
 
178
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
179
  {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
180
  {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 
175
  {"model_type":"vit_hybrid","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
176
  {"model_type":"vit_mae","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
177
  {"model_type":"vit_msn","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
178
+ {"model_type":"vivit","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
179
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
180
  {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
181
  {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
pipeline_tags.json CHANGED
@@ -913,6 +913,8 @@
913
  {"model_class":"VisionTextDualEncoderModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
914
  {"model_class":"VisualBertForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
915
  {"model_class":"VisualBertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 
 
916
  {"model_class":"Wav2Vec2ConformerForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
917
  {"model_class":"Wav2Vec2ConformerForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
918
  {"model_class":"Wav2Vec2ConformerForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
 
913
  {"model_class":"VisionTextDualEncoderModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
914
  {"model_class":"VisualBertForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
915
  {"model_class":"VisualBertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
916
+ {"model_class":"VivitForVideoClassification","pipeline_tag":"video-classification","auto_class":"AutoModelForVideoClassification"}
917
+ {"model_class":"VivitModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
918
  {"model_class":"Wav2Vec2ConformerForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
919
  {"model_class":"Wav2Vec2ConformerForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
920
  {"model_class":"Wav2Vec2ConformerForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}