lysandre committed
Commit 94317e0
Parent: 62aa5e1

Update with commit d2cdefb9ec5080b78be302740a9cbaf48241b5c6


See: https://github.com/huggingface/transformers/commit/d2cdefb9ec5080b78be302740a9cbaf48241b5c6

Files changed (2)
  1. frameworks.json +1 -0
  2. pipeline_tags.json +5 -0
frameworks.json CHANGED
@@ -208,6 +208,7 @@
208   {"model_type":"vits","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
209   {"model_type":"vivit","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
210   {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
211 + {"model_type":"wav2vec2-bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
212   {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
213   {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
214   {"model_type":"whisper","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
pipeline_tags.json CHANGED
@@ -986,6 +986,11 @@
986   {"model_class":"VitsModel","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
987   {"model_class":"VivitForVideoClassification","pipeline_tag":"video-classification","auto_class":"AutoModelForVideoClassification"}
988   {"model_class":"VivitModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
989 + {"model_class":"Wav2Vec2BertForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
990 + {"model_class":"Wav2Vec2BertForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
991 + {"model_class":"Wav2Vec2BertForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
992 + {"model_class":"Wav2Vec2BertForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
993 + {"model_class":"Wav2Vec2BertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
994   {"model_class":"Wav2Vec2ConformerForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
995   {"model_class":"Wav2Vec2ConformerForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
996   {"model_class":"Wav2Vec2ConformerForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}