sgugger committed on
Commit d37ca74
Parent: a6887f8

Update with commit 2840272c5f872315a5c37b8aee0454d2129b8bc7


See: https://github.com/huggingface/transformers/commit/2840272c5f872315a5c37b8aee0454d2129b8bc7

Files changed (2):
  1. frameworks.json +1 -1
  2. pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -153,7 +153,7 @@
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
  {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
  {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
- {"model_type":"whisper","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoProcessor"}
+ {"model_type":"whisper","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
  {"model_type":"xclip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
  {"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
  {"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
pipeline_tags.json CHANGED
@@ -285,6 +285,8 @@
  {"model_class":"FlaxVisionTextDualEncoderModel","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
  {"model_class":"FlaxWav2Vec2ForPreTraining","pipeline_tag":"pretraining","auto_class":"Flax_AutoModelForPreTraining"}
  {"model_class":"FlaxWav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
+ {"model_class":"FlaxWhisperForConditionalGeneration","pipeline_tag":"automatic-speech-recognition","auto_class":"Flax_AutoModelForSpeechSeq2Seq"}
+ {"model_class":"FlaxWhisperModel","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
  {"model_class":"FlaxXGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"Flax_AutoModelForCausalLM"}
  {"model_class":"FlaxXGLMModel","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
  {"model_class":"FlaxXLMRobertaForCausalLM","pipeline_tag":"text-generation","auto_class":"Flax_AutoModelForCausalLM"}