sgugger committed on
Commit
60c8007
1 Parent(s): fd61000

Update with commit bef1e3e4a00bd0863f804ba0a4e05dc77676a341

Browse files
Files changed (2) hide show
  1. frameworks.json +1 -0
  2. pipeline_tags.json +3 -0
frameworks.json CHANGED
@@ -72,6 +72,7 @@
72
  {"model_type":"visual_bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
73
  {"model_type":"vit","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoFeatureExtractor"}
74
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
 
75
  {"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
76
  {"model_type":"xlm-prophetnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
77
  {"model_type":"xlm-roberta","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 
72
  {"model_type":"visual_bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
73
  {"model_type":"vit","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoFeatureExtractor"}
74
  {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
75
+ {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
76
  {"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
77
  {"model_type":"xlm-prophetnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
78
  {"model_type":"xlm-roberta","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
pipeline_tags.json CHANGED
@@ -547,6 +547,9 @@
547
  {"model_class":"Wav2Vec2ForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
548
  {"model_class":"Wav2Vec2ForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
549
  {"model_class":"Wav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 
 
 
550
  {"model_class":"XLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
551
  {"model_class":"XLMForQuestionAnsweringSimple","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
552
  {"model_class":"XLMForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
 
547
  {"model_class":"Wav2Vec2ForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
548
  {"model_class":"Wav2Vec2ForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
549
  {"model_class":"Wav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
550
+ {"model_class":"WavLMForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
551
+ {"model_class":"WavLMForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
552
+ {"model_class":"WavLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
553
  {"model_class":"XLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
554
  {"model_class":"XLMForQuestionAnsweringSimple","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
555
  {"model_class":"XLMForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}