sgugger committed on
Commit d66d2f6
1 parent: e8aa89f

Update with commit 30ed3adf474aaf2972ab56f5624089bc24a6adf3


See: https://github.com/huggingface/transformers/commit/30ed3adf474aaf2972ab56f5624089bc24a6adf3

Files changed (2)
  1. frameworks.json +1 -0
  2. pipeline_tags.json +6 -0
frameworks.json CHANGED
@@ -104,6 +104,7 @@
  {"model_type":"mobilevit","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
  {"model_type":"mobilevitv2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"mpnet","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
+ {"model_type":"mra","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"mt5","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
  {"model_type":"musicgen","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"mvp","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
pipeline_tags.json CHANGED
@@ -474,6 +474,12 @@
  {"model_class":"MobileViTModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
  {"model_class":"MobileViTV2ForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
  {"model_class":"MobileViTV2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+ {"model_class":"MraForMaskedLM","pipeline_tag":"fill-mask","auto_class":"AutoModelForMaskedLM"}
+ {"model_class":"MraForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
+ {"model_class":"MraForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
+ {"model_class":"MraForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
+ {"model_class":"MraForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
+ {"model_class":"MraModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
  {"model_class":"MusicgenForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
  {"model_class":"MvpForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
  {"model_class":"MvpForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}