lysandre HF staff committed on
Commit
8c2c945
1 Parent(s): 8849ba7

Update with commit 29f1aee3b6c560182415fcb9e2238125e2f5b29c

Browse files

See: https://github.com/huggingface/transformers/commit/29f1aee3b6c560182415fcb9e2238125e2f5b29c

Files changed (2) hide show
  1. frameworks.json +1 -0
  2. pipeline_tags.json +4 -0
frameworks.json CHANGED
@@ -155,6 +155,7 @@
155
  {"model_type":"rwkv","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
156
  {"model_type":"sam","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoProcessor"}
157
  {"model_type":"seamless_m4t","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 
158
  {"model_type":"segformer","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
159
  {"model_type":"sew","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
160
  {"model_type":"sew-d","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 
155
  {"model_type":"rwkv","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
156
  {"model_type":"sam","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoProcessor"}
157
  {"model_type":"seamless_m4t","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
158
+ {"model_type":"seamless_m4t_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
159
  {"model_type":"segformer","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
160
  {"model_type":"sew","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
161
  {"model_type":"sew-d","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
pipeline_tags.json CHANGED
@@ -645,6 +645,10 @@
645
  {"model_class":"SeamlessM4TForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
646
  {"model_class":"SeamlessM4TForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
647
  {"model_class":"SeamlessM4TModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 
 
 
 
648
  {"model_class":"SegformerForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
649
  {"model_class":"SegformerModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
650
  {"model_class":"Speech2Text2ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
 
645
  {"model_class":"SeamlessM4TForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
646
  {"model_class":"SeamlessM4TForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
647
  {"model_class":"SeamlessM4TModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
648
+ {"model_class":"SeamlessM4Tv2ForSpeechToText","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForSpeechSeq2Seq"}
649
+ {"model_class":"SeamlessM4Tv2ForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
650
+ {"model_class":"SeamlessM4Tv2ForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
651
+ {"model_class":"SeamlessM4Tv2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
652
  {"model_class":"SegformerForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
653
  {"model_class":"SegformerModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
654
  {"model_class":"Speech2Text2ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}