{ "architectures": [ "DecoderOnlyT5Model" ], "auto_map": { "AutoConfig": "decoderonlyt5_config.DecoderOnlyT5Config", "AutoModelForCausalLM": "decoderonlyt5_modeling.DecoderOnlyT5Model" }, "d_ff": 16384, "d_kv": 256, "d_model": 4096, "dropout_rate": 0.0, "decoder_start_token_id": 0, "pad_token_id": 1, "eos_token_id": 3, "feed_forward_proj": "gated-swish", "initializer_factor": 1.0, "is_encoder_decoder": false, "is_decoder_only": true, "layer_norm_epsilon": 1e-06, "model_type": "t5", "n_positions": 512, "num_layers": 0, "num_decoder_layers": 32, "num_heads": 16, "output_past": true, "relative_attention_max_distance": 128, "relative_attention_num_buckets": 32, "task_specific_params": {}, "tie_word_embeddings": true, "transformers_version": "4.23.1", "use_cache": true, "vocab_size": 256512, "parallel_layers": true, "has_relative_attention_bias": false, "multi_query_attention": true, "use_rotary_embedding": true, "rotary_embedding_max_timescale": 1000 }