{
  "architectures": [
    "NueASRModel"
  ],
  "audio_encoder_config": {
    "architectures": [
      "HubertModel"
    ],
    "model_type": "hubert",
    "torch_dtype": "float16"
  },
  "bridge_conv_kernel_size": [
    4,
    4
  ],
  "bridge_conv_stride": [
    2,
    2
  ],
  "llm_config": {
    "architectures": [
      "GPTNeoXForCausalLM"
    ],
    "attention_dropout": 0.1,
    "bos_token_id": 2,
    "eos_token_id": 3,
    "hidden_dropout": 0.1,
    "hidden_size": 2816,
    "intermediate_size": 11264,
    "model_type": "gpt_neox",
    "num_attention_heads": 22,
    "num_hidden_layers": 36,
    "rotary_pct": 1.0,
    "torch_dtype": "float16",
    "use_parallel_residual": false,
    "vocab_size": 32000
  },
  "model_type": "nue_asr",
  "torch_dtype": "float16",
  "transformers_version": "4.33.2"
}