Update config.json
I'm getting an error while running vLLM "0.9.2dev" with the latest transformers:
AttributeError: 'Gemma3nConfig' object has no attribute 'vocab_size'
After that I updated config.json and added "vocab_size": 262400,
but then I got another error:
AttributeError: 'Gemma3nConfig' object has no attribute 'hidden_size'
So I updated config.json again and added
"hidden_size": 4096,
"num_hidden_layers": 80,
"num_attention_heads": 32,
but with no luck.
Could you please share how to run it smoothly via vLLM, or share your config.json?
Any help would be appreciated, thanks!
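For reference, the AttributeError above typically surfaces while the engine is being constructed, when vLLM reads the model config. A minimal sketch of that launch, assuming the Hugging Face repo id "google/gemma-3n-E4B-it" (substitute your own path or repo id):

# Minimal sketch of the launch path that hits the config AttributeError.
# The repo id below is an assumption; point it at your own checkpoint.
from vllm import LLM

llm = LLM(model="google/gemma-3n-E4B-it")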
I'm also getting the same error. Please let me know if there is a way to run this with vLLM.
I am currently using transformers version 4.53.0 and vllm version 0.7.4. With the following config.json, I am now able to run vLLM successfully.
{
"architectures": [
"Gemma3nForConditionalGeneration"
],
"hidden_activation": "gelu_pytorch_tanh",
"hidden_size": 2048,
"hidden_size_per_layer_input": 256,
"initializer_range": 0.02,
"intermediate_size": 16384,
"laurel_rank": 64,
"vocab_size": 262400,
"head_dim": 256,
"base_model_tp_plan": {},
"max_position_embeddings": 32768,
"model_type": "gemma3n_text",
"num_attention_heads": 8,
"num_hidden_layers": 35,
"num_key_value_heads": 2,
"num_kv_shared_layers": 15,
"query_pre_attn_scalar": 256,
"rms_norm_eps": 1e-06,
"rope_local_base_freq": 10000.0,
"rope_scaling": null,
"rope_theta": 1000000.0,
"sliding_window": 512,
"torch_dtype": "bfloat16",
"use_cache": true,
"audio_config": {
"conf_attention_chunk_size": 12,
"conf_attention_context_left": 13,
"conf_attention_context_right": 0,
"conf_attention_logit_cap": 50.0,
"conf_conv_kernel_size": 5,
"conf_num_attention_heads": 8,
"conf_num_hidden_layers": 12,
"conf_positional_bias_size": 256,
"conf_reduction_factor": 4,
"conf_residual_weight": 0.5,
"gradient_clipping": 10000000000.0,
"hidden_size": 1536,
"input_feat_size": 128,
"model_type": "gemma3n_audio",
"rms_norm_eps": 1e-06,
"sscp_conv_channel_size": [
128,
32
],
"sscp_conv_eps": 0.001,
"sscp_conv_kernel_size": [
[
3,
3
],
[
3,
3
]
],
"sscp_conv_stride_size": [
[
2,
2
],
[
2,
2
]
],
"torch_dtype": "bfloat16",
"vocab_offset": 262273,
"vocab_size":128
},
"audio_soft_tokens_per_image": 188,
"audio_token_id": 262273,
"boa_token_id": 256000,
"boi_token_id": 255999,
"eoa_token_id": 262272,
"eoi_token_id": 262144,
"eos_token_id": [
1,
106
],
"image_token_id": 262145,
"initializer_range": 0.02,
"model_type": "gemma3n",
"text_config": {
"activation_sparsity_pattern": [
0.95,
0.95,
0.95,
0.95,
0.95,
0.95,
0.95,
0.95,
0.95,
0.95,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0
],
"altup_active_idx": 0,
"altup_coef_clip": 120.0,
"altup_correct_scale": true,
"altup_lr_multiplier": 1.0,
"altup_num_inputs": 4,
"attention_bias": false,
"attention_dropout": 0.0,
"final_logit_softcapping": 30.0,
"head_dim": 256,
"hidden_activation": "gelu_pytorch_tanh",
"hidden_size": 2048,
"hidden_size_per_layer_input": 256,
"initializer_range": 0.02,
"intermediate_size": 16384,
"laurel_rank": 64,
"layer_types": [
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"model_type": "gemma3n_text",
"num_attention_heads": 8,
"num_hidden_layers": 35,
"num_key_value_heads": 2,
"num_kv_shared_layers": 15,
"query_pre_attn_scalar": 256,
"rms_norm_eps": 1e-06,
"rope_local_base_freq": 10000.0,
"rope_scaling": null,
"rope_theta": 1000000.0,
"sliding_window": 512,
"torch_dtype": "bfloat16",
"use_cache": true,
"vocab_size": 262400,
"vocab_size_per_layer_input": 262144
},
"torch_dtype": "bfloat16",
"transformers_version": "4.53.0.dev0",
"vision_config": {
"architecture": "mobilenetv5_300m_enc",
"do_pooling": true,
"hidden_size": 2048,
"initializer_range": 0.02,
"label_names": [
"LABEL_0",
"LABEL_1"
],
"model_type": "gemma3n_vision",
"num_classes": 2,
"rms_norm_eps": 1e-06,
"torch_dtype": "bfloat16",
"vocab_offset": 262144,
"vocab_size": 128
},
"vision_soft_tokens_per_image": 256
}
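The errors in the first post are consistent with vLLM looking up fields such as vocab_size and hidden_size on the top-level Gemma3nConfig object, while this config keeps them nested under text_config. A quick way to check what your local config actually exposes (the path below is just an example for wherever the files above are saved):

# Sketch: inspect the attributes of the loaded config.
# "./gemma-3n-model" is an example path, not a fixed location.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./gemma-3n-model")
print(type(cfg).__name__)            # expect Gemma3nConfig
print(hasattr(cfg, "vocab_size"))    # the attribute the original error complained about
print(cfg.text_config.vocab_size)    # the nested text-model value (262400 in the JSON above)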
Additionally, using fp32 precision during vLLM deployment may lead to runtime errors such as:
DEBUG 07-01 10:59:16 client.py:171] Heartbeat successful.
Sampling probabilities contain NaN or Inf values.
So far, the model runs more stably with bfloat16 (bf16) precision.
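A minimal end-to-end sketch that pins bfloat16 explicitly (the local path is a placeholder for wherever the checkpoint and the config.json above live):

# Offline inference sketch pinned to bfloat16, since fp32 produced NaN/Inf
# sampling probabilities in the log above. The path is an example placeholder.
from vllm import LLM, SamplingParams

llm = LLM(model="./gemma-3n-model", dtype="bfloat16")
params = SamplingParams(temperature=0.7, max_tokens=64)
out = llm.generate(["Describe Gemma 3n in one sentence."], params)
print(out[0].outputs[0].text)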
@Lqqs, but there is no vllm 0.7.4 release of the library, at least not on PyPI or on the official vLLM GitHub page.
Apologies for the oversight — I'm currently using vLLM 0.7.4.dev0, but I believe later versions should be compatible as well.
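If you want to confirm which vLLM build is actually installed (e.g. a 0.7.4.dev0 pre-release vs. a published 0.7.x), a quick check:

# Print the installed vLLM version string; dev builds carry a .devN suffix.
import importlib.metadata
print(importlib.metadata.version("vllm"))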