{
  "vit_hidden_dim": 768,
  "vit_inter_dim": 3072,
  "vit_patch_size": 16,
  "vit_img_size": 224,
  "vit_n_heads": 12,
  "vit_dropout": 0.0,
  "vit_n_blocks": 12,
  "vit_ln_eps": 1e-06,
  "vit_cls_flag": false,
  "vit_model_type": "google/siglip-base-patch16-224",
  "vae_enable": true,
  "vae_attn_resolutions": [
    3
  ],
  "vae_ch": 256,
  "vae_ch_mult": [
    1,
    2,
    2,
    4
  ],
  "vae_codebook_size": 32768,
  "vae_double_z": false,
  "vae_dropout": 0.0,
  "vae_embed_dim": 4,
  "vae_in_channels": 3,
  "vae_num_res_blocks": 2,
  "vae_out_channels": 3,
  "vae_temporal_downsample_factor": 4,
  "vae_torch_dtype": "float32",
  "vae_z_channels": 4,
  "vae_model_type": "BAAI/Emu3-VisionTokenizer",
  "lm_hidden_dim": 576,
  "lm_inter_dim": 1536,
  "lm_rms_eps": 1e-05,
  "lm_re_base": 100000,
  "lm_max_position_embeddings": 8192,
  "lm_vocab_size": 81922,
  "lm_n_heads": 9,
  "lm_n_kv_heads": 3,
  "lm_dropout": 0.0,
  "lm_n_blocks": 30,
  "lm_attn_scaling": 1.0,
  "TOTAL_SEQUENCE_LENGTH": 1280,
  "lm_max_length": 1280,
  "lm_use_tokens": true,
  "lm_tie_weights": true,
  "lm_model_type": "HuggingFaceTB/SmolLM2-135M",
  "lm_tokenizer": "HuggingFaceTB/cosmo2-tokenizer",
  "lm_eos_token_id": 0,
  "mp_pixel_shuffle_factor": 2,
  "vlm_load_backbone_weights": true,
  "vlm_checkpoint_path": "checkpoints/",
  "hf_repo_name": "blanchon/nanoVLM-222M"
}