{
  "alpha": 0.5,
  "architectures": [
    "LLAMIAFluxForConditionalGeneration"
  ],
  "feature_extraction_model_name": "openai/clip-vit-large-patch14-336",
  "ignore_index": -100,
  "im_end_token_ids": [
    22313,
    29918,
    11794
  ],
  "im_start_token_ids": [
    22313,
    29918,
    25826
  ],
  "image_gen_context_length": 256,
  "image_seq_length": 576,
  "image_token_index": 32000,
  "model_type": "LLAMIAFlux",
  "multimodal_projector_bias": true,
  "num_image_generation_heads": 4,
  "pad_token_id": 32001,
  "projector_hidden_act": "gelu",
  "text_config": {
    "_name_or_path": "lmsys/vicuna-7b-v1.5",
    "architectures": [
      "LlamaForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 4096,
    "initializer_range": 0.02,
    "intermediate_size": 11008,
    "max_position_embeddings": 4096,
    "mlp_bias": false,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "num_key_value_heads": 32,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "rope_theta": 10000.0,
    "torch_dtype": "float32",
    "use_cache": true,
    "vocab_size": 32064
  },
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.51.1",
  "vision_config": {
    "attention_dropout": 0.0,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "image_size": 336,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768,
    "torch_dtype": "float32",
    "vocab_size": 32000
  },
  "vision_feature_layer": -2,
  "vision_feature_select_strategy": "default",
  "vision_model_name": "openai/clip-vit-large-patch14",
  "vocab_size": 32064
}
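
For reference, a minimal sketch of reading this config and sanity-checking the vision geometry with plain Python, assuming the file is saved locally as config.json (the path is an assumption, adjust as needed). The image_seq_length of 576 follows directly from the CLIP vision settings, since a 336-pixel input split into 14-pixel patches yields (336 / 14)^2 = 24^2 = 576 image tokens.

import json

# Path assumed for illustration; point this at wherever the config lives.
with open("config.json") as f:
    cfg = json.load(f)

# The composite model wires a CLIP vision tower into a Llama text backbone.
print(cfg["model_type"])                  # LLAMIAFlux
print(cfg["text_config"]["hidden_size"])  # 4096 (Vicuna-7B backbone)

# 336x336 input, 14x14 patches -> (336 // 14) ** 2 = 576 image tokens.
vision = cfg["vision_config"]
assert cfg["image_seq_length"] == (vision["image_size"] // vision["patch_size"]) ** 2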