{
"_attn_implementation_autoset": true,
"_name_or_path": "/tmp/tmptlh0zdli",
"architectures": [
"CLIPModel"
],
"initializer_factor": 1.0,
"logit_scale_init_value": 2.6592,
"model_type": "clip",
"neuron": {
"auto_cast": "matmul",
"auto_cast_type": "bf16",
"compiler_type": "neuronx-cc",
"compiler_version": "2.16.372.0+4a9b2326",
"disable_fallback": false,
"disable_fast_relayout": false,
"dynamic_batch_size": false,
"inline_weights_to_neff": true,
"input_names": [
"input_ids",
"pixel_values",
"attention_mask"
],
"model_type": "clip",
"optlevel": "2",
"output_attentions": false,
"output_hidden_states": false,
"output_names": [
"logits_per_image",
"logits_per_text",
"text_embeds",
"image_embeds",
"text_model_output",
"vision_model_output"
],
"static_height": 224,
"static_image_batch_size": 1,
"static_num_channels": 3,
"static_sequence_length": 77,
"static_text_batch_size": 2,
"static_width": 224,
"tensor_parallel_size": 1
},
"projection_dim": 512,
"task": "feature-extraction",
"text_config": {
"bos_token_id": 0,
"dropout": 0.0,
"eos_token_id": 2,
"model_type": "clip_text_model",
"torch_dtype": "float32"
},
"torch_dtype": "float32",
"torchscript": true,
"transformers_version": "4.49.0",
"vision_config": {
"dropout": 0.0,
"model_type": "clip_vision_model",
"torch_dtype": "float32"
}
}