Transformers
PyTorch
clip
Inference Endpoints
File size: 467 Bytes (revision 64bd21d)
{
  "_name_or_path": "./hf/nllb-clip-base",
  "architectures": [
    "NLLBCLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 512,
  "text_config": {
    "encoder_layerdrop": 0,
    "model_type": "clip_text_model",
    "vocab_size": 256206
  },
  "torch_dtype": "float32",
  "transformers_version": "4.33.1",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model"
  }
}
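
The config above is a standard CLIP configuration with an enlarged text vocabulary (256,206 tokens, matching the NLLB tokenizer) and a custom NLLBCLIPModel architecture. As a minimal sketch of how this configuration can be inspected with transformers — assuming the files are checked out under the local path "./hf/nllb-clip-base" taken from "_name_or_path" — it can be loaded through CLIPConfig, since "model_type" is "clip":

from transformers import CLIPConfig

# Load the config shown above; the local path is taken from "_name_or_path"
# and is an assumption about where the repository lives on disk.
config = CLIPConfig.from_pretrained("./hf/nllb-clip-base")

print(config.projection_dim)            # 512 -> shared image/text embedding size
print(config.logit_scale_init_value)    # 2.6592 -> initial temperature for the contrastive logits
print(config.text_config.vocab_size)    # 256206 -> NLLB-sized multilingual vocabulary
print(config.vision_config.model_type)  # "clip_vision_model"

Loading the weights themselves would go through the NLLBCLIPModel class listed under "architectures", which does not appear to be a stock transformers class; this sketch therefore only touches the configuration.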