zer0int committed · Commit 1694b45 · verified · 1 parent: cca2031

Add CLIP-KO-LITE

A CLIP ViT-L/14 made resilient to typographic attacks, thanks to k_proj orthogonalization and attention head dropout during fine-tuning (plus some adversarial training)!
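To make the two defenses concrete, here is a purely illustrative PyTorch sketch of what "k_proj orthogonalization" and "attention head dropout" could mean. The function names, the SVD-based orthogonalization, and the dropout rate are assumptions for illustration, not the actual fine-tuning code behind this checkpoint.

```python
import torch

def orthogonalize_k_proj(weight: torch.Tensor) -> torch.Tensor:
    """Replace a square k_proj weight matrix with its orthogonal (polar) factor via SVD.
    Illustrative only; not necessarily the procedure used for this checkpoint."""
    u, _, vh = torch.linalg.svd(weight, full_matrices=False)
    return u @ vh

def head_dropout(attn: torch.Tensor, p: float = 0.1) -> torch.Tensor:
    """Zero out whole attention heads with probability p during fine-tuning.
    attn has shape (batch, num_heads, seq, seq)."""
    if p <= 0.0:
        return attn
    num_heads = attn.shape[1]
    keep = (torch.rand(num_heads, device=attn.device) > p).to(attn.dtype)
    return attn * keep.view(1, num_heads, 1, 1)

# Toy usage on shapes matching the text tower (hidden size 768, 12 heads, 77 tokens):
w_ortho = orthogonalize_k_proj(torch.randn(768, 768))
print(torch.allclose(w_ortho @ w_ortho.T, torch.eye(768), atol=1e-4))  # True: rows are orthonormal

attn = torch.softmax(torch.randn(2, 12, 77, 77), dim=-1)
print(head_dropout(attn, p=0.25).shape)  # torch.Size([2, 12, 77, 77])
```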

ViT-L-14-KO-LITE-HuggingFace-TE-only.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cb0d53cc825af9303b50fee950e705063f0231d064fb75b53774ca224b0e6ee
+ size 494624596
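The TE-only file is a plain safetensors checkpoint, so it can be inspected directly. A minimal sketch; the local file name is assumed to match the file added above:

```python
from safetensors.torch import load_file

# Load the text-encoder-only checkpoint as a flat dict of tensors.
state = load_file("ViT-L-14-KO-LITE-HuggingFace-TE-only.safetensors")

print(f"{len(state)} tensors")
for name, tensor in sorted(state.items())[:5]:
    print(name, tuple(tensor.shape), str(tensor.dtype))
```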
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 4.6052,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "attention_dropout": 0.0,
+     "dropout": 0.0,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 768,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "layer_norm_eps": 1e-05,
+     "max_position_embeddings": 77,
+     "model_type": "clip_text_model",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12,
+     "projection_dim": 768,
+     "vocab_size": 49408
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.3",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "dropout": 0.0,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1024,
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "layer_norm_eps": 1e-05,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+   }
+ }
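The config above describes a standard ViT-L/14 CLIP: a 24-layer, 16-head vision tower (hidden size 1024, patch size 14, 224 px input) and a 12-layer, 12-head text tower (hidden size 768, 77-token context), both projecting into a 768-dimensional shared space. A minimal sketch of building that architecture straight from this file with transformers, assuming it is saved locally as config.json (weights are randomly initialized here; the trained weights are in model.safetensors below):

```python
from transformers import CLIPConfig, CLIPModel

config = CLIPConfig.from_json_file("config.json")
model = CLIPModel(config)  # architecture only; load model.safetensors for the trained weights

print(config.vision_config.num_hidden_layers,  # 24
      config.text_config.num_hidden_layers,    # 12
      config.projection_dim)                   # 768
```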
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b01ca81157eb1a1826230cf0603d330524397972cb266396d2829b5e7013dc0
+ size 1710537716
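With the config and full weights in place, the checkpoint should load like any other Hugging Face CLIP model. A minimal zero-shot sketch; the repo id and the test image below are placeholders I am assuming, not names stated in this commit:

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

repo_id = "zer0int/CLIP-KO-LITE"  # placeholder: substitute the actual Hub repo id
model = CLIPModel.from_pretrained(repo_id)
processor = CLIPProcessor.from_pretrained(repo_id)

# e.g. a photo of an apple with the word "iPod" written on it (a classic typographic attack)
image = Image.open("typo_attack_example.jpg")
labels = ["a photo of an apple", "a photo of an iPod"]

inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    probs = model(**inputs).logits_per_image.softmax(dim=-1)[0]
print(dict(zip(labels, probs.tolist())))
```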
preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "crop_size": 224,
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "size": 224
+ }
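These are the standard OpenAI CLIP preprocessing values: resize the short side to 224 with bicubic resampling (resample 3 is PIL's BICUBIC), center-crop to 224×224, and normalize with the CLIP channel means and stds. An approximately equivalent torchvision pipeline, as a sketch:

```python
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),  # short side -> 224
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                         std=[0.26862954, 0.26130258, 0.27577711]),
])
# In practice, CLIPProcessor / the image processor applies this for you from this config file.
```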
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "pad_token": "<|endoftext|>",
+   "add_prefix_space": false,
+   "errors": "replace",
+   "do_lower_case": true,
+   "name_or_path": "zer0int/CLIP-GmP-ViT-L-14",
+   "model_max_length": 77,
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer"
+ }
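In short, this is the standard lower-cased CLIP BPE tokenizer with a 77-token context, reusing <|endoftext|> as eos/unk/pad, and its tokenizer files trace back to zer0int/CLIP-GmP-ViT-L-14. A minimal loading sketch; the local "." directory is an assumption and should contain the tokenizer files added in this commit:

```python
from transformers import AutoTokenizer

# Directory containing tokenizer.json, tokenizer_config.json, special_tokens_map.json, vocab.json
tokenizer = AutoTokenizer.from_pretrained(".")

enc = tokenizer(["a photo of a cat"], padding="max_length", max_length=77, return_tensors="pt")
print(enc["input_ids"].shape)  # torch.Size([1, 77])
```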
vocab.json ADDED
The diff for this file is too large to render. See raw diff