Upload folder using huggingface_hub
- config.json +70 -0
- generation_config.json +8 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- recipe.yaml +18 -0
- special_tokens_map.json +34 -0
- test_prompts.py +106 -0
- tokenizer.json +0 -0
- tokenizer_config.json +154 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,70 @@
+{
+  "_name_or_path": "/root/.cache/huggingface/hub/models--HuggingFaceTB--SmolLM-360M-Instruct/snapshots/73b7144f76331266f5f45d5642fd8da653583b13",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "compression_config": {
+    "config_groups": {
+      "group_0": {
+        "input_activations": {
+          "block_structure": null,
+          "dynamic": true,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": "memoryless",
+          "observer_kwargs": {},
+          "strategy": "token",
+          "symmetric": true,
+          "type": "int"
+        },
+        "output_activations": null,
+        "targets": [
+          "Linear"
+        ],
+        "weights": {
+          "block_structure": null,
+          "dynamic": false,
+          "group_size": null,
+          "num_bits": 8,
+          "observer": "minmax",
+          "observer_kwargs": {},
+          "strategy": "channel",
+          "symmetric": true,
+          "type": "int"
+        }
+      }
+    },
+    "format": "int-quantized",
+    "global_compression_ratio": 1.2392030626348693,
+    "ignore": [
+      "lm_head"
+    ],
+    "kv_cache_scheme": null,
+    "quant_method": "compressed-tensors",
+    "quantization_status": "frozen"
+  },
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 960,
+  "initializer_range": 0.02,
+  "intermediate_size": 2560,
+  "max_position_embeddings": 2048,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 15,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 5,
+  "pad_token_id": 2,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.44.1",
+  "use_cache": true,
+  "vocab_size": 49152
+}
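The `compression_config` block marks this checkpoint as a compressed-tensors INT8 weight-and-activation (W8A8) model: per-channel, static weight quantization and dynamic per-token activation quantization, with `lm_head` left unquantized. A minimal loading sketch, assuming an inference engine with compressed-tensors support such as vLLM; the repo id below is only a placeholder for wherever this folder was uploaded:

```python
from vllm import LLM, SamplingParams

# Assumptions: a vLLM build with compressed-tensors support is installed;
# "repo-id-of-this-upload" stands in for the actual repository of this commit.
llm = LLM(model="repo-id-of-this-upload")  # quantization scheme is read from compression_config
params = SamplingParams(temperature=0.2, top_p=0.9, max_tokens=200)
out = llm.generate(["<|im_start|>user\nWhat is gravity?<|im_end|>\n<|im_start|>assistant\n"], params)
print(out[0].outputs[0].text)
```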
generation_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "max_new_tokens": 40,
+  "pad_token_id": 2,
+  "transformers_version": "4.44.1"
+}
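These values become the model's default generation settings once loaded, so `generate()` stops after 40 new tokens unless `max_new_tokens` is overridden at call time. A small sketch, with the repo id again as a placeholder:

```python
from transformers import GenerationConfig

# Assumption: "repo-id-of-this-upload" stands in for this repository.
gen_cfg = GenerationConfig.from_pretrained("repo-id-of-this-upload")
print(gen_cfg.max_new_tokens)  # 40 -- used by generate() unless explicitly overridden
```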
merges.txt
ADDED
The diff for this file is too large to render.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e10091b239ca3f8324d124299f98ae58d81ea7fc9ea40cf1eea4f445c9de75
+size 504052632
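Only the Git LFS pointer is committed here; the ~504 MB weights blob is stored out of band and identified by its SHA-256. `huggingface_hub` resolves the pointer to the real file on download. A sketch, with the repo id as a placeholder:

```python
from huggingface_hub import hf_hub_download

# Assumption: "repo-id-of-this-upload" is a placeholder for the repository this commit belongs to.
weights_path = hf_hub_download(repo_id="repo-id-of-this-upload", filename="model.safetensors")
print(weights_path)  # local cache path to the resolved safetensors file
```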
recipe.yaml
ADDED
@@ -0,0 +1,18 @@
+quant_stage:
+  quant_modifiers:
+    SmoothQuantModifier:
+      smoothing_strength: 0.8
+      mappings:
+      - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
+        - re:.*input_layernorm
+      - - ['re:.*gate_proj', 're:.*up_proj']
+        - re:.*post_attention_layernorm
+      - - ['re:.*down_proj']
+        - re:.*up_proj
+    GPTQModifier:
+      sequential_update: false
+      dampening_frac: 0.01
+      ignore: [lm_head]
+      scheme: W8A8
+      targets: Linear
+      observer: mse
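The recipe chains SmoothQuant (to fold activation outliers into the preceding weights before quantization) with GPTQ at the W8A8 scheme on all Linear layers except `lm_head`, which matches the `compression_config` baked into config.json. A sketch of how a recipe like this is typically applied with llm-compressor's one-shot flow; the calibration dataset and sample count are illustrative assumptions, not values recorded in this commit:

```python
from llmcompressor.transformers import oneshot

# Assumptions: the llm-compressor package is installed; "open_platypus" and 512 samples
# are example calibration choices only -- the actual calibration data is not recorded here.
oneshot(
    model="HuggingFaceTB/SmolLM-360M-Instruct",  # base model referenced in config.json
    dataset="open_platypus",
    recipe="recipe.yaml",                        # the SmoothQuant + GPTQ W8A8 recipe above
    output_dir="SmolLM-360M-Instruct-W8A8",
    max_seq_length=2048,
    num_calibration_samples=512,
)
```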
special_tokens_map.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": {
+    "content": "<|im_start|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
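With this map, the ChatML markers double as the BOS/EOS/PAD tokens once the tokenizer is loaded. A quick check, with the repo id as a placeholder:

```python
from transformers import AutoTokenizer

# Assumption: "repo-id-of-this-upload" stands in for this repository.
tok = AutoTokenizer.from_pretrained("repo-id-of-this-upload")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <|im_start|> <|im_end|> <|im_end|> <|endoftext|>
```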
test_prompts.py
ADDED
@@ -0,0 +1,106 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+BASE_PATH = "/fsx/loubna/projects/alignment-handbook/recipes/cosmo2/sft/data"
+TEMPERATURE = 0.2
+TOP_P = 0.9
+
+CHECKPOINT = "loubnabnl/smollm-350M-instruct-add-basics"
+
+print(f"💾 Loading the model and tokenizer: {CHECKPOINT}...")
+device = "cuda"
+tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
+model_s = AutoModelForCausalLM.from_pretrained(CHECKPOINT).to(device)
+
+print("🧪 Testing single-turn conversations...")
+L = [
+    "Hi",
+    "Hello",
+    "Tell me a joke",
+    "Who are you?",
+    "What's your name?",
+    "How do I make pancakes?",
+    "Can you tell me what is gravity?",
+    "What is the capital of Morocco?",
+    "What's 2+2?",
+    "Hi, what is 2+1?",
+    "What's 3+5?",
+    "Write a poem about Helium",
+    "Hi, what are some popular dishes from Japan?",
+]
+
+
+for i in range(len(L)):
+    print(f"🔮 {L[i]}")
+    messages = [{"role": "user", "content": L[i]}]
+    input_text = tokenizer.apply_chat_template(messages, tokenize=False)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    outputs = model_s.generate(
+        inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
+    )
+    with open(
+        f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}.txt",
+        "a",
+    ) as f:
+        f.write("=" * 50 + "\n")
+        f.write(tokenizer.decode(outputs[0]))
+        f.write("\n")
+
+
+print("🧪 Now testing multi-turn conversations...")
+# Multi-turn conversations
+messages_1 = [
+    {"role": "user", "content": "Hi"},
+    {"role": "assistant", "content": "Hello! How can I help you today?"},
+    {"role": "user", "content": "What's 2+2?"},
+]
+messages_2 = [
+    {"role": "user", "content": "Hi"},
+    {"role": "assistant", "content": "Hello! How can I help you today?"},
+    {"role": "user", "content": "What's 2+2?"},
+    {"role": "assistant", "content": "4"},
+    {"role": "user", "content": "Why?"},
+]
+messages_3 = [
+    {"role": "user", "content": "Who are you?"},
+    {"role": "assistant", "content": "I am an AI assistant. How can I help you today?"},
+    {"role": "user", "content": "What's your name?"},
+]
+messages_4 = [
+    {"role": "user", "content": "Tell me a joke"},
+    {"role": "assistant", "content": "Sure! Why did the tomato turn red?"},
+    {"role": "user", "content": "Why?"},
+]
+messages_5 = [
+    {"role": "user", "content": "Can you tell me what is gravity?"},
+    {
+        "role": "assistant",
+        "content": "Sure! Gravity is a force that attracts objects toward each other. It is what keeps us on the ground and what makes things fall.",
+    },
+    {"role": "user", "content": "Who discovered it?"},
+]
+messages_6 = [
+    {"role": "user", "content": "How do I make pancakes?"},
+    {
+        "role": "assistant",
+        "content": "Sure! Here is a simple recipe for pancakes: Ingredients: 1 cup flour, 1 cup milk, 1 egg, 1 tbsp sugar, 1 tsp baking powder, 1/2 tsp salt. Instructions: 1. Mix all the dry ingredients together in a bowl. 2. Add the milk and egg and mix until smooth. 3. Heat a non-stick pan over medium heat. 4. Pour 1/4 cup of batter onto the pan. 5. Cook until bubbles form on the surface, then flip and cook for another minute. 6. Serve with your favorite toppings.",
+    },
+    {"role": "user", "content": "What are some popular toppings?"},
+]
+
+L = [messages_1, messages_2, messages_3, messages_4, messages_5, messages_6]
+
+for i in range(len(L)):
+    input_text = tokenizer.apply_chat_template(L[i], tokenize=False)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    outputs = model_s.generate(
+        inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
+    )
+    with open(
+        f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}_MT.txt",
+        "a",
+    ) as f:
+        f.write("=" * 50 + "\n")
+        f.write(tokenizer.decode(outputs[0]))
+        f.write("\n")
+
+print("🔥 Done!")
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,154 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<repo_name>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<reponame>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<file_sep>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "<filename>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "<gh_stars>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8": {
+      "content": "<issue_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "9": {
+      "content": "<issue_comment>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "10": {
+      "content": "<issue_closed>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "11": {
+      "content": "<jupyter_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "12": {
+      "content": "<jupyter_text>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "13": {
+      "content": "<jupyter_code>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "14": {
+      "content": "<jupyter_output>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "15": {
+      "content": "<jupyter_script>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "16": {
+      "content": "<empty_output>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "model_max_length": 2048,
+  "pad_token": "<|im_end|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>",
+  "vocab_size": 49152
+}
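The `chat_template` wraps every turn in ChatML markers and, when `add_generation_prompt=True`, leaves an open assistant turn for the model to complete; this is the same template test_prompts.py renders before generation. A small sketch, with the repo id as a placeholder:

```python
from transformers import AutoTokenizer

# Assumption: "repo-id-of-this-upload" stands in for this repository.
tok = AutoTokenizer.from_pretrained("repo-id-of-this-upload")
text = tok.apply_chat_template(
    [{"role": "user", "content": "Hi"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)
# <|im_start|>user
# Hi<|im_end|>
# <|im_start|>assistant
```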
vocab.json
ADDED
The diff for this file is too large to render.