Matelq-2 committed on
Commit 114124e · verified · 1 parent: 020393f

Training in progress, epoch 1

adapter_config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "HuggingFaceTB/SmolLM2-135M",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 8,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 6,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "down_proj",
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "k_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
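For reference, a minimal sketch of the PEFT LoraConfig that would produce the adapter_config.json above, assuming the adapter was created with the standard peft API (all values are copied from the file):

from peft import LoraConfig

# LoRA settings matching adapter_config.json: rank 6, alpha 8, dropout 0.05,
# applied to the attention and MLP projection layers of SmolLM2-135M.
lora_config = LoraConfig(
    r=6,
    lora_alpha=8,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "o_proj", "down_proj", "up_proj", "v_proj",
        "gate_proj", "k_proj", "q_proj",
    ],
)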
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32588e6b28c898c9a1b1d69c00023d5e932f2656f4f7e16fc4342628630b49ff
+size 7380952
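The LFS pointer above references the trained adapter weights (about 7.4 MB). A minimal loading sketch, assuming the adapter is used with peft on top of the base model; "Matelq-2/<adapter-repo>" is a placeholder, not the actual repository id:

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the frozen base model, then attach the LoRA adapter from this repository.
base = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-135M")
model = PeftModel.from_pretrained(base, "Matelq-2/<adapter-repo>")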
chat_template.jinja CHANGED
@@ -1,6 +1,4 @@
-{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
-You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
-' }}{% endif %}{{'<|im_start|>' + message['role'] + '
+{% for message in messages %}{{'<|im_start|>' + message['role'] + '
 ' + message['content'] + '<|im_end|>' + '
 '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
 ' }}{% endif %}
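The change drops the hard-coded SmolLM system prompt, so only the messages that are passed in get rendered. A minimal rendering sketch with jinja2, with the new template text copied from the diff above:

from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

print(Template(chat_template).render(
    messages=[{"role": "user", "content": "Hello"}],
    add_generation_prompt=True,
))
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant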
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
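With "truncation" set to null, truncation is no longer baked into the tokenizer and has to be requested per call when it is needed. A sketch, where "Matelq-2/<adapter-repo>" is again a placeholder for this repository's id:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Matelq-2/<adapter-repo>")
# Request truncation explicitly, since it is no longer configured in tokenizer.json.
encoded = tokenizer("a long training example ...", truncation=True, max_length=1024)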
tokenizer_config.json CHANGED
@@ -143,9 +143,9 @@
     "<|im_end|>"
   ],
   "bos_token": "<|im_start|>",
-  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
   "model_max_length": 8192,
   "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d1d0ada5422b14973d94b7e8ed9c0d57c236e0f3e1fa230383ad54153118214
+oid sha256:853181e73e812bd1c7423af8e2d7d4294026999255e727deb8bc9fbabe797af9
 size 5969