aloobun committed
Commit 0ef0e3f · verified · 1 Parent(s): 2024c1a

Upload folder using huggingface_hub

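The files below were pushed with huggingface_hub's folder-upload helper. A minimal sketch of what such an upload looks like; the repo id and local path are assumptions for illustration, not taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# Hypothetical repo id and local folder; the actual destination of this
# commit is not shown on this page.
api.upload_folder(
    folder_path="./merged-shawty-v0.6",
    repo_id="your-username/MergedShawty-v0.6",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```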
README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ base_model:
+ - tinycompany/Shawty-1.4B-SFT-Stage-1
+ - tinycompany/shawty-CoT-Hindi-English
+ library_name: transformers
+ tags:
+ - mergekit
+ - merge
+
+ ---
+ # MergedShawty-v0.6
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged with the [DARE TIES](https://arxiv.org/abs/2311.03099) merge method, using [tinycompany/Shawty-1.4B-SFT-Stage-1](https://huggingface.co/tinycompany/Shawty-1.4B-SFT-Stage-1) as the base.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * [tinycompany/shawty-CoT-Hindi-English](https://huggingface.co/tinycompany/shawty-CoT-Hindi-English)
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+ models:
+   - model: tinycompany/Shawty-1.4B-SFT-Stage-1
+     parameters:
+       density: 1.0
+       weight: 0.6
+   - model: tinycompany/shawty-CoT-Hindi-English
+     parameters:
+       density: 1.0
+       weight: 0.4
+ merge_method: dare_ties
+ base_model: tinycompany/Shawty-1.4B-SFT-Stage-1
+ parameters:
+   normalize: true
+   int8_mask: true
+ dtype: bfloat16
+ tokenizer_source: base
+
+ ```
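For readers who want to try the merged checkpoint, here is a minimal loading sketch with transformers. The hub repo id is an assumption inferred from the README title, not confirmed by this page:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id inferred from the model name; adjust to the actual repo.
repo = "aloobun/MergedShawty-v0.6"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```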
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "tinycompany/Shawty-1.4B-SFT-Stage-1",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.0",
+   "use_cache": false,
+   "use_mrope": false,
+   "use_sliding_window": false,
+   "vocab_size": 81920
+ }
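The config above describes a Qwen2-style decoder with grouped-query attention. A small sketch of the geometry it implies; reading the file from the current directory is an assumption about where it was saved:

```python
import json

# Assumes config.json (above) has been saved alongside this script.
with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]                # 1536 / 12 = 128
queries_per_kv = cfg["num_attention_heads"] // cfg["num_key_value_heads"]  # 12 / 2 = 6
print(f"{cfg['model_type']}: head_dim={head_dim}, {queries_per_kv} query heads per KV head")
```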
mergekit_config.yml ADDED
@@ -0,0 +1,16 @@
+ models:
+   - model: tinycompany/Shawty-1.4B-SFT-Stage-1
+     parameters:
+       density: 1.0
+       weight: 0.6
+   - model: tinycompany/shawty-CoT-Hindi-English
+     parameters:
+       density: 1.0
+       weight: 0.4
+ merge_method: dare_ties
+ base_model: tinycompany/Shawty-1.4B-SFT-Stage-1
+ parameters:
+   normalize: true
+   int8_mask: true
+ dtype: bfloat16
+ tokenizer_source: base
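To reproduce the merge from this file, mergekit can be driven from Python. This is a sketch following mergekit's documented API; the output directory is an assumption and option names may differ across versions:

```python
import torch
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Assumes mergekit_config.yml (above) sits next to this script.
with open("mergekit_config.yml", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    "./MergedShawty-v0.6",  # output directory (assumption)
    options=MergeOptions(cuda=torch.cuda.is_available()),
)
```

The same config also works with mergekit's `mergekit-yaml` command-line entry point.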
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c77aed4e195f5764d78bbfe59343b568549d9396680b02a34912d34024413fef
+ size 2872377928
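The entry above is a Git LFS pointer, not the weights themselves: it records only the sha256 and size (about 2.9 GB) of the real safetensors blob. A sketch of fetching the actual file; the repo id is an assumption:

```python
from huggingface_hub import hf_hub_download

# Hypothetical repo id; the pointer above does not name the repository.
path = hf_hub_download(repo_id="aloobun/MergedShawty-v0.6", filename="model.safetensors")
print(path)
```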
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4709": {
+       "content": "user",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "27673": {
+       "content": "system",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81912": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81913": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81914": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81915": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81916": {
+       "content": "assistant",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81917": {
+       "content": "BiBo",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81918": {
+       "content": "aloobun",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "81919": {
+       "content": "LowIQGenAI",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "chat_template": "\n{% set system_message = 'You are BiBo, a helpful and friendly AI assistant developed by aloobun and LowIQGenAI.' %}\n{%- if messages and messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + (messages[0]['content'] if 'content' in messages[0] else messages[0]['value'] if 'value' in messages[0] else '') + '<|im_end|>\\n' }}\n{%- else %}\n {{- '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}\n{%- endif %}\n{%- for message in messages %}\n {%- set content = message['content'] if 'content' in message else message['value'] if 'value' in message else '' %}\n {%- if message['role'] in ['user', 'human'] %}\n {{- '<|im_start|>user\\n' + content + '<|im_end|>\\n' }}\n {%- elif message['role'] in ['assistant', 'gpt'] %}\n {{- '<|im_start|>assistant\\n' + content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "max_length": 2048,
+   "model_max_length": 8192,
+   "pad_to_multiple_of": null,
+   "pad_token": "<|endoftext|>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "stride": 0,
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first"
+ }
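The chat_template above wraps each turn in <|im_start|>/<|im_end|> markers and injects a default BiBo system prompt when none is supplied. A minimal rendering sketch; loading from the current directory is an assumption about where the files above were saved:

```python
from transformers import AutoTokenizer

# Assumes the tokenizer files above are in the current directory
# (or pass the hub repo id instead of ".").
tokenizer = AutoTokenizer.from_pretrained(".")

messages = [{"role": "user", "content": "Namaste! Who are you?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape (per the template): a default system turn, the user turn,
# then an opening '<|im_start|>assistant\n' for generation.
```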