Userb1az committed on
Commit dceed67 · verified · 1 Parent(s): d922b74

Upload 14 files

.gitignore ADDED
@@ -0,0 +1,2 @@
+ *
+ !.gitignore

config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 2,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 2048,
+   "n_embd": 5120,
+   "n_head": 40,
+   "n_inner": null,
+   "n_layer": 40,
+   "n_positions": 2048,
+   "pad_token_id": 0,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.1",
+   "use_cache": true,
+   "vocab_size": 50272
+ }
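
The added config describes a GPT-2-style architecture scaled up to 13B parameters: 40 layers, 40 heads, 5120-dim embeddings, a 2048-token context window, a 50272-entry vocabulary, and non-default special-token ids (pad=0, bos=2, eos=3). A minimal sketch, assuming the file has been downloaded locally as config.json, of inspecting it with the transformers library:

# Sketch only: load the config added in this commit and print the fields
# that distinguish it from stock GPT-2.
from transformers import GPT2Config

config = GPT2Config.from_json_file("config.json")

print(config.n_layer, config.n_head, config.n_embd)   # 40 40 5120
print(config.n_positions, config.vocab_size)          # 2048 50272

# Non-default special tokens used by this model.
print(config.pad_token_id, config.bos_token_id, config.eos_token_id)  # 0 2 3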

ruGPT-3.5-13B-lora-f16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa62bbf6fb18fe3737caf81faf1bfdd7f0cc9c53df4e22db22f056f7f39ee137
+ size 26301266213
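
The .bin entries in this commit are Git LFS pointer stubs, not the weights themselves; the actual binaries have to be fetched separately, and the sha256 oid in each pointer can be used to verify the download. A sketch of doing that with huggingface_hub and the standard library; the repo_id is an assumption here, since the commit page does not name the repository:

# Sketch: fetch the f16 GGML binary and check it against the LFS oid above.
import hashlib
from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "fa62bbf6fb18fe3737caf81faf1bfdd7f0cc9c53df4e22db22f056f7f39ee137"

path = hf_hub_download(
    repo_id="b1az/ruGPT-3.5-13B-lora-ggml",   # assumption: replace with the actual repo
    filename="ruGPT-3.5-13B-lora-f16.bin",
)

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_SHA256, "download does not match the LFS oid"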

ruGPT-3.5-13B-lora-f16.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "F16",
+   "quantization_version": "Not_Quantized",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "2eec7374bdb419e005ab85b9bb4dbebf478855c3400f92335a7f6b3a1c8e317d",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }
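
Each .bin ships with a small .meta sidecar written by the llm-rs converter, describing the graph ("Gpt2"), the quantization level, the GGML container, and the base model the conversion started from. A minimal sketch, using only standard-library JSON, of reading a sidecar and sanity-checking it before handing the matching .bin to a GGML-capable runtime:

# Sketch: parse one of the *.meta sidecars and verify the fields we rely on.
import json

with open("ruGPT-3.5-13B-lora-f16.meta") as f:
    meta = json.load(f)

assert meta["container"] == "GGML"
assert meta["model"] == "Gpt2"          # ruGPT-3.5 reuses the GPT-2 graph

print(f'{meta["quantization"]} ({meta["quantization_version"]}), '
      f'converted by {meta["converter"]} from {meta["base_model"]}')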

ruGPT-3.5-13B-lora-q4_0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20424779cbcba555dad4a3fd3310ab2e423880ce7331a9cdefae2696dc772f4f
+ size 7473326373

ruGPT-3.5-13B-lora-q4_0.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "Q4_0",
+   "quantization_version": "V2",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "1e803e4bc312ba56bf95f24d1ab1b436852de05b0ef3fda1519568c69b3818ba",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }

ruGPT-3.5-13B-lora-q4_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c747a25da4bb9ac9db7e6427b4fc735151484585b7674b923794e0225aefd9aa
+ size 8291932453

ruGPT-3.5-13B-lora-q4_1.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "Q4_1",
+   "quantization_version": "V2",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "ac2d3ffe140c79b8f1a2b4acf7734418bb852913a91500222b2c871a0721f8ac",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }

ruGPT-3.5-13B-lora-q5_0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef2e229c3bebc1e319bc40c09b53da1901f87c97455823453d31220b4da4097a
+ size 9110538533

ruGPT-3.5-13B-lora-q5_0.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "Q5_0",
+   "quantization_version": "V2",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "66f301bab99098f8091bbf011454a5d37f61f8fc5f42d8344e44cedb96a70470",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }

ruGPT-3.5-13B-lora-q5_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02f24b57d3d69de0c67c23b2420df03b373b840c79bdbdfa2ece94b5a21aac3d
+ size 9929144613

ruGPT-3.5-13B-lora-q5_1.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "Q5_1",
+   "quantization_version": "V2",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "adec50585d63de813304be1692143bf2bc9bc784f5cda01f045d1a0d651ff8ed",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }

ruGPT-3.5-13B-lora-q8_0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9abcbf47cf4dc91d444e4831e88a5ddb17c78ea3211e09f0776054912ffe7b4
+ size 14022175013

ruGPT-3.5-13B-lora-q8_0.meta ADDED
@@ -0,0 +1,9 @@
+ {
+   "model": "Gpt2",
+   "quantization": "Q8_0",
+   "quantization_version": "V2",
+   "container": "GGML",
+   "converter": "llm-rs",
+   "hash": "6a97dc9bc9856e765ad77f9484a521fe76618165ed6cf74121ff7058e9f45201",
+   "base_model": "/root/ruGPT-3.5-training/ruGPT-3.5-13B-lora"
+ }
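
The six binaries carry the same model at different quantization levels, so the only decision at load time is which file fits the available memory. The sizes below are taken directly from the LFS pointers in this commit; the budget logic is just an illustrative sketch (weights only, runtime overhead not included):

# Sketch: pick the heaviest (least lossy) quantization that fits a byte budget.
SIZES = {
    "ruGPT-3.5-13B-lora-f16.bin":  26_301_266_213,
    "ruGPT-3.5-13B-lora-q8_0.bin": 14_022_175_013,
    "ruGPT-3.5-13B-lora-q5_1.bin":  9_929_144_613,
    "ruGPT-3.5-13B-lora-q5_0.bin":  9_110_538_533,
    "ruGPT-3.5-13B-lora-q4_1.bin":  8_291_932_453,
    "ruGPT-3.5-13B-lora-q4_0.bin":  7_473_326_373,
}

def pick_file(budget_bytes):
    """Return the largest file that fits in budget_bytes, or None."""
    fitting = {name: size for name, size in SIZES.items() if size <= budget_bytes}
    return max(fitting, key=fitting.get) if fitting else None

print(pick_file(16 * 1024**3))   # ~16 GiB budget -> the q8_0 file
print(pick_file(8 * 1024**3))    # ~8 GiB budget  -> the q4_1 file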