Text Generation · Transformers · PyTorch · Thai · English · llama · conversational · text-generation-inference · Inference Endpoints
zolicsaki committed
Commit 37aa6dd
1 parent: 7c0cb5d

Upload folder using huggingface_hub (#1)


- 587a742fce735114a2388c0c5d82e1f7bd7b783672fbd7eab95c5c828d38a8b1 (684bdd7165b6941657c7be2b8f79a3f90f790586)
- 45b9996088b9d85bc46066937d23fa74c339c0c4a66816b7e7b7f2e67515514a (15370476ccc48ebada6f24e82591c699448fcdfd)
- 2efa8369fc3513d50b06a2a2cbffffad8aab687d8369841f44892c23b9f081c6 (8c857447f15e65cdc2f8e03ee4c5b70161c59f78)
- 1378cd657794bc5187381025d97a3f391828fc9e03f134d6f7523c7ec671080d (9ca4ba6be5d2442301f805bc5b97f93e268d1461)
- d5cfe96cd64359d76aadbed14a93c9c1e0e2847784a63e52a2283b40d06878af (85f0bdc8ca5070bc22d6fb2e072d943c23293837)
- 76020cda4327b28bbd497a52f01c3005eabb5f801d89eef92b0b2e95430e8e4e (211473ca7a2506b26e56edbd7aa5e4b8e8588977)
- 8c7369227f88d04364a62a0d245cfaf285b90c2191fea0b9f48cdf6fd08e037c (795147e020443b84f86bce2af9acb60f7e0f83a0)
- ce714bbce60391211bf8550c02dd449124e47c272a01eb1617ac03de66d49651 (18b486545229e2b62b1298b8982d250ab4273d48)
- 07cb65bf33749b84aad18a07608adc1f498c647510b24a5819710a7da9578d46 (dc2080e12d0fe3493d6e52b7d62d6853c36cdbac)
- 8a62530122773d88e6da6bf498cbdba72f0e3af0c3e35374cca226668e0c9d96 (985279c1391c54c5f082a8894c3c6b66cde423ca)
- d6d6f49b7052019c60369ff2869341c3e75cca36771a546268bb6ebd08a3748d (eb270ab37bac155cfa22afeb9e69e0eaa6720af7)
- 674179a12cf42f6a5f065c13311e00d5da3adcefffcdda048aed146d1a60492b (b27e0536690300b7c7a43c62801d1d8dedaf9b22)
- b23ec083a0a8145e899a05add5a76d68099cda403d8e697f85209f6ad0605e4d (c7bc76686084562f4152c1ece97dd465db32dda8)
- 48fa656798dd18b2c490935ffb37a982c5a26ea8f11e59416e72ac4327ae3794 (a00e393705ea644712995ee0e43476dcf3b7c3ca)
- ef3ea56345b539bce66b3e35d98187d8b6241ecf3a688cfa1feb348fcd55b461 (16fd8af280e733f98857fec38e19ae1fa3f18041)
- 34eef8d7d8e5bf362745c0131babb4dc3e30920b970968d9c37a2c764135838b (78d43226daf8e9e6f88e9082bee28a331f63e6dc)
- 0acfa9d8e19c9e37ae9709eb48b6dffdd366d85ccc88431a906c291fae6212c2 (a9a4671088b54cad6eff401baf2c0ffefabf5601)
- 9c15fe45a32655e8331bb9d91da69c584a57cc9c54a22fe37568d1ed41c56853 (e385c22c0a55e3afe1a6e87d4edb09612f2938ba)
- 4e734706bb61514788b4d72d1389229f54c9499d629b49f2421da4f2bab19811 (ecc35b33288d3a9732868703e3042e2aeb2972cd)
- b8858dfa4595263d225e62049e306137b041ffbb7e59febf450718a971acead8 (098326d9c3a1cb630265538d90eb55ab5b8f66d3)
- 06f6fdb3064ba11288e215d1ee664ea823bc0cc067d88c2b119074f777e7cc99 (a909a14ab9c8b9c4a8f7364cca4bf1b0ae4e6553)
- e49149cc50446b39684a64b2995dc9471da8d8c052558e4790c822f0f2ba0c90 (7895755023d52fe7d179e3f2fae1e696e2d5fdd1)
- 1dcfa40609ca10bde6acccfc809d7643cbf7f92c77701f41a83a169cb4805c5c (688ad4c68eeff2a419e21f8147b498f20edc038d)
- 3fc51ccc4fe5beba177164ab3aef3e3119d597869a55cfa9530f73dc48bd3de8 (288d2cdccdb380cc33a3e953524fb03c8d6889df)
- c8c7f8a84900f548911c938f67c57e504b57ee1041fe0b4e0ee484c561898237 (343f906c7db4dff739147c26d9bc5e7521951322)
- aa70b19610a1d79b0388203e332add835836725da98079410949f26c9b7ca2d8 (5b4a8ad98afe53fe0c8cf650679ee069b33797ae)
- e3f62e8004bfce2d01ccd821660ac74daecd3709f9c703975424906de0bc43cf (58317fbd62939757e41c20eef3200e154d5e98e4)
- 3d0d9f466e9b18e0a2417b2232a3c62ecce8aa08d5f7f3f46869af4a5fcf859e (c5795116a49cca08d156303cd9e2bddfd9ad8f40)
- f689f6fa5e37e4a7fc1750c11b13cb952cb4646ba85a15680a573de44c218bed (4352d98539e4ec68cbf1c54d2d33f4169dd9849c)
- 3f9b0194ab3b838f22869690482dc9ea12acbfea9cfd1a1f1eea66adf8b04697 (076f6b84811368dd221cd1ba16d2da6722e63c19)
- 77eb56589a1829bcd5eddc7374049bcb4160a5ad8ea8a5e4a90d5d566f5d2039 (022f6c95d4eb4b64e8021ff773ba657fdc91ea9a)
- 08783997733f1da2990c98295f8b1c781add558bc64ac1a653dc27f043fd318c (b9050dedf86049ec4c044e8f7fda581ffd33a666)
- d708f283ce92d41e7b19426bc8c0ccd3079a1b79df007834026b035a920b9094 (231916c7a77d928822bb914596aeb3484ff929dc)
- 3e94263c877bcc2b324e91803b5ea9bb813e213df525f9ac784394186be7d32d (372271828e99ab17e35c1193f499e17b9d8fcb19)
- 5f470c273e6900b81892d596df248a0006b5ad94f05192ee0e4a418499eac6ba (5a1a02a88e096fc679d6d24a84514006e2dfedb7)
- 852867c1222136740e6a58591d4718292d4f9e407221290f4edf14a9c87107d4 (8a7a1ab76937be1967762e2e05c8e0cb72d2e433)

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "/import/ml-sc-scratch6/bol/thai_70b_dpo/sft_run/out_6k_2/step_630/config.json",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 28672,
+   "max_position_embeddings": 4096,
+   "model_name": "",
+   "model_type": "llama",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "return_dict": false,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "use_cache": true,
+   "vocab_size": 57344
+ }
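
The config describes a Llama-2-70B-scale decoder: 80 layers, hidden size 8192, 64 attention heads sharing 8 key/value heads (grouped-query attention), and a 57,344-token vocabulary extended beyond base Llama's 32,000 for Thai. A minimal sketch of inspecting it with transformers; "./model-repo" is a placeholder for a local clone of this repository, not a path recorded in the commit:

from transformers import AutoConfig

# Load the config added above; the path is hypothetical.
config = AutoConfig.from_pretrained("./model-repo")

print(config.model_type)           # "llama"
print(config.num_hidden_layers)    # 80
print(config.num_attention_heads)  # 64
print(config.num_key_value_heads)  # 8 -> each KV head serves 8 query heads
print(config.vocab_size)           # 57344
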
pytorch_model-01-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:442ade9397443cb3509bf08a6ea6444f55a0f6b185116305a1f21e99d4d5f652
+ size 3892352979
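
Each shard is tracked through Git LFS, so the Git history stores only the three-line pointer above: the pointer-spec version, the SHA-256 of the real payload, and its size in bytes (here roughly 3.9 GB). A minimal sketch of reading those fields back out of a pointer file as it exists before LFS replaces it with the actual weights; standard library only:

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its space-separated key/value fields."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("pytorch_model-01-of-35.bin")
assert pointer["version"] == "https://git-lfs.github.com/spec/v1"
assert pointer["oid"] == "sha256:442ade9397443cb3509bf08a6ea6444f55a0f6b185116305a1f21e99d4d5f652"
assert int(pointer["size"]) == 3892352979  # bytes of the actual shard payload

The remaining 34 shards follow the same pattern.
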
pytorch_model-02-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9854c7ee65ad1a756fdd1ebf9fda8456b77c18e8ef8929f52e1ab50486cf2065
+ size 4194411495
pytorch_model-03-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a0f15c44324c1a242446aa663fed248ee39cef707e6a35da9e412898ad9fe49
+ size 3892386649
pytorch_model-04-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfbfbb8e5a3d9e6af910c706949ecf44343162cf68e271540d99fc84799876c1
+ size 3892386649
pytorch_model-05-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac751562515eb3a54aecac8ec58b531c888bd2926a512a4d9114c575afa2a50a
+ size 4194411495
pytorch_model-06-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0f3e6efb9eb7889622834517132c3f49b0fffc202dc4b79c5b79845b905ac6c
+ size 3892386649
pytorch_model-07-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3004e2c7affa461e1fabf6bbb18325f21473585ab396c7e0b5c8b86ec40cffa0
+ size 3892386649
pytorch_model-08-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c493a8263cb2f8826dc26b3818eb3933ccbab5e994dc9e31a089e019668b1c90
+ size 4194411495
pytorch_model-09-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d153bf01b2be1585c02a53651668c556e7c250e582c4f8c609ef334f006a234
+ size 3892386649
pytorch_model-10-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96d0d9f6d6b333c344c0b500d7ef616fbd860c13d00141d4f2704c11a88649f9
+ size 3892386649
pytorch_model-11-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ab27c0bbc685bf05793b293451ba9b5af427a2b3c3721462d8d4be9ce6168e8
+ size 4194411589
pytorch_model-12-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5ac16996117bffaaf4c6b5ee85643edccbc0faf69b4075bb76ffeb1bc995e6d
+ size 3892386672
pytorch_model-13-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:121d9eaf1cbd5d22b5dfa9909d5e814b42d1cf660f1088c5f1c8d3bfba5c2c90
+ size 3892386672
pytorch_model-14-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4092abb0f351df95271495230ff62da3627d02c6239dd8504b10383d345c1fcf
+ size 4194411589
pytorch_model-15-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad649a1c37eee8af2aef1141f481c2d803c1e862fc28996d631bc67beba0c633
+ size 3892386672
pytorch_model-16-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4280a3254252ac10646611953f02d622bbce12e79c3adee130b3c9dfb706dcf
+ size 3892386672
pytorch_model-17-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5fecfb64152eff52bee7a2e250dc2d7eca6641c5ecf376f41271d617e94891a
+ size 4194411589
pytorch_model-18-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce0ea95505c8017e848e820fd4a539c5bbb95d31753830d5ecf69a8993449850
+ size 3892386672
pytorch_model-19-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8563071c25902efac88dee0c63b6a0cba73548a165e4ca2c9bed5a0b9b151c53
+ size 3892386672
pytorch_model-20-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b4b879a6213c2d0a07f473cadc35080f1c8a388e699c822d66945c6f81a9448
+ size 4194411589
pytorch_model-21-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17cf68690ae75504b35feb98d3d8b80d90086c4b1998bb42ddd2560ade3701a5
+ size 3892386672
pytorch_model-22-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:303993ca083f6fb11ddeaca618ed7e41aacc7a5a2c54935a3831940a4075f907
+ size 3892386672
pytorch_model-23-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3203d3d0358197c46b50d190c508ac34eb70ca317c32c200cbfc8d3b82ee72ba
+ size 4194411589
pytorch_model-24-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9d2226129cab323863cc0a0bebdca78dcf6631f2566c2c41933847eac3b5baa
+ size 3892386672
pytorch_model-25-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23a0f4268a4989484ceec75036b7f42f92e9651302c6d1effa1d2dc01386fd04
+ size 3892386672
pytorch_model-26-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:816951aef73b65e19e2122648b758c84f45632c7abbb7e31f9c08ef2b7740468
+ size 4194411589
pytorch_model-27-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbc132e90bc06dd091f0ac4408bf2bacc394cbc35f7ea3c2cdec09a63c891664
+ size 3892386672
pytorch_model-28-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2e5b9c1c6f271312f356fa6087cb46a39d63bd4e3469703f5bc676860bc67ab
+ size 3892386672
pytorch_model-29-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee78ef3ee2eab3a94fba5244b05dcce7538170cde0665e920d51ec3bb1befc25
+ size 4194411589
pytorch_model-30-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275aafb5a38263d10d4b3e7718c1a684c855aff0d462bad7d459ec2eb9c4b0ef
+ size 3892386672
pytorch_model-31-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3336fbef8652347335bb9b021098c30487cf3f7b3fda65ca19fb932497703f04
+ size 3892386672
pytorch_model-32-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37f5348ca5f133cd88716a9756a837faa973fb1943c1f279a2e59ca088fc43ec
+ size 4194411589
pytorch_model-33-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:099b0b87b3c5d21f89e13f48db06226108235b20273b0d8955fca5cc44a8b051
+ size 3892386672
pytorch_model-34-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffc1b6b6cdb1f284276ca787e7f8b4f8a669fc8b0a11f381845b1ae47b577451
+ size 3892386672
pytorch_model-35-of-35.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d635f7a336770ce491e0164fda9a5f7a2c022c7486d444740afa36aa9de84813
+ size 3120649182
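
The size fields also give the total checkpoint footprint directly; summing them is a quick sanity check before downloading. The values below are copied verbatim, in order, from the 35 pointers above:

# Shard sizes in bytes, from the LFS pointers above.
shard_sizes = [
    3892352979, 4194411495, 3892386649, 3892386649, 4194411495,
    3892386649, 3892386649, 4194411495, 3892386649, 3892386649,
    4194411589, 3892386672, 3892386672, 4194411589, 3892386672,
    3892386672, 4194411589, 3892386672, 3892386672, 4194411589,
    3892386672, 3892386672, 4194411589, 3892386672, 3892386672,
    4194411589, 3892386672, 3892386672, 4194411589, 3892386672,
    3892386672, 4194411589, 3892386672, 3892386672, 3120649182,
]
total = sum(shard_sizes)
print(f"{total:,} bytes ≈ {total / 1e9:.1f} GB")  # 138,784,036,004 bytes ≈ 138.8 GB

At roughly 2 bytes per parameter for a model of this size, the shards appear to hold 16-bit weights on disk, even though the config's torch_dtype field requests float32 when loading.
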
pytorch_model.bin.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {}, "weight_map": {"model.embed_tokens.weight": "pytorch_model-01-of-35.bin", "model.layers.0.self_attn.q_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.self_attn.k_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.self_attn.v_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.self_attn.o_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-01-of-35.bin", "model.layers.0.mlp.gate_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.mlp.up_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.mlp.down_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.0.input_layernorm.weight": "pytorch_model-01-of-35.bin", "model.layers.0.post_attention_layernorm.weight": "pytorch_model-01-of-35.bin", "model.layers.1.self_attn.q_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.self_attn.k_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.self_attn.v_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.self_attn.o_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-01-of-35.bin", "model.layers.1.mlp.gate_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.mlp.up_proj.weight": "pytorch_model-01-of-35.bin", "model.layers.1.mlp.down_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.1.input_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.1.post_attention_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.2.self_attn.q_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.self_attn.k_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.self_attn.v_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.self_attn.o_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-02-of-35.bin", "model.layers.2.mlp.gate_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.mlp.up_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.mlp.down_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.2.input_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.2.post_attention_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.3.self_attn.q_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.self_attn.k_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.self_attn.v_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.self_attn.o_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-02-of-35.bin", "model.layers.3.mlp.gate_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.mlp.up_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.mlp.down_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.3.input_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.3.post_attention_layernorm.weight": "pytorch_model-02-of-35.bin", "model.layers.4.self_attn.q_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.4.self_attn.k_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.4.self_attn.v_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.4.self_attn.o_proj.weight": "pytorch_model-02-of-35.bin", "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-02-of-35.bin", "model.layers.4.mlp.gate_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.4.mlp.up_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.4.mlp.down_proj.weight": "pytorch_model-03-of-35.bin", 
"model.layers.4.input_layernorm.weight": "pytorch_model-03-of-35.bin", "model.layers.4.post_attention_layernorm.weight": "pytorch_model-03-of-35.bin", "model.layers.5.self_attn.q_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.self_attn.k_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.self_attn.v_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.self_attn.o_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-03-of-35.bin", "model.layers.5.mlp.gate_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.mlp.up_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.mlp.down_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.5.input_layernorm.weight": "pytorch_model-03-of-35.bin", "model.layers.5.post_attention_layernorm.weight": "pytorch_model-03-of-35.bin", "model.layers.6.self_attn.q_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.6.self_attn.k_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.6.self_attn.v_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.6.self_attn.o_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-03-of-35.bin", "model.layers.6.mlp.gate_proj.weight": "pytorch_model-03-of-35.bin", "model.layers.6.mlp.up_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.6.mlp.down_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.6.input_layernorm.weight": "pytorch_model-04-of-35.bin", "model.layers.6.post_attention_layernorm.weight": "pytorch_model-04-of-35.bin", "model.layers.7.self_attn.q_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.self_attn.k_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.self_attn.v_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.self_attn.o_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-04-of-35.bin", "model.layers.7.mlp.gate_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.mlp.up_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.mlp.down_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.7.input_layernorm.weight": "pytorch_model-04-of-35.bin", "model.layers.7.post_attention_layernorm.weight": "pytorch_model-04-of-35.bin", "model.layers.8.self_attn.q_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.self_attn.k_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.self_attn.v_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.self_attn.o_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-04-of-35.bin", "model.layers.8.mlp.gate_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.mlp.up_proj.weight": "pytorch_model-04-of-35.bin", "model.layers.8.mlp.down_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.8.input_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.8.post_attention_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.9.self_attn.q_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.self_attn.k_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.self_attn.v_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.self_attn.o_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-05-of-35.bin", "model.layers.9.mlp.gate_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.mlp.up_proj.weight": "pytorch_model-05-of-35.bin", 
"model.layers.9.mlp.down_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.9.input_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.9.post_attention_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.10.self_attn.q_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.self_attn.k_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.self_attn.v_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.self_attn.o_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-05-of-35.bin", "model.layers.10.mlp.gate_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.mlp.up_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.mlp.down_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.10.input_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.10.post_attention_layernorm.weight": "pytorch_model-05-of-35.bin", "model.layers.11.self_attn.q_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.11.self_attn.k_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.11.self_attn.v_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.11.self_attn.o_proj.weight": "pytorch_model-05-of-35.bin", "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-05-of-35.bin", "model.layers.11.mlp.gate_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.11.mlp.up_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.11.mlp.down_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.11.input_layernorm.weight": "pytorch_model-06-of-35.bin", "model.layers.11.post_attention_layernorm.weight": "pytorch_model-06-of-35.bin", "model.layers.12.self_attn.q_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.self_attn.k_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.self_attn.v_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.self_attn.o_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-06-of-35.bin", "model.layers.12.mlp.gate_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.mlp.up_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.mlp.down_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.12.input_layernorm.weight": "pytorch_model-06-of-35.bin", "model.layers.12.post_attention_layernorm.weight": "pytorch_model-06-of-35.bin", "model.layers.13.self_attn.q_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.13.self_attn.k_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.13.self_attn.v_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.13.self_attn.o_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-06-of-35.bin", "model.layers.13.mlp.gate_proj.weight": "pytorch_model-06-of-35.bin", "model.layers.13.mlp.up_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.13.mlp.down_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.13.input_layernorm.weight": "pytorch_model-07-of-35.bin", "model.layers.13.post_attention_layernorm.weight": "pytorch_model-07-of-35.bin", "model.layers.14.self_attn.q_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.self_attn.k_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.self_attn.v_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.self_attn.o_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-07-of-35.bin", 
"model.layers.14.mlp.gate_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.mlp.up_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.mlp.down_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.14.input_layernorm.weight": "pytorch_model-07-of-35.bin", "model.layers.14.post_attention_layernorm.weight": "pytorch_model-07-of-35.bin", "model.layers.15.self_attn.q_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.self_attn.k_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.self_attn.v_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.self_attn.o_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-07-of-35.bin", "model.layers.15.mlp.gate_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.mlp.up_proj.weight": "pytorch_model-07-of-35.bin", "model.layers.15.mlp.down_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.15.input_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.15.post_attention_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.16.self_attn.q_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.self_attn.k_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.self_attn.v_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.self_attn.o_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-08-of-35.bin", "model.layers.16.mlp.gate_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.mlp.up_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.mlp.down_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.16.input_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.16.post_attention_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.17.self_attn.q_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.self_attn.k_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.self_attn.v_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.self_attn.o_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-08-of-35.bin", "model.layers.17.mlp.gate_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.mlp.up_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.mlp.down_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.17.input_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.17.post_attention_layernorm.weight": "pytorch_model-08-of-35.bin", "model.layers.18.self_attn.q_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.18.self_attn.k_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.18.self_attn.v_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.18.self_attn.o_proj.weight": "pytorch_model-08-of-35.bin", "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-08-of-35.bin", "model.layers.18.mlp.gate_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.18.mlp.up_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.18.mlp.down_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.18.input_layernorm.weight": "pytorch_model-09-of-35.bin", "model.layers.18.post_attention_layernorm.weight": "pytorch_model-09-of-35.bin", "model.layers.19.self_attn.q_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.self_attn.k_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.self_attn.v_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.self_attn.o_proj.weight": 
"pytorch_model-09-of-35.bin", "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-09-of-35.bin", "model.layers.19.mlp.gate_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.mlp.up_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.mlp.down_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.19.input_layernorm.weight": "pytorch_model-09-of-35.bin", "model.layers.19.post_attention_layernorm.weight": "pytorch_model-09-of-35.bin", "model.layers.20.self_attn.q_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.20.self_attn.k_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.20.self_attn.v_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.20.self_attn.o_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-09-of-35.bin", "model.layers.20.mlp.gate_proj.weight": "pytorch_model-09-of-35.bin", "model.layers.20.mlp.up_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.20.mlp.down_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.20.input_layernorm.weight": "pytorch_model-10-of-35.bin", "model.layers.20.post_attention_layernorm.weight": "pytorch_model-10-of-35.bin", "model.layers.21.self_attn.q_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.self_attn.k_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.self_attn.v_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.self_attn.o_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-10-of-35.bin", "model.layers.21.mlp.gate_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.mlp.up_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.mlp.down_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.21.input_layernorm.weight": "pytorch_model-10-of-35.bin", "model.layers.21.post_attention_layernorm.weight": "pytorch_model-10-of-35.bin", "model.layers.22.self_attn.q_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.self_attn.k_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.self_attn.v_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.self_attn.o_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-10-of-35.bin", "model.layers.22.mlp.gate_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.mlp.up_proj.weight": "pytorch_model-10-of-35.bin", "model.layers.22.mlp.down_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.22.input_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.22.post_attention_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.23.self_attn.q_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.self_attn.k_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.self_attn.v_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.self_attn.o_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-11-of-35.bin", "model.layers.23.mlp.gate_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.mlp.up_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.mlp.down_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.23.input_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.23.post_attention_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.24.self_attn.q_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.self_attn.k_proj.weight": "pytorch_model-11-of-35.bin", 
"model.layers.24.self_attn.v_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.self_attn.o_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-11-of-35.bin", "model.layers.24.mlp.gate_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.mlp.up_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.mlp.down_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.24.input_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.24.post_attention_layernorm.weight": "pytorch_model-11-of-35.bin", "model.layers.25.self_attn.q_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.25.self_attn.k_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.25.self_attn.v_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.25.self_attn.o_proj.weight": "pytorch_model-11-of-35.bin", "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-11-of-35.bin", "model.layers.25.mlp.gate_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.25.mlp.up_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.25.mlp.down_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.25.input_layernorm.weight": "pytorch_model-12-of-35.bin", "model.layers.25.post_attention_layernorm.weight": "pytorch_model-12-of-35.bin", "model.layers.26.self_attn.q_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.self_attn.k_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.self_attn.v_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.self_attn.o_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-12-of-35.bin", "model.layers.26.mlp.gate_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.mlp.up_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.mlp.down_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.26.input_layernorm.weight": "pytorch_model-12-of-35.bin", "model.layers.26.post_attention_layernorm.weight": "pytorch_model-12-of-35.bin", "model.layers.27.self_attn.q_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.27.self_attn.k_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.27.self_attn.v_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.27.self_attn.o_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-12-of-35.bin", "model.layers.27.mlp.gate_proj.weight": "pytorch_model-12-of-35.bin", "model.layers.27.mlp.up_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.27.mlp.down_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.27.input_layernorm.weight": "pytorch_model-13-of-35.bin", "model.layers.27.post_attention_layernorm.weight": "pytorch_model-13-of-35.bin", "model.layers.28.self_attn.q_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.self_attn.k_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.self_attn.v_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.self_attn.o_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-13-of-35.bin", "model.layers.28.mlp.gate_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.mlp.up_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.mlp.down_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.28.input_layernorm.weight": "pytorch_model-13-of-35.bin", "model.layers.28.post_attention_layernorm.weight": "pytorch_model-13-of-35.bin", 
"model.layers.29.self_attn.q_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.self_attn.k_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.self_attn.v_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.self_attn.o_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-13-of-35.bin", "model.layers.29.mlp.gate_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.mlp.up_proj.weight": "pytorch_model-13-of-35.bin", "model.layers.29.mlp.down_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.29.input_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.29.post_attention_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.30.self_attn.q_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.self_attn.k_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.self_attn.v_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.self_attn.o_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-14-of-35.bin", "model.layers.30.mlp.gate_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.mlp.up_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.mlp.down_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.30.input_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.30.post_attention_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.31.self_attn.q_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.self_attn.k_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.self_attn.v_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.self_attn.o_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-14-of-35.bin", "model.layers.31.mlp.gate_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.mlp.up_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.mlp.down_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.31.input_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.31.post_attention_layernorm.weight": "pytorch_model-14-of-35.bin", "model.layers.32.self_attn.q_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.32.self_attn.k_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.32.self_attn.v_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.32.self_attn.o_proj.weight": "pytorch_model-14-of-35.bin", "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-14-of-35.bin", "model.layers.32.mlp.gate_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.32.mlp.up_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.32.mlp.down_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.32.input_layernorm.weight": "pytorch_model-15-of-35.bin", "model.layers.32.post_attention_layernorm.weight": "pytorch_model-15-of-35.bin", "model.layers.33.self_attn.q_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.self_attn.k_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.self_attn.v_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.self_attn.o_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-15-of-35.bin", "model.layers.33.mlp.gate_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.mlp.up_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.mlp.down_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.33.input_layernorm.weight": 
"pytorch_model-15-of-35.bin", "model.layers.33.post_attention_layernorm.weight": "pytorch_model-15-of-35.bin", "model.layers.34.self_attn.q_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.34.self_attn.k_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.34.self_attn.v_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.34.self_attn.o_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-15-of-35.bin", "model.layers.34.mlp.gate_proj.weight": "pytorch_model-15-of-35.bin", "model.layers.34.mlp.up_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.34.mlp.down_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.34.input_layernorm.weight": "pytorch_model-16-of-35.bin", "model.layers.34.post_attention_layernorm.weight": "pytorch_model-16-of-35.bin", "model.layers.35.self_attn.q_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.self_attn.k_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.self_attn.v_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.self_attn.o_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-16-of-35.bin", "model.layers.35.mlp.gate_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.mlp.up_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.mlp.down_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.35.input_layernorm.weight": "pytorch_model-16-of-35.bin", "model.layers.35.post_attention_layernorm.weight": "pytorch_model-16-of-35.bin", "model.layers.36.self_attn.q_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.self_attn.k_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.self_attn.v_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.self_attn.o_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-16-of-35.bin", "model.layers.36.mlp.gate_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.mlp.up_proj.weight": "pytorch_model-16-of-35.bin", "model.layers.36.mlp.down_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.36.input_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.36.post_attention_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.37.self_attn.q_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.self_attn.k_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.self_attn.v_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.self_attn.o_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-17-of-35.bin", "model.layers.37.mlp.gate_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.mlp.up_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.mlp.down_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.37.input_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.37.post_attention_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.38.self_attn.q_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.self_attn.k_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.self_attn.v_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.self_attn.o_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-17-of-35.bin", "model.layers.38.mlp.gate_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.mlp.up_proj.weight": "pytorch_model-17-of-35.bin", 
"model.layers.38.mlp.down_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.38.input_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.38.post_attention_layernorm.weight": "pytorch_model-17-of-35.bin", "model.layers.39.self_attn.q_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.39.self_attn.k_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.39.self_attn.v_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.39.self_attn.o_proj.weight": "pytorch_model-17-of-35.bin", "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-17-of-35.bin", "model.layers.39.mlp.gate_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.39.mlp.up_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.39.mlp.down_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.39.input_layernorm.weight": "pytorch_model-18-of-35.bin", "model.layers.39.post_attention_layernorm.weight": "pytorch_model-18-of-35.bin", "model.layers.40.self_attn.q_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.self_attn.k_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.self_attn.v_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.self_attn.o_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.self_attn.rotary_emb.inv_freq": "pytorch_model-18-of-35.bin", "model.layers.40.mlp.gate_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.mlp.up_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.mlp.down_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.40.input_layernorm.weight": "pytorch_model-18-of-35.bin", "model.layers.40.post_attention_layernorm.weight": "pytorch_model-18-of-35.bin", "model.layers.41.self_attn.q_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.41.self_attn.k_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.41.self_attn.v_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.41.self_attn.o_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.41.self_attn.rotary_emb.inv_freq": "pytorch_model-18-of-35.bin", "model.layers.41.mlp.gate_proj.weight": "pytorch_model-18-of-35.bin", "model.layers.41.mlp.up_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.41.mlp.down_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.41.input_layernorm.weight": "pytorch_model-19-of-35.bin", "model.layers.41.post_attention_layernorm.weight": "pytorch_model-19-of-35.bin", "model.layers.42.self_attn.q_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.self_attn.k_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.self_attn.v_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.self_attn.o_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.self_attn.rotary_emb.inv_freq": "pytorch_model-19-of-35.bin", "model.layers.42.mlp.gate_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.mlp.up_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.mlp.down_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.42.input_layernorm.weight": "pytorch_model-19-of-35.bin", "model.layers.42.post_attention_layernorm.weight": "pytorch_model-19-of-35.bin", "model.layers.43.self_attn.q_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.self_attn.k_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.self_attn.v_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.self_attn.o_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.self_attn.rotary_emb.inv_freq": "pytorch_model-19-of-35.bin", 
"model.layers.43.mlp.gate_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.mlp.up_proj.weight": "pytorch_model-19-of-35.bin", "model.layers.43.mlp.down_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.43.input_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.43.post_attention_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.44.self_attn.q_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.self_attn.k_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.self_attn.v_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.self_attn.o_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.self_attn.rotary_emb.inv_freq": "pytorch_model-20-of-35.bin", "model.layers.44.mlp.gate_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.mlp.up_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.mlp.down_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.44.input_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.44.post_attention_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.45.self_attn.q_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.self_attn.k_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.self_attn.v_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.self_attn.o_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.self_attn.rotary_emb.inv_freq": "pytorch_model-20-of-35.bin", "model.layers.45.mlp.gate_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.mlp.up_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.mlp.down_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.45.input_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.45.post_attention_layernorm.weight": "pytorch_model-20-of-35.bin", "model.layers.46.self_attn.q_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.46.self_attn.k_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.46.self_attn.v_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.46.self_attn.o_proj.weight": "pytorch_model-20-of-35.bin", "model.layers.46.self_attn.rotary_emb.inv_freq": "pytorch_model-20-of-35.bin", "model.layers.46.mlp.gate_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.46.mlp.up_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.46.mlp.down_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.46.input_layernorm.weight": "pytorch_model-21-of-35.bin", "model.layers.46.post_attention_layernorm.weight": "pytorch_model-21-of-35.bin", "model.layers.47.self_attn.q_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.self_attn.k_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.self_attn.v_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.self_attn.o_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.self_attn.rotary_emb.inv_freq": "pytorch_model-21-of-35.bin", "model.layers.47.mlp.gate_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.mlp.up_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.mlp.down_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.47.input_layernorm.weight": "pytorch_model-21-of-35.bin", "model.layers.47.post_attention_layernorm.weight": "pytorch_model-21-of-35.bin", "model.layers.48.self_attn.q_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.48.self_attn.k_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.48.self_attn.v_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.48.self_attn.o_proj.weight": 
"pytorch_model-21-of-35.bin", "model.layers.48.self_attn.rotary_emb.inv_freq": "pytorch_model-21-of-35.bin", "model.layers.48.mlp.gate_proj.weight": "pytorch_model-21-of-35.bin", "model.layers.48.mlp.up_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.48.mlp.down_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.48.input_layernorm.weight": "pytorch_model-22-of-35.bin", "model.layers.48.post_attention_layernorm.weight": "pytorch_model-22-of-35.bin", "model.layers.49.self_attn.q_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.self_attn.k_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.self_attn.v_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.self_attn.o_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.self_attn.rotary_emb.inv_freq": "pytorch_model-22-of-35.bin", "model.layers.49.mlp.gate_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.mlp.up_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.mlp.down_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.49.input_layernorm.weight": "pytorch_model-22-of-35.bin", "model.layers.49.post_attention_layernorm.weight": "pytorch_model-22-of-35.bin", "model.layers.50.self_attn.q_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.self_attn.k_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.self_attn.v_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.self_attn.o_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.self_attn.rotary_emb.inv_freq": "pytorch_model-22-of-35.bin", "model.layers.50.mlp.gate_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.mlp.up_proj.weight": "pytorch_model-22-of-35.bin", "model.layers.50.mlp.down_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.50.input_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.50.post_attention_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.51.self_attn.q_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.self_attn.k_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.self_attn.v_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.self_attn.o_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.self_attn.rotary_emb.inv_freq": "pytorch_model-23-of-35.bin", "model.layers.51.mlp.gate_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.mlp.up_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.mlp.down_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.51.input_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.51.post_attention_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.52.self_attn.q_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.self_attn.k_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.self_attn.v_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.self_attn.o_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.self_attn.rotary_emb.inv_freq": "pytorch_model-23-of-35.bin", "model.layers.52.mlp.gate_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.mlp.up_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.mlp.down_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.52.input_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.52.post_attention_layernorm.weight": "pytorch_model-23-of-35.bin", "model.layers.53.self_attn.q_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.53.self_attn.k_proj.weight": "pytorch_model-23-of-35.bin", 
"model.layers.53.self_attn.v_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.53.self_attn.o_proj.weight": "pytorch_model-23-of-35.bin", "model.layers.53.self_attn.rotary_emb.inv_freq": "pytorch_model-23-of-35.bin", "model.layers.53.mlp.gate_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.53.mlp.up_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.53.mlp.down_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.53.input_layernorm.weight": "pytorch_model-24-of-35.bin", "model.layers.53.post_attention_layernorm.weight": "pytorch_model-24-of-35.bin", "model.layers.54.self_attn.q_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.self_attn.k_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.self_attn.v_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.self_attn.o_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.self_attn.rotary_emb.inv_freq": "pytorch_model-24-of-35.bin", "model.layers.54.mlp.gate_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.mlp.up_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.mlp.down_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.54.input_layernorm.weight": "pytorch_model-24-of-35.bin", "model.layers.54.post_attention_layernorm.weight": "pytorch_model-24-of-35.bin", "model.layers.55.self_attn.q_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.55.self_attn.k_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.55.self_attn.v_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.55.self_attn.o_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.55.self_attn.rotary_emb.inv_freq": "pytorch_model-24-of-35.bin", "model.layers.55.mlp.gate_proj.weight": "pytorch_model-24-of-35.bin", "model.layers.55.mlp.up_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.55.mlp.down_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.55.input_layernorm.weight": "pytorch_model-25-of-35.bin", "model.layers.55.post_attention_layernorm.weight": "pytorch_model-25-of-35.bin", "model.layers.56.self_attn.q_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.self_attn.k_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.self_attn.v_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.self_attn.o_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.self_attn.rotary_emb.inv_freq": "pytorch_model-25-of-35.bin", "model.layers.56.mlp.gate_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.mlp.up_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.mlp.down_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.56.input_layernorm.weight": "pytorch_model-25-of-35.bin", "model.layers.56.post_attention_layernorm.weight": "pytorch_model-25-of-35.bin", "model.layers.57.self_attn.q_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.self_attn.k_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.self_attn.v_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.self_attn.o_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.self_attn.rotary_emb.inv_freq": "pytorch_model-25-of-35.bin", "model.layers.57.mlp.gate_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.mlp.up_proj.weight": "pytorch_model-25-of-35.bin", "model.layers.57.mlp.down_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.57.input_layernorm.weight": "pytorch_model-26-of-35.bin", "model.layers.57.post_attention_layernorm.weight": "pytorch_model-26-of-35.bin", 
"model.layers.58.self_attn.q_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.self_attn.k_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.self_attn.v_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.self_attn.o_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.self_attn.rotary_emb.inv_freq": "pytorch_model-26-of-35.bin", "model.layers.58.mlp.gate_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.mlp.up_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.mlp.down_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.58.input_layernorm.weight": "pytorch_model-26-of-35.bin", "model.layers.58.post_attention_layernorm.weight": "pytorch_model-26-of-35.bin", "model.layers.59.self_attn.q_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.self_attn.k_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.self_attn.v_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.self_attn.o_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.self_attn.rotary_emb.inv_freq": "pytorch_model-26-of-35.bin", "model.layers.59.mlp.gate_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.mlp.up_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.mlp.down_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.59.input_layernorm.weight": "pytorch_model-26-of-35.bin", "model.layers.59.post_attention_layernorm.weight": "pytorch_model-26-of-35.bin", "model.layers.60.self_attn.q_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.60.self_attn.k_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.60.self_attn.v_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.60.self_attn.o_proj.weight": "pytorch_model-26-of-35.bin", "model.layers.60.self_attn.rotary_emb.inv_freq": "pytorch_model-26-of-35.bin", "model.layers.60.mlp.gate_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.60.mlp.up_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.60.mlp.down_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.60.input_layernorm.weight": "pytorch_model-27-of-35.bin", "model.layers.60.post_attention_layernorm.weight": "pytorch_model-27-of-35.bin", "model.layers.61.self_attn.q_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.self_attn.k_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.self_attn.v_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.self_attn.o_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.self_attn.rotary_emb.inv_freq": "pytorch_model-27-of-35.bin", "model.layers.61.mlp.gate_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.mlp.up_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.mlp.down_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.61.input_layernorm.weight": "pytorch_model-27-of-35.bin", "model.layers.61.post_attention_layernorm.weight": "pytorch_model-27-of-35.bin", "model.layers.62.self_attn.q_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.62.self_attn.k_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.62.self_attn.v_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.62.self_attn.o_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.62.self_attn.rotary_emb.inv_freq": "pytorch_model-27-of-35.bin", "model.layers.62.mlp.gate_proj.weight": "pytorch_model-27-of-35.bin", "model.layers.62.mlp.up_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.62.mlp.down_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.62.input_layernorm.weight": 
"pytorch_model-28-of-35.bin", "model.layers.62.post_attention_layernorm.weight": "pytorch_model-28-of-35.bin", "model.layers.63.self_attn.q_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.self_attn.k_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.self_attn.v_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.self_attn.o_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.self_attn.rotary_emb.inv_freq": "pytorch_model-28-of-35.bin", "model.layers.63.mlp.gate_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.mlp.up_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.mlp.down_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.63.input_layernorm.weight": "pytorch_model-28-of-35.bin", "model.layers.63.post_attention_layernorm.weight": "pytorch_model-28-of-35.bin", "model.layers.64.self_attn.q_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.self_attn.k_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.self_attn.v_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.self_attn.o_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.self_attn.rotary_emb.inv_freq": "pytorch_model-28-of-35.bin", "model.layers.64.mlp.gate_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.mlp.up_proj.weight": "pytorch_model-28-of-35.bin", "model.layers.64.mlp.down_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.64.input_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.64.post_attention_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.65.self_attn.q_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.self_attn.k_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.self_attn.v_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.self_attn.o_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.self_attn.rotary_emb.inv_freq": "pytorch_model-29-of-35.bin", "model.layers.65.mlp.gate_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.mlp.up_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.mlp.down_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.65.input_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.65.post_attention_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.66.self_attn.q_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.self_attn.k_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.self_attn.v_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.self_attn.o_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.self_attn.rotary_emb.inv_freq": "pytorch_model-29-of-35.bin", "model.layers.66.mlp.gate_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.mlp.up_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.mlp.down_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.66.input_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.66.post_attention_layernorm.weight": "pytorch_model-29-of-35.bin", "model.layers.67.self_attn.q_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.67.self_attn.k_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.67.self_attn.v_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.67.self_attn.o_proj.weight": "pytorch_model-29-of-35.bin", "model.layers.67.self_attn.rotary_emb.inv_freq": "pytorch_model-29-of-35.bin", "model.layers.67.mlp.gate_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.67.mlp.up_proj.weight": "pytorch_model-30-of-35.bin", 
"model.layers.67.mlp.down_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.67.input_layernorm.weight": "pytorch_model-30-of-35.bin", "model.layers.67.post_attention_layernorm.weight": "pytorch_model-30-of-35.bin", "model.layers.68.self_attn.q_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.self_attn.k_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.self_attn.v_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.self_attn.o_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.self_attn.rotary_emb.inv_freq": "pytorch_model-30-of-35.bin", "model.layers.68.mlp.gate_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.mlp.up_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.mlp.down_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.68.input_layernorm.weight": "pytorch_model-30-of-35.bin", "model.layers.68.post_attention_layernorm.weight": "pytorch_model-30-of-35.bin", "model.layers.69.self_attn.q_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.69.self_attn.k_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.69.self_attn.v_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.69.self_attn.o_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.69.self_attn.rotary_emb.inv_freq": "pytorch_model-30-of-35.bin", "model.layers.69.mlp.gate_proj.weight": "pytorch_model-30-of-35.bin", "model.layers.69.mlp.up_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.69.mlp.down_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.69.input_layernorm.weight": "pytorch_model-31-of-35.bin", "model.layers.69.post_attention_layernorm.weight": "pytorch_model-31-of-35.bin", "model.layers.70.self_attn.q_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.self_attn.k_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.self_attn.v_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.self_attn.o_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.self_attn.rotary_emb.inv_freq": "pytorch_model-31-of-35.bin", "model.layers.70.mlp.gate_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.mlp.up_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.mlp.down_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.70.input_layernorm.weight": "pytorch_model-31-of-35.bin", "model.layers.70.post_attention_layernorm.weight": "pytorch_model-31-of-35.bin", "model.layers.71.self_attn.q_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.self_attn.k_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.self_attn.v_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.self_attn.o_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.self_attn.rotary_emb.inv_freq": "pytorch_model-31-of-35.bin", "model.layers.71.mlp.gate_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.mlp.up_proj.weight": "pytorch_model-31-of-35.bin", "model.layers.71.mlp.down_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.71.input_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.71.post_attention_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.72.self_attn.q_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.self_attn.k_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.self_attn.v_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.self_attn.o_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.self_attn.rotary_emb.inv_freq": "pytorch_model-32-of-35.bin", 
"model.layers.72.mlp.gate_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.mlp.up_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.mlp.down_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.72.input_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.72.post_attention_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.73.self_attn.q_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.self_attn.k_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.self_attn.v_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.self_attn.o_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.self_attn.rotary_emb.inv_freq": "pytorch_model-32-of-35.bin", "model.layers.73.mlp.gate_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.mlp.up_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.mlp.down_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.73.input_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.73.post_attention_layernorm.weight": "pytorch_model-32-of-35.bin", "model.layers.74.self_attn.q_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.74.self_attn.k_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.74.self_attn.v_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.74.self_attn.o_proj.weight": "pytorch_model-32-of-35.bin", "model.layers.74.self_attn.rotary_emb.inv_freq": "pytorch_model-32-of-35.bin", "model.layers.74.mlp.gate_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.74.mlp.up_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.74.mlp.down_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.74.input_layernorm.weight": "pytorch_model-33-of-35.bin", "model.layers.74.post_attention_layernorm.weight": "pytorch_model-33-of-35.bin", "model.layers.75.self_attn.q_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.self_attn.k_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.self_attn.v_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.self_attn.o_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.self_attn.rotary_emb.inv_freq": "pytorch_model-33-of-35.bin", "model.layers.75.mlp.gate_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.mlp.up_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.mlp.down_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.75.input_layernorm.weight": "pytorch_model-33-of-35.bin", "model.layers.75.post_attention_layernorm.weight": "pytorch_model-33-of-35.bin", "model.layers.76.self_attn.q_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.76.self_attn.k_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.76.self_attn.v_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.76.self_attn.o_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.76.self_attn.rotary_emb.inv_freq": "pytorch_model-33-of-35.bin", "model.layers.76.mlp.gate_proj.weight": "pytorch_model-33-of-35.bin", "model.layers.76.mlp.up_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.76.mlp.down_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.76.input_layernorm.weight": "pytorch_model-34-of-35.bin", "model.layers.76.post_attention_layernorm.weight": "pytorch_model-34-of-35.bin", "model.layers.77.self_attn.q_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.self_attn.k_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.self_attn.v_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.self_attn.o_proj.weight": 
"pytorch_model-34-of-35.bin", "model.layers.77.self_attn.rotary_emb.inv_freq": "pytorch_model-34-of-35.bin", "model.layers.77.mlp.gate_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.mlp.up_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.mlp.down_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.77.input_layernorm.weight": "pytorch_model-34-of-35.bin", "model.layers.77.post_attention_layernorm.weight": "pytorch_model-34-of-35.bin", "model.layers.78.self_attn.q_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.self_attn.k_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.self_attn.v_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.self_attn.o_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.self_attn.rotary_emb.inv_freq": "pytorch_model-34-of-35.bin", "model.layers.78.mlp.gate_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.mlp.up_proj.weight": "pytorch_model-34-of-35.bin", "model.layers.78.mlp.down_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.78.input_layernorm.weight": "pytorch_model-35-of-35.bin", "model.layers.78.post_attention_layernorm.weight": "pytorch_model-35-of-35.bin", "model.layers.79.self_attn.q_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.self_attn.k_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.self_attn.v_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.self_attn.o_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.self_attn.rotary_emb.inv_freq": "pytorch_model-35-of-35.bin", "model.layers.79.mlp.gate_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.mlp.up_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.mlp.down_proj.weight": "pytorch_model-35-of-35.bin", "model.layers.79.input_layernorm.weight": "pytorch_model-35-of-35.bin", "model.layers.79.post_attention_layernorm.weight": "pytorch_model-35-of-35.bin", "model.norm.weight": "pytorch_model-35-of-35.bin", "lm_head.weight": "pytorch_model-35-of-35.bin"}}
pytorch_model.bin.sambatensor_index.json ADDED
@@ -0,0 +1 @@
+ {"model.embed_tokens.weight": "llamaforcausallm__model__embed_tokens__weight", "model.layers.0.self_attn.q_proj.weight": "llamaforcausallm__model__layers__0__self_attn__q_proj__weight", "model.layers.0.self_attn.k_proj.weight": "llamaforcausallm__model__layers__0__self_attn__k_proj__weight", "model.layers.0.self_attn.v_proj.weight": "llamaforcausallm__model__layers__0__self_attn__v_proj__weight", "model.layers.0.self_attn.o_proj.weight": "llamaforcausallm__model__layers__0__self_attn__o_proj__weight", "model.layers.0.mlp.gate_proj.weight": "llamaforcausallm__model__layers__0__mlp__gate_proj__weight", "model.layers.0.mlp.up_proj.weight": "llamaforcausallm__model__layers__0__mlp__up_proj__weight", "model.layers.0.mlp.down_proj.weight": "llamaforcausallm__model__layers__0__mlp__down_proj__weight", "model.layers.0.input_layernorm.weight": "llamaforcausallm__model__layers__0__input_layernorm__weight", "model.layers.0.post_attention_layernorm.weight": "llamaforcausallm__model__layers__0__post_attention_layernorm__weight", "model.layers.1.self_attn.q_proj.weight": "llamaforcausallm__model__layers__1__self_attn__q_proj__weight", "model.layers.1.self_attn.k_proj.weight": "llamaforcausallm__model__layers__1__self_attn__k_proj__weight", "model.layers.1.self_attn.v_proj.weight": "llamaforcausallm__model__layers__1__self_attn__v_proj__weight", "model.layers.1.self_attn.o_proj.weight": "llamaforcausallm__model__layers__1__self_attn__o_proj__weight", "model.layers.1.mlp.gate_proj.weight": "llamaforcausallm__model__layers__1__mlp__gate_proj__weight", "model.layers.1.mlp.up_proj.weight": "llamaforcausallm__model__layers__1__mlp__up_proj__weight", "model.layers.1.mlp.down_proj.weight": "llamaforcausallm__model__layers__1__mlp__down_proj__weight", "model.layers.1.input_layernorm.weight": "llamaforcausallm__model__layers__1__input_layernorm__weight", "model.layers.1.post_attention_layernorm.weight": "llamaforcausallm__model__layers__1__post_attention_layernorm__weight", "model.layers.2.self_attn.q_proj.weight": "llamaforcausallm__model__layers__2__self_attn__q_proj__weight", "model.layers.2.self_attn.k_proj.weight": "llamaforcausallm__model__layers__2__self_attn__k_proj__weight", "model.layers.2.self_attn.v_proj.weight": "llamaforcausallm__model__layers__2__self_attn__v_proj__weight", "model.layers.2.self_attn.o_proj.weight": "llamaforcausallm__model__layers__2__self_attn__o_proj__weight", "model.layers.2.mlp.gate_proj.weight": "llamaforcausallm__model__layers__2__mlp__gate_proj__weight", "model.layers.2.mlp.up_proj.weight": "llamaforcausallm__model__layers__2__mlp__up_proj__weight", "model.layers.2.mlp.down_proj.weight": "llamaforcausallm__model__layers__2__mlp__down_proj__weight", "model.layers.2.input_layernorm.weight": "llamaforcausallm__model__layers__2__input_layernorm__weight", "model.layers.2.post_attention_layernorm.weight": "llamaforcausallm__model__layers__2__post_attention_layernorm__weight", "model.layers.3.self_attn.q_proj.weight": "llamaforcausallm__model__layers__3__self_attn__q_proj__weight", "model.layers.3.self_attn.k_proj.weight": "llamaforcausallm__model__layers__3__self_attn__k_proj__weight", "model.layers.3.self_attn.v_proj.weight": "llamaforcausallm__model__layers__3__self_attn__v_proj__weight", "model.layers.3.self_attn.o_proj.weight": "llamaforcausallm__model__layers__3__self_attn__o_proj__weight", "model.layers.3.mlp.gate_proj.weight": "llamaforcausallm__model__layers__3__mlp__gate_proj__weight", "model.layers.3.mlp.up_proj.weight": 
"llamaforcausallm__model__layers__3__mlp__up_proj__weight", "model.layers.3.mlp.down_proj.weight": "llamaforcausallm__model__layers__3__mlp__down_proj__weight", "model.layers.3.input_layernorm.weight": "llamaforcausallm__model__layers__3__input_layernorm__weight", "model.layers.3.post_attention_layernorm.weight": "llamaforcausallm__model__layers__3__post_attention_layernorm__weight", "model.layers.4.self_attn.q_proj.weight": "llamaforcausallm__model__layers__4__self_attn__q_proj__weight", "model.layers.4.self_attn.k_proj.weight": "llamaforcausallm__model__layers__4__self_attn__k_proj__weight", "model.layers.4.self_attn.v_proj.weight": "llamaforcausallm__model__layers__4__self_attn__v_proj__weight", "model.layers.4.self_attn.o_proj.weight": "llamaforcausallm__model__layers__4__self_attn__o_proj__weight", "model.layers.4.mlp.gate_proj.weight": "llamaforcausallm__model__layers__4__mlp__gate_proj__weight", "model.layers.4.mlp.up_proj.weight": "llamaforcausallm__model__layers__4__mlp__up_proj__weight", "model.layers.4.mlp.down_proj.weight": "llamaforcausallm__model__layers__4__mlp__down_proj__weight", "model.layers.4.input_layernorm.weight": "llamaforcausallm__model__layers__4__input_layernorm__weight", "model.layers.4.post_attention_layernorm.weight": "llamaforcausallm__model__layers__4__post_attention_layernorm__weight", "model.layers.5.self_attn.q_proj.weight": "llamaforcausallm__model__layers__5__self_attn__q_proj__weight", "model.layers.5.self_attn.k_proj.weight": "llamaforcausallm__model__layers__5__self_attn__k_proj__weight", "model.layers.5.self_attn.v_proj.weight": "llamaforcausallm__model__layers__5__self_attn__v_proj__weight", "model.layers.5.self_attn.o_proj.weight": "llamaforcausallm__model__layers__5__self_attn__o_proj__weight", "model.layers.5.mlp.gate_proj.weight": "llamaforcausallm__model__layers__5__mlp__gate_proj__weight", "model.layers.5.mlp.up_proj.weight": "llamaforcausallm__model__layers__5__mlp__up_proj__weight", "model.layers.5.mlp.down_proj.weight": "llamaforcausallm__model__layers__5__mlp__down_proj__weight", "model.layers.5.input_layernorm.weight": "llamaforcausallm__model__layers__5__input_layernorm__weight", "model.layers.5.post_attention_layernorm.weight": "llamaforcausallm__model__layers__5__post_attention_layernorm__weight", "model.layers.6.self_attn.q_proj.weight": "llamaforcausallm__model__layers__6__self_attn__q_proj__weight", "model.layers.6.self_attn.k_proj.weight": "llamaforcausallm__model__layers__6__self_attn__k_proj__weight", "model.layers.6.self_attn.v_proj.weight": "llamaforcausallm__model__layers__6__self_attn__v_proj__weight", "model.layers.6.self_attn.o_proj.weight": "llamaforcausallm__model__layers__6__self_attn__o_proj__weight", "model.layers.6.mlp.gate_proj.weight": "llamaforcausallm__model__layers__6__mlp__gate_proj__weight", "model.layers.6.mlp.up_proj.weight": "llamaforcausallm__model__layers__6__mlp__up_proj__weight", "model.layers.6.mlp.down_proj.weight": "llamaforcausallm__model__layers__6__mlp__down_proj__weight", "model.layers.6.input_layernorm.weight": "llamaforcausallm__model__layers__6__input_layernorm__weight", "model.layers.6.post_attention_layernorm.weight": "llamaforcausallm__model__layers__6__post_attention_layernorm__weight", "model.layers.7.self_attn.q_proj.weight": "llamaforcausallm__model__layers__7__self_attn__q_proj__weight", "model.layers.7.self_attn.k_proj.weight": "llamaforcausallm__model__layers__7__self_attn__k_proj__weight", "model.layers.7.self_attn.v_proj.weight": 
"llamaforcausallm__model__layers__7__self_attn__v_proj__weight", "model.layers.7.self_attn.o_proj.weight": "llamaforcausallm__model__layers__7__self_attn__o_proj__weight", "model.layers.7.mlp.gate_proj.weight": "llamaforcausallm__model__layers__7__mlp__gate_proj__weight", "model.layers.7.mlp.up_proj.weight": "llamaforcausallm__model__layers__7__mlp__up_proj__weight", "model.layers.7.mlp.down_proj.weight": "llamaforcausallm__model__layers__7__mlp__down_proj__weight", "model.layers.7.input_layernorm.weight": "llamaforcausallm__model__layers__7__input_layernorm__weight", "model.layers.7.post_attention_layernorm.weight": "llamaforcausallm__model__layers__7__post_attention_layernorm__weight", "model.layers.8.self_attn.q_proj.weight": "llamaforcausallm__model__layers__8__self_attn__q_proj__weight", "model.layers.8.self_attn.k_proj.weight": "llamaforcausallm__model__layers__8__self_attn__k_proj__weight", "model.layers.8.self_attn.v_proj.weight": "llamaforcausallm__model__layers__8__self_attn__v_proj__weight", "model.layers.8.self_attn.o_proj.weight": "llamaforcausallm__model__layers__8__self_attn__o_proj__weight", "model.layers.8.mlp.gate_proj.weight": "llamaforcausallm__model__layers__8__mlp__gate_proj__weight", "model.layers.8.mlp.up_proj.weight": "llamaforcausallm__model__layers__8__mlp__up_proj__weight", "model.layers.8.mlp.down_proj.weight": "llamaforcausallm__model__layers__8__mlp__down_proj__weight", "model.layers.8.input_layernorm.weight": "llamaforcausallm__model__layers__8__input_layernorm__weight", "model.layers.8.post_attention_layernorm.weight": "llamaforcausallm__model__layers__8__post_attention_layernorm__weight", "model.layers.9.self_attn.q_proj.weight": "llamaforcausallm__model__layers__9__self_attn__q_proj__weight", "model.layers.9.self_attn.k_proj.weight": "llamaforcausallm__model__layers__9__self_attn__k_proj__weight", "model.layers.9.self_attn.v_proj.weight": "llamaforcausallm__model__layers__9__self_attn__v_proj__weight", "model.layers.9.self_attn.o_proj.weight": "llamaforcausallm__model__layers__9__self_attn__o_proj__weight", "model.layers.9.mlp.gate_proj.weight": "llamaforcausallm__model__layers__9__mlp__gate_proj__weight", "model.layers.9.mlp.up_proj.weight": "llamaforcausallm__model__layers__9__mlp__up_proj__weight", "model.layers.9.mlp.down_proj.weight": "llamaforcausallm__model__layers__9__mlp__down_proj__weight", "model.layers.9.input_layernorm.weight": "llamaforcausallm__model__layers__9__input_layernorm__weight", "model.layers.9.post_attention_layernorm.weight": "llamaforcausallm__model__layers__9__post_attention_layernorm__weight", "model.layers.10.self_attn.q_proj.weight": "llamaforcausallm__model__layers__10__self_attn__q_proj__weight", "model.layers.10.self_attn.k_proj.weight": "llamaforcausallm__model__layers__10__self_attn__k_proj__weight", "model.layers.10.self_attn.v_proj.weight": "llamaforcausallm__model__layers__10__self_attn__v_proj__weight", "model.layers.10.self_attn.o_proj.weight": "llamaforcausallm__model__layers__10__self_attn__o_proj__weight", "model.layers.10.mlp.gate_proj.weight": "llamaforcausallm__model__layers__10__mlp__gate_proj__weight", "model.layers.10.mlp.up_proj.weight": "llamaforcausallm__model__layers__10__mlp__up_proj__weight", "model.layers.10.mlp.down_proj.weight": "llamaforcausallm__model__layers__10__mlp__down_proj__weight", "model.layers.10.input_layernorm.weight": "llamaforcausallm__model__layers__10__input_layernorm__weight", "model.layers.10.post_attention_layernorm.weight": 
"llamaforcausallm__model__layers__10__post_attention_layernorm__weight", "model.layers.11.self_attn.q_proj.weight": "llamaforcausallm__model__layers__11__self_attn__q_proj__weight", "model.layers.11.self_attn.k_proj.weight": "llamaforcausallm__model__layers__11__self_attn__k_proj__weight", "model.layers.11.self_attn.v_proj.weight": "llamaforcausallm__model__layers__11__self_attn__v_proj__weight", "model.layers.11.self_attn.o_proj.weight": "llamaforcausallm__model__layers__11__self_attn__o_proj__weight", "model.layers.11.mlp.gate_proj.weight": "llamaforcausallm__model__layers__11__mlp__gate_proj__weight", "model.layers.11.mlp.up_proj.weight": "llamaforcausallm__model__layers__11__mlp__up_proj__weight", "model.layers.11.mlp.down_proj.weight": "llamaforcausallm__model__layers__11__mlp__down_proj__weight", "model.layers.11.input_layernorm.weight": "llamaforcausallm__model__layers__11__input_layernorm__weight", "model.layers.11.post_attention_layernorm.weight": "llamaforcausallm__model__layers__11__post_attention_layernorm__weight", "model.layers.12.self_attn.q_proj.weight": "llamaforcausallm__model__layers__12__self_attn__q_proj__weight", "model.layers.12.self_attn.k_proj.weight": "llamaforcausallm__model__layers__12__self_attn__k_proj__weight", "model.layers.12.self_attn.v_proj.weight": "llamaforcausallm__model__layers__12__self_attn__v_proj__weight", "model.layers.12.self_attn.o_proj.weight": "llamaforcausallm__model__layers__12__self_attn__o_proj__weight", "model.layers.12.mlp.gate_proj.weight": "llamaforcausallm__model__layers__12__mlp__gate_proj__weight", "model.layers.12.mlp.up_proj.weight": "llamaforcausallm__model__layers__12__mlp__up_proj__weight", "model.layers.12.mlp.down_proj.weight": "llamaforcausallm__model__layers__12__mlp__down_proj__weight", "model.layers.12.input_layernorm.weight": "llamaforcausallm__model__layers__12__input_layernorm__weight", "model.layers.12.post_attention_layernorm.weight": "llamaforcausallm__model__layers__12__post_attention_layernorm__weight", "model.layers.13.self_attn.q_proj.weight": "llamaforcausallm__model__layers__13__self_attn__q_proj__weight", "model.layers.13.self_attn.k_proj.weight": "llamaforcausallm__model__layers__13__self_attn__k_proj__weight", "model.layers.13.self_attn.v_proj.weight": "llamaforcausallm__model__layers__13__self_attn__v_proj__weight", "model.layers.13.self_attn.o_proj.weight": "llamaforcausallm__model__layers__13__self_attn__o_proj__weight", "model.layers.13.mlp.gate_proj.weight": "llamaforcausallm__model__layers__13__mlp__gate_proj__weight", "model.layers.13.mlp.up_proj.weight": "llamaforcausallm__model__layers__13__mlp__up_proj__weight", "model.layers.13.mlp.down_proj.weight": "llamaforcausallm__model__layers__13__mlp__down_proj__weight", "model.layers.13.input_layernorm.weight": "llamaforcausallm__model__layers__13__input_layernorm__weight", "model.layers.13.post_attention_layernorm.weight": "llamaforcausallm__model__layers__13__post_attention_layernorm__weight", "model.layers.14.self_attn.q_proj.weight": "llamaforcausallm__model__layers__14__self_attn__q_proj__weight", "model.layers.14.self_attn.k_proj.weight": "llamaforcausallm__model__layers__14__self_attn__k_proj__weight", "model.layers.14.self_attn.v_proj.weight": "llamaforcausallm__model__layers__14__self_attn__v_proj__weight", "model.layers.14.self_attn.o_proj.weight": "llamaforcausallm__model__layers__14__self_attn__o_proj__weight", "model.layers.14.mlp.gate_proj.weight": "llamaforcausallm__model__layers__14__mlp__gate_proj__weight", 
"model.layers.14.mlp.up_proj.weight": "llamaforcausallm__model__layers__14__mlp__up_proj__weight", "model.layers.14.mlp.down_proj.weight": "llamaforcausallm__model__layers__14__mlp__down_proj__weight", "model.layers.14.input_layernorm.weight": "llamaforcausallm__model__layers__14__input_layernorm__weight", "model.layers.14.post_attention_layernorm.weight": "llamaforcausallm__model__layers__14__post_attention_layernorm__weight", "model.layers.15.self_attn.q_proj.weight": "llamaforcausallm__model__layers__15__self_attn__q_proj__weight", "model.layers.15.self_attn.k_proj.weight": "llamaforcausallm__model__layers__15__self_attn__k_proj__weight", "model.layers.15.self_attn.v_proj.weight": "llamaforcausallm__model__layers__15__self_attn__v_proj__weight", "model.layers.15.self_attn.o_proj.weight": "llamaforcausallm__model__layers__15__self_attn__o_proj__weight", "model.layers.15.mlp.gate_proj.weight": "llamaforcausallm__model__layers__15__mlp__gate_proj__weight", "model.layers.15.mlp.up_proj.weight": "llamaforcausallm__model__layers__15__mlp__up_proj__weight", "model.layers.15.mlp.down_proj.weight": "llamaforcausallm__model__layers__15__mlp__down_proj__weight", "model.layers.15.input_layernorm.weight": "llamaforcausallm__model__layers__15__input_layernorm__weight", "model.layers.15.post_attention_layernorm.weight": "llamaforcausallm__model__layers__15__post_attention_layernorm__weight", "model.layers.16.self_attn.q_proj.weight": "llamaforcausallm__model__layers__16__self_attn__q_proj__weight", "model.layers.16.self_attn.k_proj.weight": "llamaforcausallm__model__layers__16__self_attn__k_proj__weight", "model.layers.16.self_attn.v_proj.weight": "llamaforcausallm__model__layers__16__self_attn__v_proj__weight", "model.layers.16.self_attn.o_proj.weight": "llamaforcausallm__model__layers__16__self_attn__o_proj__weight", "model.layers.16.mlp.gate_proj.weight": "llamaforcausallm__model__layers__16__mlp__gate_proj__weight", "model.layers.16.mlp.up_proj.weight": "llamaforcausallm__model__layers__16__mlp__up_proj__weight", "model.layers.16.mlp.down_proj.weight": "llamaforcausallm__model__layers__16__mlp__down_proj__weight", "model.layers.16.input_layernorm.weight": "llamaforcausallm__model__layers__16__input_layernorm__weight", "model.layers.16.post_attention_layernorm.weight": "llamaforcausallm__model__layers__16__post_attention_layernorm__weight", "model.layers.17.self_attn.q_proj.weight": "llamaforcausallm__model__layers__17__self_attn__q_proj__weight", "model.layers.17.self_attn.k_proj.weight": "llamaforcausallm__model__layers__17__self_attn__k_proj__weight", "model.layers.17.self_attn.v_proj.weight": "llamaforcausallm__model__layers__17__self_attn__v_proj__weight", "model.layers.17.self_attn.o_proj.weight": "llamaforcausallm__model__layers__17__self_attn__o_proj__weight", "model.layers.17.mlp.gate_proj.weight": "llamaforcausallm__model__layers__17__mlp__gate_proj__weight", "model.layers.17.mlp.up_proj.weight": "llamaforcausallm__model__layers__17__mlp__up_proj__weight", "model.layers.17.mlp.down_proj.weight": "llamaforcausallm__model__layers__17__mlp__down_proj__weight", "model.layers.17.input_layernorm.weight": "llamaforcausallm__model__layers__17__input_layernorm__weight", "model.layers.17.post_attention_layernorm.weight": "llamaforcausallm__model__layers__17__post_attention_layernorm__weight", "model.layers.18.self_attn.q_proj.weight": "llamaforcausallm__model__layers__18__self_attn__q_proj__weight", "model.layers.18.self_attn.k_proj.weight": 
"llamaforcausallm__model__layers__18__self_attn__k_proj__weight", "model.layers.18.self_attn.v_proj.weight": "llamaforcausallm__model__layers__18__self_attn__v_proj__weight", "model.layers.18.self_attn.o_proj.weight": "llamaforcausallm__model__layers__18__self_attn__o_proj__weight", "model.layers.18.mlp.gate_proj.weight": "llamaforcausallm__model__layers__18__mlp__gate_proj__weight", "model.layers.18.mlp.up_proj.weight": "llamaforcausallm__model__layers__18__mlp__up_proj__weight", "model.layers.18.mlp.down_proj.weight": "llamaforcausallm__model__layers__18__mlp__down_proj__weight", "model.layers.18.input_layernorm.weight": "llamaforcausallm__model__layers__18__input_layernorm__weight", "model.layers.18.post_attention_layernorm.weight": "llamaforcausallm__model__layers__18__post_attention_layernorm__weight", "model.layers.19.self_attn.q_proj.weight": "llamaforcausallm__model__layers__19__self_attn__q_proj__weight", "model.layers.19.self_attn.k_proj.weight": "llamaforcausallm__model__layers__19__self_attn__k_proj__weight", "model.layers.19.self_attn.v_proj.weight": "llamaforcausallm__model__layers__19__self_attn__v_proj__weight", "model.layers.19.self_attn.o_proj.weight": "llamaforcausallm__model__layers__19__self_attn__o_proj__weight", "model.layers.19.mlp.gate_proj.weight": "llamaforcausallm__model__layers__19__mlp__gate_proj__weight", "model.layers.19.mlp.up_proj.weight": "llamaforcausallm__model__layers__19__mlp__up_proj__weight", "model.layers.19.mlp.down_proj.weight": "llamaforcausallm__model__layers__19__mlp__down_proj__weight", "model.layers.19.input_layernorm.weight": "llamaforcausallm__model__layers__19__input_layernorm__weight", "model.layers.19.post_attention_layernorm.weight": "llamaforcausallm__model__layers__19__post_attention_layernorm__weight", "model.layers.20.self_attn.q_proj.weight": "llamaforcausallm__model__layers__20__self_attn__q_proj__weight", "model.layers.20.self_attn.k_proj.weight": "llamaforcausallm__model__layers__20__self_attn__k_proj__weight", "model.layers.20.self_attn.v_proj.weight": "llamaforcausallm__model__layers__20__self_attn__v_proj__weight", "model.layers.20.self_attn.o_proj.weight": "llamaforcausallm__model__layers__20__self_attn__o_proj__weight", "model.layers.20.mlp.gate_proj.weight": "llamaforcausallm__model__layers__20__mlp__gate_proj__weight", "model.layers.20.mlp.up_proj.weight": "llamaforcausallm__model__layers__20__mlp__up_proj__weight", "model.layers.20.mlp.down_proj.weight": "llamaforcausallm__model__layers__20__mlp__down_proj__weight", "model.layers.20.input_layernorm.weight": "llamaforcausallm__model__layers__20__input_layernorm__weight", "model.layers.20.post_attention_layernorm.weight": "llamaforcausallm__model__layers__20__post_attention_layernorm__weight", "model.layers.21.self_attn.q_proj.weight": "llamaforcausallm__model__layers__21__self_attn__q_proj__weight", "model.layers.21.self_attn.k_proj.weight": "llamaforcausallm__model__layers__21__self_attn__k_proj__weight", "model.layers.21.self_attn.v_proj.weight": "llamaforcausallm__model__layers__21__self_attn__v_proj__weight", "model.layers.21.self_attn.o_proj.weight": "llamaforcausallm__model__layers__21__self_attn__o_proj__weight", "model.layers.21.mlp.gate_proj.weight": "llamaforcausallm__model__layers__21__mlp__gate_proj__weight", "model.layers.21.mlp.up_proj.weight": "llamaforcausallm__model__layers__21__mlp__up_proj__weight", "model.layers.21.mlp.down_proj.weight": "llamaforcausallm__model__layers__21__mlp__down_proj__weight", "model.layers.21.input_layernorm.weight": 
"llamaforcausallm__model__layers__21__input_layernorm__weight", "model.layers.21.post_attention_layernorm.weight": "llamaforcausallm__model__layers__21__post_attention_layernorm__weight", "model.layers.22.self_attn.q_proj.weight": "llamaforcausallm__model__layers__22__self_attn__q_proj__weight", "model.layers.22.self_attn.k_proj.weight": "llamaforcausallm__model__layers__22__self_attn__k_proj__weight", "model.layers.22.self_attn.v_proj.weight": "llamaforcausallm__model__layers__22__self_attn__v_proj__weight", "model.layers.22.self_attn.o_proj.weight": "llamaforcausallm__model__layers__22__self_attn__o_proj__weight", "model.layers.22.mlp.gate_proj.weight": "llamaforcausallm__model__layers__22__mlp__gate_proj__weight", "model.layers.22.mlp.up_proj.weight": "llamaforcausallm__model__layers__22__mlp__up_proj__weight", "model.layers.22.mlp.down_proj.weight": "llamaforcausallm__model__layers__22__mlp__down_proj__weight", "model.layers.22.input_layernorm.weight": "llamaforcausallm__model__layers__22__input_layernorm__weight", "model.layers.22.post_attention_layernorm.weight": "llamaforcausallm__model__layers__22__post_attention_layernorm__weight", "model.layers.23.self_attn.q_proj.weight": "llamaforcausallm__model__layers__23__self_attn__q_proj__weight", "model.layers.23.self_attn.k_proj.weight": "llamaforcausallm__model__layers__23__self_attn__k_proj__weight", "model.layers.23.self_attn.v_proj.weight": "llamaforcausallm__model__layers__23__self_attn__v_proj__weight", "model.layers.23.self_attn.o_proj.weight": "llamaforcausallm__model__layers__23__self_attn__o_proj__weight", "model.layers.23.mlp.gate_proj.weight": "llamaforcausallm__model__layers__23__mlp__gate_proj__weight", "model.layers.23.mlp.up_proj.weight": "llamaforcausallm__model__layers__23__mlp__up_proj__weight", "model.layers.23.mlp.down_proj.weight": "llamaforcausallm__model__layers__23__mlp__down_proj__weight", "model.layers.23.input_layernorm.weight": "llamaforcausallm__model__layers__23__input_layernorm__weight", "model.layers.23.post_attention_layernorm.weight": "llamaforcausallm__model__layers__23__post_attention_layernorm__weight", "model.layers.24.self_attn.q_proj.weight": "llamaforcausallm__model__layers__24__self_attn__q_proj__weight", "model.layers.24.self_attn.k_proj.weight": "llamaforcausallm__model__layers__24__self_attn__k_proj__weight", "model.layers.24.self_attn.v_proj.weight": "llamaforcausallm__model__layers__24__self_attn__v_proj__weight", "model.layers.24.self_attn.o_proj.weight": "llamaforcausallm__model__layers__24__self_attn__o_proj__weight", "model.layers.24.mlp.gate_proj.weight": "llamaforcausallm__model__layers__24__mlp__gate_proj__weight", "model.layers.24.mlp.up_proj.weight": "llamaforcausallm__model__layers__24__mlp__up_proj__weight", "model.layers.24.mlp.down_proj.weight": "llamaforcausallm__model__layers__24__mlp__down_proj__weight", "model.layers.24.input_layernorm.weight": "llamaforcausallm__model__layers__24__input_layernorm__weight", "model.layers.24.post_attention_layernorm.weight": "llamaforcausallm__model__layers__24__post_attention_layernorm__weight", "model.layers.25.self_attn.q_proj.weight": "llamaforcausallm__model__layers__25__self_attn__q_proj__weight", "model.layers.25.self_attn.k_proj.weight": "llamaforcausallm__model__layers__25__self_attn__k_proj__weight", "model.layers.25.self_attn.v_proj.weight": "llamaforcausallm__model__layers__25__self_attn__v_proj__weight", "model.layers.25.self_attn.o_proj.weight": "llamaforcausallm__model__layers__25__self_attn__o_proj__weight", 
"model.layers.25.mlp.gate_proj.weight": "llamaforcausallm__model__layers__25__mlp__gate_proj__weight", "model.layers.25.mlp.up_proj.weight": "llamaforcausallm__model__layers__25__mlp__up_proj__weight", "model.layers.25.mlp.down_proj.weight": "llamaforcausallm__model__layers__25__mlp__down_proj__weight", "model.layers.25.input_layernorm.weight": "llamaforcausallm__model__layers__25__input_layernorm__weight", "model.layers.25.post_attention_layernorm.weight": "llamaforcausallm__model__layers__25__post_attention_layernorm__weight", "model.layers.26.self_attn.q_proj.weight": "llamaforcausallm__model__layers__26__self_attn__q_proj__weight", "model.layers.26.self_attn.k_proj.weight": "llamaforcausallm__model__layers__26__self_attn__k_proj__weight", "model.layers.26.self_attn.v_proj.weight": "llamaforcausallm__model__layers__26__self_attn__v_proj__weight", "model.layers.26.self_attn.o_proj.weight": "llamaforcausallm__model__layers__26__self_attn__o_proj__weight", "model.layers.26.mlp.gate_proj.weight": "llamaforcausallm__model__layers__26__mlp__gate_proj__weight", "model.layers.26.mlp.up_proj.weight": "llamaforcausallm__model__layers__26__mlp__up_proj__weight", "model.layers.26.mlp.down_proj.weight": "llamaforcausallm__model__layers__26__mlp__down_proj__weight", "model.layers.26.input_layernorm.weight": "llamaforcausallm__model__layers__26__input_layernorm__weight", "model.layers.26.post_attention_layernorm.weight": "llamaforcausallm__model__layers__26__post_attention_layernorm__weight", "model.layers.27.self_attn.q_proj.weight": "llamaforcausallm__model__layers__27__self_attn__q_proj__weight", "model.layers.27.self_attn.k_proj.weight": "llamaforcausallm__model__layers__27__self_attn__k_proj__weight", "model.layers.27.self_attn.v_proj.weight": "llamaforcausallm__model__layers__27__self_attn__v_proj__weight", "model.layers.27.self_attn.o_proj.weight": "llamaforcausallm__model__layers__27__self_attn__o_proj__weight", "model.layers.27.mlp.gate_proj.weight": "llamaforcausallm__model__layers__27__mlp__gate_proj__weight", "model.layers.27.mlp.up_proj.weight": "llamaforcausallm__model__layers__27__mlp__up_proj__weight", "model.layers.27.mlp.down_proj.weight": "llamaforcausallm__model__layers__27__mlp__down_proj__weight", "model.layers.27.input_layernorm.weight": "llamaforcausallm__model__layers__27__input_layernorm__weight", "model.layers.27.post_attention_layernorm.weight": "llamaforcausallm__model__layers__27__post_attention_layernorm__weight", "model.layers.28.self_attn.q_proj.weight": "llamaforcausallm__model__layers__28__self_attn__q_proj__weight", "model.layers.28.self_attn.k_proj.weight": "llamaforcausallm__model__layers__28__self_attn__k_proj__weight", "model.layers.28.self_attn.v_proj.weight": "llamaforcausallm__model__layers__28__self_attn__v_proj__weight", "model.layers.28.self_attn.o_proj.weight": "llamaforcausallm__model__layers__28__self_attn__o_proj__weight", "model.layers.28.mlp.gate_proj.weight": "llamaforcausallm__model__layers__28__mlp__gate_proj__weight", "model.layers.28.mlp.up_proj.weight": "llamaforcausallm__model__layers__28__mlp__up_proj__weight", "model.layers.28.mlp.down_proj.weight": "llamaforcausallm__model__layers__28__mlp__down_proj__weight", "model.layers.28.input_layernorm.weight": "llamaforcausallm__model__layers__28__input_layernorm__weight", "model.layers.28.post_attention_layernorm.weight": "llamaforcausallm__model__layers__28__post_attention_layernorm__weight", "model.layers.29.self_attn.q_proj.weight": 
"llamaforcausallm__model__layers__29__self_attn__q_proj__weight", "model.layers.29.self_attn.k_proj.weight": "llamaforcausallm__model__layers__29__self_attn__k_proj__weight", "model.layers.29.self_attn.v_proj.weight": "llamaforcausallm__model__layers__29__self_attn__v_proj__weight", "model.layers.29.self_attn.o_proj.weight": "llamaforcausallm__model__layers__29__self_attn__o_proj__weight", "model.layers.29.mlp.gate_proj.weight": "llamaforcausallm__model__layers__29__mlp__gate_proj__weight", "model.layers.29.mlp.up_proj.weight": "llamaforcausallm__model__layers__29__mlp__up_proj__weight", "model.layers.29.mlp.down_proj.weight": "llamaforcausallm__model__layers__29__mlp__down_proj__weight", "model.layers.29.input_layernorm.weight": "llamaforcausallm__model__layers__29__input_layernorm__weight", "model.layers.29.post_attention_layernorm.weight": "llamaforcausallm__model__layers__29__post_attention_layernorm__weight", "model.layers.30.self_attn.q_proj.weight": "llamaforcausallm__model__layers__30__self_attn__q_proj__weight", "model.layers.30.self_attn.k_proj.weight": "llamaforcausallm__model__layers__30__self_attn__k_proj__weight", "model.layers.30.self_attn.v_proj.weight": "llamaforcausallm__model__layers__30__self_attn__v_proj__weight", "model.layers.30.self_attn.o_proj.weight": "llamaforcausallm__model__layers__30__self_attn__o_proj__weight", "model.layers.30.mlp.gate_proj.weight": "llamaforcausallm__model__layers__30__mlp__gate_proj__weight", "model.layers.30.mlp.up_proj.weight": "llamaforcausallm__model__layers__30__mlp__up_proj__weight", "model.layers.30.mlp.down_proj.weight": "llamaforcausallm__model__layers__30__mlp__down_proj__weight", "model.layers.30.input_layernorm.weight": "llamaforcausallm__model__layers__30__input_layernorm__weight", "model.layers.30.post_attention_layernorm.weight": "llamaforcausallm__model__layers__30__post_attention_layernorm__weight", "model.layers.31.self_attn.q_proj.weight": "llamaforcausallm__model__layers__31__self_attn__q_proj__weight", "model.layers.31.self_attn.k_proj.weight": "llamaforcausallm__model__layers__31__self_attn__k_proj__weight", "model.layers.31.self_attn.v_proj.weight": "llamaforcausallm__model__layers__31__self_attn__v_proj__weight", "model.layers.31.self_attn.o_proj.weight": "llamaforcausallm__model__layers__31__self_attn__o_proj__weight", "model.layers.31.mlp.gate_proj.weight": "llamaforcausallm__model__layers__31__mlp__gate_proj__weight", "model.layers.31.mlp.up_proj.weight": "llamaforcausallm__model__layers__31__mlp__up_proj__weight", "model.layers.31.mlp.down_proj.weight": "llamaforcausallm__model__layers__31__mlp__down_proj__weight", "model.layers.31.input_layernorm.weight": "llamaforcausallm__model__layers__31__input_layernorm__weight", "model.layers.31.post_attention_layernorm.weight": "llamaforcausallm__model__layers__31__post_attention_layernorm__weight", "model.layers.32.self_attn.q_proj.weight": "llamaforcausallm__model__layers__32__self_attn__q_proj__weight", "model.layers.32.self_attn.k_proj.weight": "llamaforcausallm__model__layers__32__self_attn__k_proj__weight", "model.layers.32.self_attn.v_proj.weight": "llamaforcausallm__model__layers__32__self_attn__v_proj__weight", "model.layers.32.self_attn.o_proj.weight": "llamaforcausallm__model__layers__32__self_attn__o_proj__weight", "model.layers.32.mlp.gate_proj.weight": "llamaforcausallm__model__layers__32__mlp__gate_proj__weight", "model.layers.32.mlp.up_proj.weight": "llamaforcausallm__model__layers__32__mlp__up_proj__weight", "model.layers.32.mlp.down_proj.weight": 
"llamaforcausallm__model__layers__32__mlp__down_proj__weight", "model.layers.32.input_layernorm.weight": "llamaforcausallm__model__layers__32__input_layernorm__weight", "model.layers.32.post_attention_layernorm.weight": "llamaforcausallm__model__layers__32__post_attention_layernorm__weight", "model.layers.33.self_attn.q_proj.weight": "llamaforcausallm__model__layers__33__self_attn__q_proj__weight", "model.layers.33.self_attn.k_proj.weight": "llamaforcausallm__model__layers__33__self_attn__k_proj__weight", "model.layers.33.self_attn.v_proj.weight": "llamaforcausallm__model__layers__33__self_attn__v_proj__weight", "model.layers.33.self_attn.o_proj.weight": "llamaforcausallm__model__layers__33__self_attn__o_proj__weight", "model.layers.33.mlp.gate_proj.weight": "llamaforcausallm__model__layers__33__mlp__gate_proj__weight", "model.layers.33.mlp.up_proj.weight": "llamaforcausallm__model__layers__33__mlp__up_proj__weight", "model.layers.33.mlp.down_proj.weight": "llamaforcausallm__model__layers__33__mlp__down_proj__weight", "model.layers.33.input_layernorm.weight": "llamaforcausallm__model__layers__33__input_layernorm__weight", "model.layers.33.post_attention_layernorm.weight": "llamaforcausallm__model__layers__33__post_attention_layernorm__weight", "model.layers.34.self_attn.q_proj.weight": "llamaforcausallm__model__layers__34__self_attn__q_proj__weight", "model.layers.34.self_attn.k_proj.weight": "llamaforcausallm__model__layers__34__self_attn__k_proj__weight", "model.layers.34.self_attn.v_proj.weight": "llamaforcausallm__model__layers__34__self_attn__v_proj__weight", "model.layers.34.self_attn.o_proj.weight": "llamaforcausallm__model__layers__34__self_attn__o_proj__weight", "model.layers.34.mlp.gate_proj.weight": "llamaforcausallm__model__layers__34__mlp__gate_proj__weight", "model.layers.34.mlp.up_proj.weight": "llamaforcausallm__model__layers__34__mlp__up_proj__weight", "model.layers.34.mlp.down_proj.weight": "llamaforcausallm__model__layers__34__mlp__down_proj__weight", "model.layers.34.input_layernorm.weight": "llamaforcausallm__model__layers__34__input_layernorm__weight", "model.layers.34.post_attention_layernorm.weight": "llamaforcausallm__model__layers__34__post_attention_layernorm__weight", "model.layers.35.self_attn.q_proj.weight": "llamaforcausallm__model__layers__35__self_attn__q_proj__weight", "model.layers.35.self_attn.k_proj.weight": "llamaforcausallm__model__layers__35__self_attn__k_proj__weight", "model.layers.35.self_attn.v_proj.weight": "llamaforcausallm__model__layers__35__self_attn__v_proj__weight", "model.layers.35.self_attn.o_proj.weight": "llamaforcausallm__model__layers__35__self_attn__o_proj__weight", "model.layers.35.mlp.gate_proj.weight": "llamaforcausallm__model__layers__35__mlp__gate_proj__weight", "model.layers.35.mlp.up_proj.weight": "llamaforcausallm__model__layers__35__mlp__up_proj__weight", "model.layers.35.mlp.down_proj.weight": "llamaforcausallm__model__layers__35__mlp__down_proj__weight", "model.layers.35.input_layernorm.weight": "llamaforcausallm__model__layers__35__input_layernorm__weight", "model.layers.35.post_attention_layernorm.weight": "llamaforcausallm__model__layers__35__post_attention_layernorm__weight", "model.layers.36.self_attn.q_proj.weight": "llamaforcausallm__model__layers__36__self_attn__q_proj__weight", "model.layers.36.self_attn.k_proj.weight": "llamaforcausallm__model__layers__36__self_attn__k_proj__weight", "model.layers.36.self_attn.v_proj.weight": "llamaforcausallm__model__layers__36__self_attn__v_proj__weight", 
"model.layers.36.self_attn.o_proj.weight": "llamaforcausallm__model__layers__36__self_attn__o_proj__weight", "model.layers.36.mlp.gate_proj.weight": "llamaforcausallm__model__layers__36__mlp__gate_proj__weight", "model.layers.36.mlp.up_proj.weight": "llamaforcausallm__model__layers__36__mlp__up_proj__weight", "model.layers.36.mlp.down_proj.weight": "llamaforcausallm__model__layers__36__mlp__down_proj__weight", "model.layers.36.input_layernorm.weight": "llamaforcausallm__model__layers__36__input_layernorm__weight", "model.layers.36.post_attention_layernorm.weight": "llamaforcausallm__model__layers__36__post_attention_layernorm__weight", "model.layers.37.self_attn.q_proj.weight": "llamaforcausallm__model__layers__37__self_attn__q_proj__weight", "model.layers.37.self_attn.k_proj.weight": "llamaforcausallm__model__layers__37__self_attn__k_proj__weight", "model.layers.37.self_attn.v_proj.weight": "llamaforcausallm__model__layers__37__self_attn__v_proj__weight", "model.layers.37.self_attn.o_proj.weight": "llamaforcausallm__model__layers__37__self_attn__o_proj__weight", "model.layers.37.mlp.gate_proj.weight": "llamaforcausallm__model__layers__37__mlp__gate_proj__weight", "model.layers.37.mlp.up_proj.weight": "llamaforcausallm__model__layers__37__mlp__up_proj__weight", "model.layers.37.mlp.down_proj.weight": "llamaforcausallm__model__layers__37__mlp__down_proj__weight", "model.layers.37.input_layernorm.weight": "llamaforcausallm__model__layers__37__input_layernorm__weight", "model.layers.37.post_attention_layernorm.weight": "llamaforcausallm__model__layers__37__post_attention_layernorm__weight", "model.layers.38.self_attn.q_proj.weight": "llamaforcausallm__model__layers__38__self_attn__q_proj__weight", "model.layers.38.self_attn.k_proj.weight": "llamaforcausallm__model__layers__38__self_attn__k_proj__weight", "model.layers.38.self_attn.v_proj.weight": "llamaforcausallm__model__layers__38__self_attn__v_proj__weight", "model.layers.38.self_attn.o_proj.weight": "llamaforcausallm__model__layers__38__self_attn__o_proj__weight", "model.layers.38.mlp.gate_proj.weight": "llamaforcausallm__model__layers__38__mlp__gate_proj__weight", "model.layers.38.mlp.up_proj.weight": "llamaforcausallm__model__layers__38__mlp__up_proj__weight", "model.layers.38.mlp.down_proj.weight": "llamaforcausallm__model__layers__38__mlp__down_proj__weight", "model.layers.38.input_layernorm.weight": "llamaforcausallm__model__layers__38__input_layernorm__weight", "model.layers.38.post_attention_layernorm.weight": "llamaforcausallm__model__layers__38__post_attention_layernorm__weight", "model.layers.39.self_attn.q_proj.weight": "llamaforcausallm__model__layers__39__self_attn__q_proj__weight", "model.layers.39.self_attn.k_proj.weight": "llamaforcausallm__model__layers__39__self_attn__k_proj__weight", "model.layers.39.self_attn.v_proj.weight": "llamaforcausallm__model__layers__39__self_attn__v_proj__weight", "model.layers.39.self_attn.o_proj.weight": "llamaforcausallm__model__layers__39__self_attn__o_proj__weight", "model.layers.39.mlp.gate_proj.weight": "llamaforcausallm__model__layers__39__mlp__gate_proj__weight", "model.layers.39.mlp.up_proj.weight": "llamaforcausallm__model__layers__39__mlp__up_proj__weight", "model.layers.39.mlp.down_proj.weight": "llamaforcausallm__model__layers__39__mlp__down_proj__weight", "model.layers.39.input_layernorm.weight": "llamaforcausallm__model__layers__39__input_layernorm__weight", "model.layers.39.post_attention_layernorm.weight": 
"llamaforcausallm__model__layers__39__post_attention_layernorm__weight", "model.layers.40.self_attn.q_proj.weight": "llamaforcausallm__model__layers__40__self_attn__q_proj__weight", "model.layers.40.self_attn.k_proj.weight": "llamaforcausallm__model__layers__40__self_attn__k_proj__weight", "model.layers.40.self_attn.v_proj.weight": "llamaforcausallm__model__layers__40__self_attn__v_proj__weight", "model.layers.40.self_attn.o_proj.weight": "llamaforcausallm__model__layers__40__self_attn__o_proj__weight", "model.layers.40.mlp.gate_proj.weight": "llamaforcausallm__model__layers__40__mlp__gate_proj__weight", "model.layers.40.mlp.up_proj.weight": "llamaforcausallm__model__layers__40__mlp__up_proj__weight", "model.layers.40.mlp.down_proj.weight": "llamaforcausallm__model__layers__40__mlp__down_proj__weight", "model.layers.40.input_layernorm.weight": "llamaforcausallm__model__layers__40__input_layernorm__weight", "model.layers.40.post_attention_layernorm.weight": "llamaforcausallm__model__layers__40__post_attention_layernorm__weight", "model.layers.41.self_attn.q_proj.weight": "llamaforcausallm__model__layers__41__self_attn__q_proj__weight", "model.layers.41.self_attn.k_proj.weight": "llamaforcausallm__model__layers__41__self_attn__k_proj__weight", "model.layers.41.self_attn.v_proj.weight": "llamaforcausallm__model__layers__41__self_attn__v_proj__weight", "model.layers.41.self_attn.o_proj.weight": "llamaforcausallm__model__layers__41__self_attn__o_proj__weight", "model.layers.41.mlp.gate_proj.weight": "llamaforcausallm__model__layers__41__mlp__gate_proj__weight", "model.layers.41.mlp.up_proj.weight": "llamaforcausallm__model__layers__41__mlp__up_proj__weight", "model.layers.41.mlp.down_proj.weight": "llamaforcausallm__model__layers__41__mlp__down_proj__weight", "model.layers.41.input_layernorm.weight": "llamaforcausallm__model__layers__41__input_layernorm__weight", "model.layers.41.post_attention_layernorm.weight": "llamaforcausallm__model__layers__41__post_attention_layernorm__weight", "model.layers.42.self_attn.q_proj.weight": "llamaforcausallm__model__layers__42__self_attn__q_proj__weight", "model.layers.42.self_attn.k_proj.weight": "llamaforcausallm__model__layers__42__self_attn__k_proj__weight", "model.layers.42.self_attn.v_proj.weight": "llamaforcausallm__model__layers__42__self_attn__v_proj__weight", "model.layers.42.self_attn.o_proj.weight": "llamaforcausallm__model__layers__42__self_attn__o_proj__weight", "model.layers.42.mlp.gate_proj.weight": "llamaforcausallm__model__layers__42__mlp__gate_proj__weight", "model.layers.42.mlp.up_proj.weight": "llamaforcausallm__model__layers__42__mlp__up_proj__weight", "model.layers.42.mlp.down_proj.weight": "llamaforcausallm__model__layers__42__mlp__down_proj__weight", "model.layers.42.input_layernorm.weight": "llamaforcausallm__model__layers__42__input_layernorm__weight", "model.layers.42.post_attention_layernorm.weight": "llamaforcausallm__model__layers__42__post_attention_layernorm__weight", "model.layers.43.self_attn.q_proj.weight": "llamaforcausallm__model__layers__43__self_attn__q_proj__weight", "model.layers.43.self_attn.k_proj.weight": "llamaforcausallm__model__layers__43__self_attn__k_proj__weight", "model.layers.43.self_attn.v_proj.weight": "llamaforcausallm__model__layers__43__self_attn__v_proj__weight", "model.layers.43.self_attn.o_proj.weight": "llamaforcausallm__model__layers__43__self_attn__o_proj__weight", "model.layers.43.mlp.gate_proj.weight": "llamaforcausallm__model__layers__43__mlp__gate_proj__weight", 
"model.layers.43.mlp.up_proj.weight": "llamaforcausallm__model__layers__43__mlp__up_proj__weight", "model.layers.43.mlp.down_proj.weight": "llamaforcausallm__model__layers__43__mlp__down_proj__weight", "model.layers.43.input_layernorm.weight": "llamaforcausallm__model__layers__43__input_layernorm__weight", "model.layers.43.post_attention_layernorm.weight": "llamaforcausallm__model__layers__43__post_attention_layernorm__weight", "model.layers.44.self_attn.q_proj.weight": "llamaforcausallm__model__layers__44__self_attn__q_proj__weight", "model.layers.44.self_attn.k_proj.weight": "llamaforcausallm__model__layers__44__self_attn__k_proj__weight", "model.layers.44.self_attn.v_proj.weight": "llamaforcausallm__model__layers__44__self_attn__v_proj__weight", "model.layers.44.self_attn.o_proj.weight": "llamaforcausallm__model__layers__44__self_attn__o_proj__weight", "model.layers.44.mlp.gate_proj.weight": "llamaforcausallm__model__layers__44__mlp__gate_proj__weight", "model.layers.44.mlp.up_proj.weight": "llamaforcausallm__model__layers__44__mlp__up_proj__weight", "model.layers.44.mlp.down_proj.weight": "llamaforcausallm__model__layers__44__mlp__down_proj__weight", "model.layers.44.input_layernorm.weight": "llamaforcausallm__model__layers__44__input_layernorm__weight", "model.layers.44.post_attention_layernorm.weight": "llamaforcausallm__model__layers__44__post_attention_layernorm__weight", "model.layers.45.self_attn.q_proj.weight": "llamaforcausallm__model__layers__45__self_attn__q_proj__weight", "model.layers.45.self_attn.k_proj.weight": "llamaforcausallm__model__layers__45__self_attn__k_proj__weight", "model.layers.45.self_attn.v_proj.weight": "llamaforcausallm__model__layers__45__self_attn__v_proj__weight", "model.layers.45.self_attn.o_proj.weight": "llamaforcausallm__model__layers__45__self_attn__o_proj__weight", "model.layers.45.mlp.gate_proj.weight": "llamaforcausallm__model__layers__45__mlp__gate_proj__weight", "model.layers.45.mlp.up_proj.weight": "llamaforcausallm__model__layers__45__mlp__up_proj__weight", "model.layers.45.mlp.down_proj.weight": "llamaforcausallm__model__layers__45__mlp__down_proj__weight", "model.layers.45.input_layernorm.weight": "llamaforcausallm__model__layers__45__input_layernorm__weight", "model.layers.45.post_attention_layernorm.weight": "llamaforcausallm__model__layers__45__post_attention_layernorm__weight", "model.layers.46.self_attn.q_proj.weight": "llamaforcausallm__model__layers__46__self_attn__q_proj__weight", "model.layers.46.self_attn.k_proj.weight": "llamaforcausallm__model__layers__46__self_attn__k_proj__weight", "model.layers.46.self_attn.v_proj.weight": "llamaforcausallm__model__layers__46__self_attn__v_proj__weight", "model.layers.46.self_attn.o_proj.weight": "llamaforcausallm__model__layers__46__self_attn__o_proj__weight", "model.layers.46.mlp.gate_proj.weight": "llamaforcausallm__model__layers__46__mlp__gate_proj__weight", "model.layers.46.mlp.up_proj.weight": "llamaforcausallm__model__layers__46__mlp__up_proj__weight", "model.layers.46.mlp.down_proj.weight": "llamaforcausallm__model__layers__46__mlp__down_proj__weight", "model.layers.46.input_layernorm.weight": "llamaforcausallm__model__layers__46__input_layernorm__weight", "model.layers.46.post_attention_layernorm.weight": "llamaforcausallm__model__layers__46__post_attention_layernorm__weight", "model.layers.47.self_attn.q_proj.weight": "llamaforcausallm__model__layers__47__self_attn__q_proj__weight", "model.layers.47.self_attn.k_proj.weight": 
"llamaforcausallm__model__layers__47__self_attn__k_proj__weight", "model.layers.47.self_attn.v_proj.weight": "llamaforcausallm__model__layers__47__self_attn__v_proj__weight", "model.layers.47.self_attn.o_proj.weight": "llamaforcausallm__model__layers__47__self_attn__o_proj__weight", "model.layers.47.mlp.gate_proj.weight": "llamaforcausallm__model__layers__47__mlp__gate_proj__weight", "model.layers.47.mlp.up_proj.weight": "llamaforcausallm__model__layers__47__mlp__up_proj__weight", "model.layers.47.mlp.down_proj.weight": "llamaforcausallm__model__layers__47__mlp__down_proj__weight", "model.layers.47.input_layernorm.weight": "llamaforcausallm__model__layers__47__input_layernorm__weight", "model.layers.47.post_attention_layernorm.weight": "llamaforcausallm__model__layers__47__post_attention_layernorm__weight", "model.layers.48.self_attn.q_proj.weight": "llamaforcausallm__model__layers__48__self_attn__q_proj__weight", "model.layers.48.self_attn.k_proj.weight": "llamaforcausallm__model__layers__48__self_attn__k_proj__weight", "model.layers.48.self_attn.v_proj.weight": "llamaforcausallm__model__layers__48__self_attn__v_proj__weight", "model.layers.48.self_attn.o_proj.weight": "llamaforcausallm__model__layers__48__self_attn__o_proj__weight", "model.layers.48.mlp.gate_proj.weight": "llamaforcausallm__model__layers__48__mlp__gate_proj__weight", "model.layers.48.mlp.up_proj.weight": "llamaforcausallm__model__layers__48__mlp__up_proj__weight", "model.layers.48.mlp.down_proj.weight": "llamaforcausallm__model__layers__48__mlp__down_proj__weight", "model.layers.48.input_layernorm.weight": "llamaforcausallm__model__layers__48__input_layernorm__weight", "model.layers.48.post_attention_layernorm.weight": "llamaforcausallm__model__layers__48__post_attention_layernorm__weight", "model.layers.49.self_attn.q_proj.weight": "llamaforcausallm__model__layers__49__self_attn__q_proj__weight", "model.layers.49.self_attn.k_proj.weight": "llamaforcausallm__model__layers__49__self_attn__k_proj__weight", "model.layers.49.self_attn.v_proj.weight": "llamaforcausallm__model__layers__49__self_attn__v_proj__weight", "model.layers.49.self_attn.o_proj.weight": "llamaforcausallm__model__layers__49__self_attn__o_proj__weight", "model.layers.49.mlp.gate_proj.weight": "llamaforcausallm__model__layers__49__mlp__gate_proj__weight", "model.layers.49.mlp.up_proj.weight": "llamaforcausallm__model__layers__49__mlp__up_proj__weight", "model.layers.49.mlp.down_proj.weight": "llamaforcausallm__model__layers__49__mlp__down_proj__weight", "model.layers.49.input_layernorm.weight": "llamaforcausallm__model__layers__49__input_layernorm__weight", "model.layers.49.post_attention_layernorm.weight": "llamaforcausallm__model__layers__49__post_attention_layernorm__weight", "model.layers.50.self_attn.q_proj.weight": "llamaforcausallm__model__layers__50__self_attn__q_proj__weight", "model.layers.50.self_attn.k_proj.weight": "llamaforcausallm__model__layers__50__self_attn__k_proj__weight", "model.layers.50.self_attn.v_proj.weight": "llamaforcausallm__model__layers__50__self_attn__v_proj__weight", "model.layers.50.self_attn.o_proj.weight": "llamaforcausallm__model__layers__50__self_attn__o_proj__weight", "model.layers.50.mlp.gate_proj.weight": "llamaforcausallm__model__layers__50__mlp__gate_proj__weight", "model.layers.50.mlp.up_proj.weight": "llamaforcausallm__model__layers__50__mlp__up_proj__weight", "model.layers.50.mlp.down_proj.weight": "llamaforcausallm__model__layers__50__mlp__down_proj__weight", "model.layers.50.input_layernorm.weight": 
"llamaforcausallm__model__layers__50__input_layernorm__weight", "model.layers.50.post_attention_layernorm.weight": "llamaforcausallm__model__layers__50__post_attention_layernorm__weight", "model.layers.51.self_attn.q_proj.weight": "llamaforcausallm__model__layers__51__self_attn__q_proj__weight", "model.layers.51.self_attn.k_proj.weight": "llamaforcausallm__model__layers__51__self_attn__k_proj__weight", "model.layers.51.self_attn.v_proj.weight": "llamaforcausallm__model__layers__51__self_attn__v_proj__weight", "model.layers.51.self_attn.o_proj.weight": "llamaforcausallm__model__layers__51__self_attn__o_proj__weight", "model.layers.51.mlp.gate_proj.weight": "llamaforcausallm__model__layers__51__mlp__gate_proj__weight", "model.layers.51.mlp.up_proj.weight": "llamaforcausallm__model__layers__51__mlp__up_proj__weight", "model.layers.51.mlp.down_proj.weight": "llamaforcausallm__model__layers__51__mlp__down_proj__weight", "model.layers.51.input_layernorm.weight": "llamaforcausallm__model__layers__51__input_layernorm__weight", "model.layers.51.post_attention_layernorm.weight": "llamaforcausallm__model__layers__51__post_attention_layernorm__weight", "model.layers.52.self_attn.q_proj.weight": "llamaforcausallm__model__layers__52__self_attn__q_proj__weight", "model.layers.52.self_attn.k_proj.weight": "llamaforcausallm__model__layers__52__self_attn__k_proj__weight", "model.layers.52.self_attn.v_proj.weight": "llamaforcausallm__model__layers__52__self_attn__v_proj__weight", "model.layers.52.self_attn.o_proj.weight": "llamaforcausallm__model__layers__52__self_attn__o_proj__weight", "model.layers.52.mlp.gate_proj.weight": "llamaforcausallm__model__layers__52__mlp__gate_proj__weight", "model.layers.52.mlp.up_proj.weight": "llamaforcausallm__model__layers__52__mlp__up_proj__weight", "model.layers.52.mlp.down_proj.weight": "llamaforcausallm__model__layers__52__mlp__down_proj__weight", "model.layers.52.input_layernorm.weight": "llamaforcausallm__model__layers__52__input_layernorm__weight", "model.layers.52.post_attention_layernorm.weight": "llamaforcausallm__model__layers__52__post_attention_layernorm__weight", "model.layers.53.self_attn.q_proj.weight": "llamaforcausallm__model__layers__53__self_attn__q_proj__weight", "model.layers.53.self_attn.k_proj.weight": "llamaforcausallm__model__layers__53__self_attn__k_proj__weight", "model.layers.53.self_attn.v_proj.weight": "llamaforcausallm__model__layers__53__self_attn__v_proj__weight", "model.layers.53.self_attn.o_proj.weight": "llamaforcausallm__model__layers__53__self_attn__o_proj__weight", "model.layers.53.mlp.gate_proj.weight": "llamaforcausallm__model__layers__53__mlp__gate_proj__weight", "model.layers.53.mlp.up_proj.weight": "llamaforcausallm__model__layers__53__mlp__up_proj__weight", "model.layers.53.mlp.down_proj.weight": "llamaforcausallm__model__layers__53__mlp__down_proj__weight", "model.layers.53.input_layernorm.weight": "llamaforcausallm__model__layers__53__input_layernorm__weight", "model.layers.53.post_attention_layernorm.weight": "llamaforcausallm__model__layers__53__post_attention_layernorm__weight", "model.layers.54.self_attn.q_proj.weight": "llamaforcausallm__model__layers__54__self_attn__q_proj__weight", "model.layers.54.self_attn.k_proj.weight": "llamaforcausallm__model__layers__54__self_attn__k_proj__weight", "model.layers.54.self_attn.v_proj.weight": "llamaforcausallm__model__layers__54__self_attn__v_proj__weight", "model.layers.54.self_attn.o_proj.weight": "llamaforcausallm__model__layers__54__self_attn__o_proj__weight", 
"model.layers.54.mlp.gate_proj.weight": "llamaforcausallm__model__layers__54__mlp__gate_proj__weight", "model.layers.54.mlp.up_proj.weight": "llamaforcausallm__model__layers__54__mlp__up_proj__weight", "model.layers.54.mlp.down_proj.weight": "llamaforcausallm__model__layers__54__mlp__down_proj__weight", "model.layers.54.input_layernorm.weight": "llamaforcausallm__model__layers__54__input_layernorm__weight", "model.layers.54.post_attention_layernorm.weight": "llamaforcausallm__model__layers__54__post_attention_layernorm__weight", "model.layers.55.self_attn.q_proj.weight": "llamaforcausallm__model__layers__55__self_attn__q_proj__weight", "model.layers.55.self_attn.k_proj.weight": "llamaforcausallm__model__layers__55__self_attn__k_proj__weight", "model.layers.55.self_attn.v_proj.weight": "llamaforcausallm__model__layers__55__self_attn__v_proj__weight", "model.layers.55.self_attn.o_proj.weight": "llamaforcausallm__model__layers__55__self_attn__o_proj__weight", "model.layers.55.mlp.gate_proj.weight": "llamaforcausallm__model__layers__55__mlp__gate_proj__weight", "model.layers.55.mlp.up_proj.weight": "llamaforcausallm__model__layers__55__mlp__up_proj__weight", "model.layers.55.mlp.down_proj.weight": "llamaforcausallm__model__layers__55__mlp__down_proj__weight", "model.layers.55.input_layernorm.weight": "llamaforcausallm__model__layers__55__input_layernorm__weight", "model.layers.55.post_attention_layernorm.weight": "llamaforcausallm__model__layers__55__post_attention_layernorm__weight", "model.layers.56.self_attn.q_proj.weight": "llamaforcausallm__model__layers__56__self_attn__q_proj__weight", "model.layers.56.self_attn.k_proj.weight": "llamaforcausallm__model__layers__56__self_attn__k_proj__weight", "model.layers.56.self_attn.v_proj.weight": "llamaforcausallm__model__layers__56__self_attn__v_proj__weight", "model.layers.56.self_attn.o_proj.weight": "llamaforcausallm__model__layers__56__self_attn__o_proj__weight", "model.layers.56.mlp.gate_proj.weight": "llamaforcausallm__model__layers__56__mlp__gate_proj__weight", "model.layers.56.mlp.up_proj.weight": "llamaforcausallm__model__layers__56__mlp__up_proj__weight", "model.layers.56.mlp.down_proj.weight": "llamaforcausallm__model__layers__56__mlp__down_proj__weight", "model.layers.56.input_layernorm.weight": "llamaforcausallm__model__layers__56__input_layernorm__weight", "model.layers.56.post_attention_layernorm.weight": "llamaforcausallm__model__layers__56__post_attention_layernorm__weight", "model.layers.57.self_attn.q_proj.weight": "llamaforcausallm__model__layers__57__self_attn__q_proj__weight", "model.layers.57.self_attn.k_proj.weight": "llamaforcausallm__model__layers__57__self_attn__k_proj__weight", "model.layers.57.self_attn.v_proj.weight": "llamaforcausallm__model__layers__57__self_attn__v_proj__weight", "model.layers.57.self_attn.o_proj.weight": "llamaforcausallm__model__layers__57__self_attn__o_proj__weight", "model.layers.57.mlp.gate_proj.weight": "llamaforcausallm__model__layers__57__mlp__gate_proj__weight", "model.layers.57.mlp.up_proj.weight": "llamaforcausallm__model__layers__57__mlp__up_proj__weight", "model.layers.57.mlp.down_proj.weight": "llamaforcausallm__model__layers__57__mlp__down_proj__weight", "model.layers.57.input_layernorm.weight": "llamaforcausallm__model__layers__57__input_layernorm__weight", "model.layers.57.post_attention_layernorm.weight": "llamaforcausallm__model__layers__57__post_attention_layernorm__weight", "model.layers.58.self_attn.q_proj.weight": 
"llamaforcausallm__model__layers__58__self_attn__q_proj__weight", "model.layers.58.self_attn.k_proj.weight": "llamaforcausallm__model__layers__58__self_attn__k_proj__weight", "model.layers.58.self_attn.v_proj.weight": "llamaforcausallm__model__layers__58__self_attn__v_proj__weight", "model.layers.58.self_attn.o_proj.weight": "llamaforcausallm__model__layers__58__self_attn__o_proj__weight", "model.layers.58.mlp.gate_proj.weight": "llamaforcausallm__model__layers__58__mlp__gate_proj__weight", "model.layers.58.mlp.up_proj.weight": "llamaforcausallm__model__layers__58__mlp__up_proj__weight", "model.layers.58.mlp.down_proj.weight": "llamaforcausallm__model__layers__58__mlp__down_proj__weight", "model.layers.58.input_layernorm.weight": "llamaforcausallm__model__layers__58__input_layernorm__weight", "model.layers.58.post_attention_layernorm.weight": "llamaforcausallm__model__layers__58__post_attention_layernorm__weight", "model.layers.59.self_attn.q_proj.weight": "llamaforcausallm__model__layers__59__self_attn__q_proj__weight", "model.layers.59.self_attn.k_proj.weight": "llamaforcausallm__model__layers__59__self_attn__k_proj__weight", "model.layers.59.self_attn.v_proj.weight": "llamaforcausallm__model__layers__59__self_attn__v_proj__weight", "model.layers.59.self_attn.o_proj.weight": "llamaforcausallm__model__layers__59__self_attn__o_proj__weight", "model.layers.59.mlp.gate_proj.weight": "llamaforcausallm__model__layers__59__mlp__gate_proj__weight", "model.layers.59.mlp.up_proj.weight": "llamaforcausallm__model__layers__59__mlp__up_proj__weight", "model.layers.59.mlp.down_proj.weight": "llamaforcausallm__model__layers__59__mlp__down_proj__weight", "model.layers.59.input_layernorm.weight": "llamaforcausallm__model__layers__59__input_layernorm__weight", "model.layers.59.post_attention_layernorm.weight": "llamaforcausallm__model__layers__59__post_attention_layernorm__weight", "model.layers.60.self_attn.q_proj.weight": "llamaforcausallm__model__layers__60__self_attn__q_proj__weight", "model.layers.60.self_attn.k_proj.weight": "llamaforcausallm__model__layers__60__self_attn__k_proj__weight", "model.layers.60.self_attn.v_proj.weight": "llamaforcausallm__model__layers__60__self_attn__v_proj__weight", "model.layers.60.self_attn.o_proj.weight": "llamaforcausallm__model__layers__60__self_attn__o_proj__weight", "model.layers.60.mlp.gate_proj.weight": "llamaforcausallm__model__layers__60__mlp__gate_proj__weight", "model.layers.60.mlp.up_proj.weight": "llamaforcausallm__model__layers__60__mlp__up_proj__weight", "model.layers.60.mlp.down_proj.weight": "llamaforcausallm__model__layers__60__mlp__down_proj__weight", "model.layers.60.input_layernorm.weight": "llamaforcausallm__model__layers__60__input_layernorm__weight", "model.layers.60.post_attention_layernorm.weight": "llamaforcausallm__model__layers__60__post_attention_layernorm__weight", "model.layers.61.self_attn.q_proj.weight": "llamaforcausallm__model__layers__61__self_attn__q_proj__weight", "model.layers.61.self_attn.k_proj.weight": "llamaforcausallm__model__layers__61__self_attn__k_proj__weight", "model.layers.61.self_attn.v_proj.weight": "llamaforcausallm__model__layers__61__self_attn__v_proj__weight", "model.layers.61.self_attn.o_proj.weight": "llamaforcausallm__model__layers__61__self_attn__o_proj__weight", "model.layers.61.mlp.gate_proj.weight": "llamaforcausallm__model__layers__61__mlp__gate_proj__weight", "model.layers.61.mlp.up_proj.weight": "llamaforcausallm__model__layers__61__mlp__up_proj__weight", "model.layers.61.mlp.down_proj.weight": 
"llamaforcausallm__model__layers__61__mlp__down_proj__weight", "model.layers.61.input_layernorm.weight": "llamaforcausallm__model__layers__61__input_layernorm__weight", "model.layers.61.post_attention_layernorm.weight": "llamaforcausallm__model__layers__61__post_attention_layernorm__weight", "model.layers.62.self_attn.q_proj.weight": "llamaforcausallm__model__layers__62__self_attn__q_proj__weight", "model.layers.62.self_attn.k_proj.weight": "llamaforcausallm__model__layers__62__self_attn__k_proj__weight", "model.layers.62.self_attn.v_proj.weight": "llamaforcausallm__model__layers__62__self_attn__v_proj__weight", "model.layers.62.self_attn.o_proj.weight": "llamaforcausallm__model__layers__62__self_attn__o_proj__weight", "model.layers.62.mlp.gate_proj.weight": "llamaforcausallm__model__layers__62__mlp__gate_proj__weight", "model.layers.62.mlp.up_proj.weight": "llamaforcausallm__model__layers__62__mlp__up_proj__weight", "model.layers.62.mlp.down_proj.weight": "llamaforcausallm__model__layers__62__mlp__down_proj__weight", "model.layers.62.input_layernorm.weight": "llamaforcausallm__model__layers__62__input_layernorm__weight", "model.layers.62.post_attention_layernorm.weight": "llamaforcausallm__model__layers__62__post_attention_layernorm__weight", "model.layers.63.self_attn.q_proj.weight": "llamaforcausallm__model__layers__63__self_attn__q_proj__weight", "model.layers.63.self_attn.k_proj.weight": "llamaforcausallm__model__layers__63__self_attn__k_proj__weight", "model.layers.63.self_attn.v_proj.weight": "llamaforcausallm__model__layers__63__self_attn__v_proj__weight", "model.layers.63.self_attn.o_proj.weight": "llamaforcausallm__model__layers__63__self_attn__o_proj__weight", "model.layers.63.mlp.gate_proj.weight": "llamaforcausallm__model__layers__63__mlp__gate_proj__weight", "model.layers.63.mlp.up_proj.weight": "llamaforcausallm__model__layers__63__mlp__up_proj__weight", "model.layers.63.mlp.down_proj.weight": "llamaforcausallm__model__layers__63__mlp__down_proj__weight", "model.layers.63.input_layernorm.weight": "llamaforcausallm__model__layers__63__input_layernorm__weight", "model.layers.63.post_attention_layernorm.weight": "llamaforcausallm__model__layers__63__post_attention_layernorm__weight", "model.layers.64.self_attn.q_proj.weight": "llamaforcausallm__model__layers__64__self_attn__q_proj__weight", "model.layers.64.self_attn.k_proj.weight": "llamaforcausallm__model__layers__64__self_attn__k_proj__weight", "model.layers.64.self_attn.v_proj.weight": "llamaforcausallm__model__layers__64__self_attn__v_proj__weight", "model.layers.64.self_attn.o_proj.weight": "llamaforcausallm__model__layers__64__self_attn__o_proj__weight", "model.layers.64.mlp.gate_proj.weight": "llamaforcausallm__model__layers__64__mlp__gate_proj__weight", "model.layers.64.mlp.up_proj.weight": "llamaforcausallm__model__layers__64__mlp__up_proj__weight", "model.layers.64.mlp.down_proj.weight": "llamaforcausallm__model__layers__64__mlp__down_proj__weight", "model.layers.64.input_layernorm.weight": "llamaforcausallm__model__layers__64__input_layernorm__weight", "model.layers.64.post_attention_layernorm.weight": "llamaforcausallm__model__layers__64__post_attention_layernorm__weight", "model.layers.65.self_attn.q_proj.weight": "llamaforcausallm__model__layers__65__self_attn__q_proj__weight", "model.layers.65.self_attn.k_proj.weight": "llamaforcausallm__model__layers__65__self_attn__k_proj__weight", "model.layers.65.self_attn.v_proj.weight": "llamaforcausallm__model__layers__65__self_attn__v_proj__weight", 
"model.layers.65.self_attn.o_proj.weight": "llamaforcausallm__model__layers__65__self_attn__o_proj__weight", "model.layers.65.mlp.gate_proj.weight": "llamaforcausallm__model__layers__65__mlp__gate_proj__weight", "model.layers.65.mlp.up_proj.weight": "llamaforcausallm__model__layers__65__mlp__up_proj__weight", "model.layers.65.mlp.down_proj.weight": "llamaforcausallm__model__layers__65__mlp__down_proj__weight", "model.layers.65.input_layernorm.weight": "llamaforcausallm__model__layers__65__input_layernorm__weight", "model.layers.65.post_attention_layernorm.weight": "llamaforcausallm__model__layers__65__post_attention_layernorm__weight", "model.layers.66.self_attn.q_proj.weight": "llamaforcausallm__model__layers__66__self_attn__q_proj__weight", "model.layers.66.self_attn.k_proj.weight": "llamaforcausallm__model__layers__66__self_attn__k_proj__weight", "model.layers.66.self_attn.v_proj.weight": "llamaforcausallm__model__layers__66__self_attn__v_proj__weight", "model.layers.66.self_attn.o_proj.weight": "llamaforcausallm__model__layers__66__self_attn__o_proj__weight", "model.layers.66.mlp.gate_proj.weight": "llamaforcausallm__model__layers__66__mlp__gate_proj__weight", "model.layers.66.mlp.up_proj.weight": "llamaforcausallm__model__layers__66__mlp__up_proj__weight", "model.layers.66.mlp.down_proj.weight": "llamaforcausallm__model__layers__66__mlp__down_proj__weight", "model.layers.66.input_layernorm.weight": "llamaforcausallm__model__layers__66__input_layernorm__weight", "model.layers.66.post_attention_layernorm.weight": "llamaforcausallm__model__layers__66__post_attention_layernorm__weight", "model.layers.67.self_attn.q_proj.weight": "llamaforcausallm__model__layers__67__self_attn__q_proj__weight", "model.layers.67.self_attn.k_proj.weight": "llamaforcausallm__model__layers__67__self_attn__k_proj__weight", "model.layers.67.self_attn.v_proj.weight": "llamaforcausallm__model__layers__67__self_attn__v_proj__weight", "model.layers.67.self_attn.o_proj.weight": "llamaforcausallm__model__layers__67__self_attn__o_proj__weight", "model.layers.67.mlp.gate_proj.weight": "llamaforcausallm__model__layers__67__mlp__gate_proj__weight", "model.layers.67.mlp.up_proj.weight": "llamaforcausallm__model__layers__67__mlp__up_proj__weight", "model.layers.67.mlp.down_proj.weight": "llamaforcausallm__model__layers__67__mlp__down_proj__weight", "model.layers.67.input_layernorm.weight": "llamaforcausallm__model__layers__67__input_layernorm__weight", "model.layers.67.post_attention_layernorm.weight": "llamaforcausallm__model__layers__67__post_attention_layernorm__weight", "model.layers.68.self_attn.q_proj.weight": "llamaforcausallm__model__layers__68__self_attn__q_proj__weight", "model.layers.68.self_attn.k_proj.weight": "llamaforcausallm__model__layers__68__self_attn__k_proj__weight", "model.layers.68.self_attn.v_proj.weight": "llamaforcausallm__model__layers__68__self_attn__v_proj__weight", "model.layers.68.self_attn.o_proj.weight": "llamaforcausallm__model__layers__68__self_attn__o_proj__weight", "model.layers.68.mlp.gate_proj.weight": "llamaforcausallm__model__layers__68__mlp__gate_proj__weight", "model.layers.68.mlp.up_proj.weight": "llamaforcausallm__model__layers__68__mlp__up_proj__weight", "model.layers.68.mlp.down_proj.weight": "llamaforcausallm__model__layers__68__mlp__down_proj__weight", "model.layers.68.input_layernorm.weight": "llamaforcausallm__model__layers__68__input_layernorm__weight", "model.layers.68.post_attention_layernorm.weight": 
"llamaforcausallm__model__layers__68__post_attention_layernorm__weight", "model.layers.69.self_attn.q_proj.weight": "llamaforcausallm__model__layers__69__self_attn__q_proj__weight", "model.layers.69.self_attn.k_proj.weight": "llamaforcausallm__model__layers__69__self_attn__k_proj__weight", "model.layers.69.self_attn.v_proj.weight": "llamaforcausallm__model__layers__69__self_attn__v_proj__weight", "model.layers.69.self_attn.o_proj.weight": "llamaforcausallm__model__layers__69__self_attn__o_proj__weight", "model.layers.69.mlp.gate_proj.weight": "llamaforcausallm__model__layers__69__mlp__gate_proj__weight", "model.layers.69.mlp.up_proj.weight": "llamaforcausallm__model__layers__69__mlp__up_proj__weight", "model.layers.69.mlp.down_proj.weight": "llamaforcausallm__model__layers__69__mlp__down_proj__weight", "model.layers.69.input_layernorm.weight": "llamaforcausallm__model__layers__69__input_layernorm__weight", "model.layers.69.post_attention_layernorm.weight": "llamaforcausallm__model__layers__69__post_attention_layernorm__weight", "model.layers.70.self_attn.q_proj.weight": "llamaforcausallm__model__layers__70__self_attn__q_proj__weight", "model.layers.70.self_attn.k_proj.weight": "llamaforcausallm__model__layers__70__self_attn__k_proj__weight", "model.layers.70.self_attn.v_proj.weight": "llamaforcausallm__model__layers__70__self_attn__v_proj__weight", "model.layers.70.self_attn.o_proj.weight": "llamaforcausallm__model__layers__70__self_attn__o_proj__weight", "model.layers.70.mlp.gate_proj.weight": "llamaforcausallm__model__layers__70__mlp__gate_proj__weight", "model.layers.70.mlp.up_proj.weight": "llamaforcausallm__model__layers__70__mlp__up_proj__weight", "model.layers.70.mlp.down_proj.weight": "llamaforcausallm__model__layers__70__mlp__down_proj__weight", "model.layers.70.input_layernorm.weight": "llamaforcausallm__model__layers__70__input_layernorm__weight", "model.layers.70.post_attention_layernorm.weight": "llamaforcausallm__model__layers__70__post_attention_layernorm__weight", "model.layers.71.self_attn.q_proj.weight": "llamaforcausallm__model__layers__71__self_attn__q_proj__weight", "model.layers.71.self_attn.k_proj.weight": "llamaforcausallm__model__layers__71__self_attn__k_proj__weight", "model.layers.71.self_attn.v_proj.weight": "llamaforcausallm__model__layers__71__self_attn__v_proj__weight", "model.layers.71.self_attn.o_proj.weight": "llamaforcausallm__model__layers__71__self_attn__o_proj__weight", "model.layers.71.mlp.gate_proj.weight": "llamaforcausallm__model__layers__71__mlp__gate_proj__weight", "model.layers.71.mlp.up_proj.weight": "llamaforcausallm__model__layers__71__mlp__up_proj__weight", "model.layers.71.mlp.down_proj.weight": "llamaforcausallm__model__layers__71__mlp__down_proj__weight", "model.layers.71.input_layernorm.weight": "llamaforcausallm__model__layers__71__input_layernorm__weight", "model.layers.71.post_attention_layernorm.weight": "llamaforcausallm__model__layers__71__post_attention_layernorm__weight", "model.layers.72.self_attn.q_proj.weight": "llamaforcausallm__model__layers__72__self_attn__q_proj__weight", "model.layers.72.self_attn.k_proj.weight": "llamaforcausallm__model__layers__72__self_attn__k_proj__weight", "model.layers.72.self_attn.v_proj.weight": "llamaforcausallm__model__layers__72__self_attn__v_proj__weight", "model.layers.72.self_attn.o_proj.weight": "llamaforcausallm__model__layers__72__self_attn__o_proj__weight", "model.layers.72.mlp.gate_proj.weight": "llamaforcausallm__model__layers__72__mlp__gate_proj__weight", 
"model.layers.72.mlp.up_proj.weight": "llamaforcausallm__model__layers__72__mlp__up_proj__weight", "model.layers.72.mlp.down_proj.weight": "llamaforcausallm__model__layers__72__mlp__down_proj__weight", "model.layers.72.input_layernorm.weight": "llamaforcausallm__model__layers__72__input_layernorm__weight", "model.layers.72.post_attention_layernorm.weight": "llamaforcausallm__model__layers__72__post_attention_layernorm__weight", "model.layers.73.self_attn.q_proj.weight": "llamaforcausallm__model__layers__73__self_attn__q_proj__weight", "model.layers.73.self_attn.k_proj.weight": "llamaforcausallm__model__layers__73__self_attn__k_proj__weight", "model.layers.73.self_attn.v_proj.weight": "llamaforcausallm__model__layers__73__self_attn__v_proj__weight", "model.layers.73.self_attn.o_proj.weight": "llamaforcausallm__model__layers__73__self_attn__o_proj__weight", "model.layers.73.mlp.gate_proj.weight": "llamaforcausallm__model__layers__73__mlp__gate_proj__weight", "model.layers.73.mlp.up_proj.weight": "llamaforcausallm__model__layers__73__mlp__up_proj__weight", "model.layers.73.mlp.down_proj.weight": "llamaforcausallm__model__layers__73__mlp__down_proj__weight", "model.layers.73.input_layernorm.weight": "llamaforcausallm__model__layers__73__input_layernorm__weight", "model.layers.73.post_attention_layernorm.weight": "llamaforcausallm__model__layers__73__post_attention_layernorm__weight", "model.layers.74.self_attn.q_proj.weight": "llamaforcausallm__model__layers__74__self_attn__q_proj__weight", "model.layers.74.self_attn.k_proj.weight": "llamaforcausallm__model__layers__74__self_attn__k_proj__weight", "model.layers.74.self_attn.v_proj.weight": "llamaforcausallm__model__layers__74__self_attn__v_proj__weight", "model.layers.74.self_attn.o_proj.weight": "llamaforcausallm__model__layers__74__self_attn__o_proj__weight", "model.layers.74.mlp.gate_proj.weight": "llamaforcausallm__model__layers__74__mlp__gate_proj__weight", "model.layers.74.mlp.up_proj.weight": "llamaforcausallm__model__layers__74__mlp__up_proj__weight", "model.layers.74.mlp.down_proj.weight": "llamaforcausallm__model__layers__74__mlp__down_proj__weight", "model.layers.74.input_layernorm.weight": "llamaforcausallm__model__layers__74__input_layernorm__weight", "model.layers.74.post_attention_layernorm.weight": "llamaforcausallm__model__layers__74__post_attention_layernorm__weight", "model.layers.75.self_attn.q_proj.weight": "llamaforcausallm__model__layers__75__self_attn__q_proj__weight", "model.layers.75.self_attn.k_proj.weight": "llamaforcausallm__model__layers__75__self_attn__k_proj__weight", "model.layers.75.self_attn.v_proj.weight": "llamaforcausallm__model__layers__75__self_attn__v_proj__weight", "model.layers.75.self_attn.o_proj.weight": "llamaforcausallm__model__layers__75__self_attn__o_proj__weight", "model.layers.75.mlp.gate_proj.weight": "llamaforcausallm__model__layers__75__mlp__gate_proj__weight", "model.layers.75.mlp.up_proj.weight": "llamaforcausallm__model__layers__75__mlp__up_proj__weight", "model.layers.75.mlp.down_proj.weight": "llamaforcausallm__model__layers__75__mlp__down_proj__weight", "model.layers.75.input_layernorm.weight": "llamaforcausallm__model__layers__75__input_layernorm__weight", "model.layers.75.post_attention_layernorm.weight": "llamaforcausallm__model__layers__75__post_attention_layernorm__weight", "model.layers.76.self_attn.q_proj.weight": "llamaforcausallm__model__layers__76__self_attn__q_proj__weight", "model.layers.76.self_attn.k_proj.weight": 
"llamaforcausallm__model__layers__76__self_attn__k_proj__weight", "model.layers.76.self_attn.v_proj.weight": "llamaforcausallm__model__layers__76__self_attn__v_proj__weight", "model.layers.76.self_attn.o_proj.weight": "llamaforcausallm__model__layers__76__self_attn__o_proj__weight", "model.layers.76.mlp.gate_proj.weight": "llamaforcausallm__model__layers__76__mlp__gate_proj__weight", "model.layers.76.mlp.up_proj.weight": "llamaforcausallm__model__layers__76__mlp__up_proj__weight", "model.layers.76.mlp.down_proj.weight": "llamaforcausallm__model__layers__76__mlp__down_proj__weight", "model.layers.76.input_layernorm.weight": "llamaforcausallm__model__layers__76__input_layernorm__weight", "model.layers.76.post_attention_layernorm.weight": "llamaforcausallm__model__layers__76__post_attention_layernorm__weight", "model.layers.77.self_attn.q_proj.weight": "llamaforcausallm__model__layers__77__self_attn__q_proj__weight", "model.layers.77.self_attn.k_proj.weight": "llamaforcausallm__model__layers__77__self_attn__k_proj__weight", "model.layers.77.self_attn.v_proj.weight": "llamaforcausallm__model__layers__77__self_attn__v_proj__weight", "model.layers.77.self_attn.o_proj.weight": "llamaforcausallm__model__layers__77__self_attn__o_proj__weight", "model.layers.77.mlp.gate_proj.weight": "llamaforcausallm__model__layers__77__mlp__gate_proj__weight", "model.layers.77.mlp.up_proj.weight": "llamaforcausallm__model__layers__77__mlp__up_proj__weight", "model.layers.77.mlp.down_proj.weight": "llamaforcausallm__model__layers__77__mlp__down_proj__weight", "model.layers.77.input_layernorm.weight": "llamaforcausallm__model__layers__77__input_layernorm__weight", "model.layers.77.post_attention_layernorm.weight": "llamaforcausallm__model__layers__77__post_attention_layernorm__weight", "model.layers.78.self_attn.q_proj.weight": "llamaforcausallm__model__layers__78__self_attn__q_proj__weight", "model.layers.78.self_attn.k_proj.weight": "llamaforcausallm__model__layers__78__self_attn__k_proj__weight", "model.layers.78.self_attn.v_proj.weight": "llamaforcausallm__model__layers__78__self_attn__v_proj__weight", "model.layers.78.self_attn.o_proj.weight": "llamaforcausallm__model__layers__78__self_attn__o_proj__weight", "model.layers.78.mlp.gate_proj.weight": "llamaforcausallm__model__layers__78__mlp__gate_proj__weight", "model.layers.78.mlp.up_proj.weight": "llamaforcausallm__model__layers__78__mlp__up_proj__weight", "model.layers.78.mlp.down_proj.weight": "llamaforcausallm__model__layers__78__mlp__down_proj__weight", "model.layers.78.input_layernorm.weight": "llamaforcausallm__model__layers__78__input_layernorm__weight", "model.layers.78.post_attention_layernorm.weight": "llamaforcausallm__model__layers__78__post_attention_layernorm__weight", "model.layers.79.self_attn.q_proj.weight": "llamaforcausallm__model__layers__79__self_attn__q_proj__weight", "model.layers.79.self_attn.k_proj.weight": "llamaforcausallm__model__layers__79__self_attn__k_proj__weight", "model.layers.79.self_attn.v_proj.weight": "llamaforcausallm__model__layers__79__self_attn__v_proj__weight", "model.layers.79.self_attn.o_proj.weight": "llamaforcausallm__model__layers__79__self_attn__o_proj__weight", "model.layers.79.mlp.gate_proj.weight": "llamaforcausallm__model__layers__79__mlp__gate_proj__weight", "model.layers.79.mlp.up_proj.weight": "llamaforcausallm__model__layers__79__mlp__up_proj__weight", "model.layers.79.mlp.down_proj.weight": "llamaforcausallm__model__layers__79__mlp__down_proj__weight", "model.layers.79.input_layernorm.weight": 
"llamaforcausallm__model__layers__79__input_layernorm__weight", "model.layers.79.post_attention_layernorm.weight": "llamaforcausallm__model__layers__79__post_attention_layernorm__weight", "model.norm.weight": "llamaforcausallm__model__norm__weight", "lm_head.weight": "llamaforcausallm__lm_head__weight"}
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cd4bd3344487b8282d748f5f54bb0a0ebeb54a1a2b0129c375e136c12238920
+ size 1103319
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
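The flags above determine how raw text is framed at encode time: `add_bos_token: true` together with `add_eos_token: false` means encoded sequences start with `<s>` (id 1) and get no trailing `</s>`. A minimal sketch of the effect, again assuming a local clone (`./` is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./")  # placeholder for a local clone
ids = tok("Hello").input_ids
# add_bos_token=true prepends <s>; add_eos_token=false leaves </s> off.
print(ids[0] == tok.bos_token_id)   # True
print(ids[-1] == tok.eos_token_id)  # False
```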