{
  "metadata": {
    "total_size": 14026295296
  },
  "weight_map": {
    "embed_out.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.embed_in.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.final_layer_norm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.final_layer_norm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.0.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.0.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.1.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.10.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.11.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.12.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.13.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.14.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.15.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.16.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.17.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.18.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.19.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.2.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.20.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.21.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.22.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.23.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.23.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.23.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.24.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.24.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.25.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.26.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.27.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.28.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.29.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.3.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.3.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.30.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.30.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.attention.dense.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.attention.query_key_value.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.attention.rotary_emb.scale": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.mlp.out_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.mlp.packed_input_proj.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.post_attention_layernorm.bias": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.31.post_attention_layernorm.weight": "pytorch_model.fp16-00002-of-00002.bin",
    "transformer.layers.4.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.4.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.5.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.6.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.7.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.8.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.attention.dense.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.attention.query_key_value.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.attention.rotary_emb.scale": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.mlp.out_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.mlp.packed_input_proj.weight": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.post_attention_layernorm.bias": "pytorch_model.fp16-00001-of-00002.bin",
    "transformer.layers.9.post_attention_layernorm.weight": "pytorch_model.fp16-00001-of-00002.bin"
  }
}