|
{
  "metadata": {
    "total_size": 6444642304
  },
  "weight_map": {
    "logits_dense.weight": "model-00002-of-00002.safetensors",
    "model.decoder.embeddings.embed.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.0.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.1.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.10.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.11.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.12.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.13.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.13.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.13.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.14.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.pre_ca_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.pre_mlp_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.pre_sa_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.self_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.self_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.self_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.14.self_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.pre_ca_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.pre_mlp_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.pre_sa_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.self_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.self_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.self_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.15.self_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.pre_ca_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.pre_mlp_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.pre_sa_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.self_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.self_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.self_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.16.self_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.pre_ca_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.pre_mlp_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.pre_sa_norm.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.self_attention.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.self_attention.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.self_attention.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.17.self_attention.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.decoder.layers.2.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.2.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.3.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.4.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.5.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.6.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.7.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.8.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.cross_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.cross_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.cross_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.cross_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.pre_ca_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.pre_mlp_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.layers.9.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.decoder.norm.weight": "model-00002-of-00002.safetensors",
    "model.encoder.embedding.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.0.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.1.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.10.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.11.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.2.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.3.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.4.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.5.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.6.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.7.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.8.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.post_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.pre_sa_norm.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.self_attention.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.self_attention.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.self_attention.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.layers.9.self_attention.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.encoder.norm.weight": "model-00001-of-00002.safetensors"
  }
}