luoruipu committed
Commit c850f8e · 1 Parent(s): a4edfdf

update model weight
config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_name_or_path": "/mnt/bn/luoruipu-disk/checkpoints/stable-valley-13b-v1",
   "architectures": [
-    "LlamaForCausalLM"
+    "ValleyLlamaForCausalLM"
   ],
   "bos_token_id": 1,
   "eos_token_id": 2,
@@ -16,7 +16,7 @@
   "mm_use_im_start_end": true,
   "mm_vision_select_layer": -2,
   "mm_vision_tower": "openai/clip-vit-large-patch14",
-  "model_type": "llama",
+  "model_type": "Valley",
   "num_attention_heads": 40,
   "num_hidden_layers": 40,
   "pad_token_id": 0,
pytorch_model-00003-of-00003.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e806a991b99f97a660f0fedb9df1db37000a04195655d471f77bf8818816eb06
-size 6179055305
+oid sha256:a69123dbce20f1df880f8c442ade4a352c1f61c54b18700b4f784e221ca81634
+size 6796057306
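The third shard is replaced outright: the Git LFS pointer records a new sha256 and the size grows from 6,179,055,305 to 6,796,057,306 bytes, about 617 MB more, matching the vision weights added to the index below. A small integrity check against the new pointer, as a sketch that only assumes the shard has already been downloaded next to the script; the hash and size are taken verbatim from the pointer above:

```python
# Verify a downloaded shard against the LFS pointer from this commit.
import hashlib
from pathlib import Path

EXPECTED_SHA256 = "a69123dbce20f1df880f8c442ade4a352c1f61c54b18700b4f784e221ca81634"
EXPECTED_SIZE = 6796057306  # bytes, from the "size" line of the pointer

path = Path("pytorch_model-00003-of-00003.bin")
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch: incomplete download?"

h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_SHA256, "sha256 mismatch: corrupted download?"
print("pytorch_model-00003-of-00003.bin matches the LFS pointer")
```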
pytorch_model.bin.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 26031882240
+    "total_size": 26648739848
   },
   "weight_map": {
     "lm_head.weight": "pytorch_model-00003-of-00003.bin",
@@ -405,6 +405,400 @@
     "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
     "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
     "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
-    "model.norm.weight": "pytorch_model-00003-of-00003.bin"
+    "model.mm_projector.bias": "pytorch_model-00003-of-00003.bin",
+    "model.mm_projector.weight": "pytorch_model-00003-of-00003.bin",
+    "model.norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.embeddings.class_embedding": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.embeddings.position_embedding.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.embeddings.position_ids": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+    … (encoder layers 1 through 23 repeat the same 16 entries each: layer_norm1/2, mlp.fc1/fc2, and self_attn k/q/v/out_proj weights and biases, all mapped to "pytorch_model-00003-of-00003.bin") …
+    "model.vision_tower.vision_model.post_layernorm.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.post_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.pre_layrnorm.bias": "pytorch_model-00003-of-00003.bin",
+    "model.vision_tower.vision_model.pre_layrnorm.weight": "pytorch_model-00003-of-00003.bin"
   }
 }
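The updated index now maps the multimodal projector (`model.mm_projector.*`) and the full CLIP vision tower (`model.vision_tower.vision_model.*`) into the third shard, and `total_size` grows from 26,031,882,240 to 26,648,739,848 bytes, roughly the same ~617 MB by which shard 3 itself grew. A short inspection sketch, assuming the checkpoint files sit in a local directory (the path is a placeholder; key names and sizes are the ones shown in the diff above):

```python
# Sketch: inspect the updated shard index for the new projector/vision entries.
import json
from collections import Counter

with open("/path/to/stable-valley-13b-v1/pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])   # 26648739848 after this commit

# How many tensors each shard holds after the update.
print(Counter(index["weight_map"].values()))

# The entries added in this commit: projector + CLIP vision tower, all in shard 3.
added = {k: v for k, v in index["weight_map"].items()
         if k.startswith(("model.mm_projector.", "model.vision_tower."))}
print(len(added), set(added.values()))   # 394 {'pytorch_model-00003-of-00003.bin'}
```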