valhalla committed on
Commit
ed82d0c
1 Parent(s): 4eef10a

Upload folder using huggingface_hub

Browse files
commit-message=600/config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "MaskGiTUViT_v2",
3
+ "_version": "0.0.1",
4
+ "adapter_proj_dim": 32,
5
+ "add_cond_embeds": true,
6
+ "add_micro_cond_embeds": true,
7
+ "attention_dropout": 0.0,
8
+ "block_num_heads": 12,
9
+ "block_out_channels": [
10
+ 768
11
+ ],
12
+ "codebook_size": 8192,
13
+ "cond_embed_dim": 768,
14
+ "decay": 0.9999,
15
+ "encoder_hidden_size": 768,
16
+ "force_down_up_sample": true,
17
+ "hidden_dropout": 0.0,
18
+ "hidden_size": 1024,
19
+ "in_channels": 768,
20
+ "intermediate_size": 2816,
21
+ "inv_gamma": 1.0,
22
+ "is_adapter_shared": false,
23
+ "layer_norm_eps": 1e-06,
24
+ "ln_elementwise_affine": true,
25
+ "mask_token_id": 8255,
26
+ "micro_cond_embed_dim": 1280,
27
+ "micro_cond_encode_dim": 256,
28
+ "min_decay": 0.0,
29
+ "norm_type": "rmsnorm",
30
+ "num_attention_heads": 16,
31
+ "num_hidden_layers": 22,
32
+ "num_res_blocks": 3,
33
+ "optimization_step": 600,
34
+ "power": 0.6666666666666666,
35
+ "update_after_step": 0,
36
+ "use_adapter": true,
37
+ "use_bias": false,
38
+ "use_ema_warmup": false,
39
+ "use_fused_mlp": false,
40
+ "use_fused_residual_norm": true,
41
+ "vocab_size": 8256
42
+ }
commit-message=600/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ab52cbfbc5dc3ad5bfd2702c9966243a457b01bb0d18d885c1505ac52e355e6
3
+ size 2439015333