pszemraj committed
Commit a3d4f33
1 Parent(s): 475fce6

Upload config.json

Files changed (1)
  1. config.json (+2 -12)
config.json CHANGED
@@ -4,19 +4,12 @@
     "GPT2LMHeadModel"
   ],
   "attn_pdrop": 0.1,
-  "auto_map": {
-    "AutoModelForCausalLM": "modeling_gpt2.GPT2LMHeadModel"
-  },
   "bos_token_id": 50256,
-  "do_sample": true,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
   "gradient_checkpointing": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
-  "length_penalty": 2.3,
-  "max_length": 64,
-  "min_length": 16,
   "model_type": "gpt2",
   "n_ctx": 2048,
   "n_embd": 2048,
@@ -24,10 +17,7 @@
   "n_inner": null,
   "n_layer": 24,
   "n_positions": 2048,
-  "no_repeat_ngram_size": 4,
-  "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
-  "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
@@ -35,7 +25,7 @@
   "summary_type": "cls_index",
   "summary_use_proj": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.18.0",
-  "use_cache": false,
+  "transformers_version": "4.10.3",
+  "use_cache": true,
   "vocab_size": 100000
 }
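For context, the keys dropped by this commit are the custom `auto_map` entry and a set of per-call generation defaults (`do_sample`, `max_length`, `min_length`, `length_penalty`, `no_repeat_ngram_size`); with them removed from config.json, equivalent settings can instead be passed to `generate()` at call time. A minimal sketch under that assumption, using a placeholder repo id that is not taken from this commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id for illustration only; substitute the actual model repository.
repo_id = "pszemraj/example-gpt2"

# With "auto_map" removed from config.json, the stock GPT2LMHeadModel class is used.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("The quick brown fox", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,          # formerly "do_sample": true in config.json
    max_length=64,           # formerly "max_length": 64
    min_length=16,           # formerly "min_length": 16
    length_penalty=2.3,      # formerly "length_penalty": 2.3
    no_repeat_ngram_size=4,  # formerly "no_repeat_ngram_size": 4
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```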