PingVortex committed
Commit 8c8c3ec · verified · 1 parent: 54d97ea

Upload 5 files

Files changed (5)
  1. config.json +45 -0
  2. merges.txt +0 -0
  3. model.safetensors +3 -0
  4. tokenizer.json +0 -0
  5. vocab.json +0 -0
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 6,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
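
The config describes a 6-layer, 12-head GPT-2-style causal language model (768-dim embeddings, 1024-token context, 50257-token vocabulary), with sampling defaults recorded under task_specific_params. A minimal sketch of loading and generating with it via transformers; the repo id below is a hypothetical placeholder, since the commit page does not name the repository:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id -- substitute the actual Hugging Face repo path.
repo_id = "PingVortex/example-model"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# from_pretrained builds the architecture from config.json and
# loads the weights from model.safetensors.
model = AutoModelForCausalLM.from_pretrained(repo_id)

# Matches the task_specific_params.text-generation defaults above.
inputs = tokenizer("Hello, world", return_tensors="pt")
outputs = model.generate(**inputs, do_sample=True, max_length=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```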
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ea725b8b49ebf10ea0375d2291f1773a33e8a5881b33149a27477d9e2322ef5
+ size 327657928
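
The weights are stored through Git LFS, so the repository itself holds only this three-line pointer; the 327,657,928-byte payload (consistent with roughly 82M float32 parameters for the 6-layer config above) lives in LFS storage, addressed by its SHA-256. A small sketch for checking a downloaded model.safetensors against the pointer's oid and size:

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_OID = "8ea725b8b49ebf10ea0375d2291f1773a33e8a5881b33149a27477d9e2322ef5"
EXPECTED_SIZE = 327657928

path = Path("model.safetensors")  # the downloaded weights, not the pointer file
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

# Hash in 1 MiB chunks to avoid loading ~328 MB into memory at once.
sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "hash mismatch"
print("model.safetensors matches the LFS pointer")
```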
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab.json ADDED
The diff for this file is too large to render. See raw diff
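
vocab.json and merges.txt are the classic GPT-2 byte-level BPE vocabulary and merge rules, and tokenizer.json is the bundled fast-tokenizer equivalent of the pair. A brief sketch of loading them directly with transformers, assuming the three files sit in the working directory:

```python
from transformers import GPT2TokenizerFast

# tokenizer.json already bundles the vocab and merges; passing all three
# keeps the slow-tokenizer files available as a fallback.
tok = GPT2TokenizerFast(
    vocab_file="vocab.json",
    merges_file="merges.txt",
    tokenizer_file="tokenizer.json",
)

ids = tok.encode("Hello, world")
print(ids)              # token ids under the 50257-entry GPT-2 vocabulary
print(tok.decode(ids))  # round-trips back to the original text
```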