raulgdp committed
Commit a90212f · verified · 1 Parent(s): c6cc867

Training in progress, epoch 1

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "NazaGara/NER-fine-tuned-BETO",
+  "_name_or_path": "google-bert/bert-base-cased",
   "architectures": [
     "BertForTokenClassification"
   ],
@@ -10,82 +10,39 @@
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
-    "0": "B_CANCER_CONCEPT",
-    "1": "B_CHEMOTHERAPY",
-    "2": "B_DATE",
-    "3": "B_DRUG",
-    "4": "B_FAMILY",
-    "5": "B_FREQ",
-    "6": "B_IMPLICIT_DATE",
-    "7": "B_INTERVAL",
-    "8": "B_METRIC",
-    "9": "B_OCURRENCE_EVENT",
-    "10": "B_QUANTITY",
-    "11": "B_RADIOTHERAPY",
-    "12": "B_SMOKER_STATUS",
-    "13": "B_STAGE",
-    "14": "B_SURGERY",
-    "15": "B_TNM",
-    "16": "I_CANCER_CONCEPT",
-    "17": "I_DATE",
-    "18": "I_DRUG",
-    "19": "I_FAMILY",
-    "20": "I_FREQ",
-    "21": "I_IMPLICIT_DATE",
-    "22": "I_INTERVAL",
-    "23": "I_METRIC",
-    "24": "I_OCURRENCE_EVENT",
-    "25": "I_SMOKER_STATUS",
-    "26": "I_STAGE",
-    "27": "I_SURGERY",
-    "28": "I_TNM",
-    "29": "O"
+    "0": "O",
+    "1": "B-PER",
+    "2": "I-PER",
+    "3": "B-ORG",
+    "4": "I-ORG",
+    "5": "B-LOC",
+    "6": "I-LOC",
+    "7": "B-MISC",
+    "8": "I-MISC"
   },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "B_CANCER_CONCEPT": 0,
-    "B_CHEMOTHERAPY": 1,
-    "B_DATE": 2,
-    "B_DRUG": 3,
-    "B_FAMILY": 4,
-    "B_FREQ": 5,
-    "B_IMPLICIT_DATE": 6,
-    "B_INTERVAL": 7,
-    "B_METRIC": 8,
-    "B_OCURRENCE_EVENT": 9,
-    "B_QUANTITY": 10,
-    "B_RADIOTHERAPY": 11,
-    "B_SMOKER_STATUS": 12,
-    "B_STAGE": 13,
-    "B_SURGERY": 14,
-    "B_TNM": 15,
-    "I_CANCER_CONCEPT": 16,
-    "I_DATE": 17,
-    "I_DRUG": 18,
-    "I_FAMILY": 19,
-    "I_FREQ": 20,
-    "I_IMPLICIT_DATE": 21,
-    "I_INTERVAL": 22,
-    "I_METRIC": 23,
-    "I_OCURRENCE_EVENT": 24,
-    "I_SMOKER_STATUS": 25,
-    "I_STAGE": 26,
-    "I_SURGERY": 27,
-    "I_TNM": 28,
-    "O": 29
+    "B-LOC": 5,
+    "B-MISC": 7,
+    "B-ORG": 3,
+    "B-PER": 1,
+    "I-LOC": 6,
+    "I-MISC": 8,
+    "I-ORG": 4,
+    "I-PER": 2,
+    "O": 0
   },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "output_past": true,
-  "pad_token_id": 1,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.46.2",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 31002
+  "vocab_size": 28996
 }
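The config rewrite above is what re-initializing the token-classification head from the new base checkpoint produces: the clinical BETO label set is replaced by the nine CoNLL-2003-style NER labels. As a minimal sketch (assuming the standard transformers API; the actual training script is not part of this commit), a config with exactly these values comes from:

from transformers import AutoModelForTokenClassification, AutoTokenizer

# CoNLL-2003-style label set, in the same order as the new id2label above.
labels = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG",
          "B-LOC", "I-LOC", "B-MISC", "I-MISC"]
id2label = {i: label for i, label in enumerate(labels)}
label2id = {label: i for i, label in id2label.items()}

# Loading bert-base-cased with these labels yields the new config values in
# the diff: vocab_size 28996, pad_token_id 0, and a 9-way classifier head.
model = AutoModelForTokenClassification.from_pretrained(
    "google-bert/bert-base-cased",
    num_labels=len(labels),
    id2label=id2label,
    label2id=label2id,
)
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")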
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:09088173a42d3b80d2e015ef1d10ed5011e4e5b74be66b29a7fa61958a51f26a
-size 437156776
+oid sha256:05b9292b948245f5edc26607bf0bb9a0dada1b678d9d0969ce4471560ad86a71
+size 430929740
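The roughly 6.2 MB drop in the checkpoint is consistent with the config change rather than an artifact: the embedding matrix loses 31002 - 28996 = 2006 rows, and the classifier head shrinks from 30 to 9 labels. A back-of-the-envelope check, assuming fp32 weights and hidden_size 768 (both stated in config.json):

embedding_delta = (31002 - 28996) * 768 * 4   # 2006 fewer fp32 embedding rows: 6,162,432 bytes
classifier_delta = (30 - 9) * (768 + 1) * 4   # 21 fewer classifier rows plus biases: 64,596 bytes
print(embedding_delta + classifier_delta)     # 6,227,028, vs. observed 437156776 - 430929740 = 6,227,036

The few remaining bytes are plausibly serialization metadata in the safetensors header.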
runs/Nov16_00-54-18_1d16b391ae26/events.out.tfevents.1731718464.1d16b391ae26.326.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0eec7d158c0f80d06b98606cf04934cb27e0c46b12bfac75acef4485b8fb15a6
+size 6315
special_tokens_map.json CHANGED
@@ -1,37 +1,7 @@
 {
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "[UNK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
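The simplified map stores each special token as a plain string instead of an AddedToken object with per-token lstrip/rstrip/normalized flags; the tokens themselves are unchanged. A quick round-trip check, as a sketch assuming the standard transformers API:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
# special_tokens_map exposes the five specials as plain strings,
# matching the new file contents above.
print(tok.special_tokens_map)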
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -2,39 +2,39 @@
   "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
-      "content": "[MASK]",
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "1": {
-      "content": "[PAD]",
+    "100": {
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "3": {
-      "content": "[UNK]",
+    "101": {
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "4": {
-      "content": "[CLS]",
+    "102": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "5": {
-      "content": "[SEP]",
+    "103": {
+      "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -44,15 +44,12 @@
   },
   "clean_up_tokenization_spaces": false,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
   "do_lower_case": false,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "never_split": null,
-  "num_labels": 9,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "strip_accents": false,
+  "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7b4bf572f2a7ac155812882c84626cf338a191c1410ac8ff92bad2ca84b62f9d
+oid sha256:b1822f4241d2b1e0457daee5e5c2c1e40d76f7d2f9140ea1d52f776183f0bad7
 size 5304
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff