kearney committed on
Commit
fdf6615
·
1 Parent(s): 60f68c1

End of training

Browse files
Files changed (4) hide show
  1. README.md +13 -13
  2. config.json +26 -38
  3. model.safetensors +2 -2
  4. training_args.bin +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  license: apache-2.0
3
- base_model: bert-base-uncased
4
  tags:
5
  - generated_from_trainer
6
  metrics:
@@ -15,10 +15,10 @@ should probably proofread and complete it, then remove this comment. -->
15
 
16
  # office-character
17
 
18
- This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
19
  It achieves the following results on the evaluation set:
20
- - Loss: 2.5407
21
- - Accuracy: 0.1139
22
 
23
  ## Model description
24
 
@@ -37,25 +37,25 @@ More information needed
37
  ### Training hyperparameters
38
 
39
  The following hyperparameters were used during training:
40
- - learning_rate: 1e-06
41
  - train_batch_size: 20
42
  - eval_batch_size: 20
43
  - seed: 42
44
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
45
  - lr_scheduler_type: linear
46
- - num_epochs: 1.5
47
 
48
  ### Training results
49
 
50
  | Training Loss | Epoch | Step | Validation Loss | Accuracy |
51
  |:-------------:|:-----:|:----:|:---------------:|:--------:|
52
- | 2.5927 | 0.2 | 120 | 2.5634 | 0.1007 |
53
- | 2.5723 | 0.4 | 240 | 2.5531 | 0.1054 |
54
- | 2.5774 | 0.6 | 360 | 2.5483 | 0.1043 |
55
- | 2.5738 | 0.8 | 480 | 2.5448 | 0.1095 |
56
- | 2.5755 | 1.0 | 600 | 2.5432 | 0.1095 |
57
- | 2.5688 | 1.2 | 720 | 2.5416 | 0.1132 |
58
- | 2.5632 | 1.4 | 840 | 2.5407 | 0.1139 |
59
 
60
 
61
  ### Framework versions
 
1
  ---
2
  license: apache-2.0
3
+ base_model: distilbert-base-uncased
4
  tags:
5
  - generated_from_trainer
6
  metrics:
 
15
 
16
  # office-character
17
 
18
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 2.0632
21
+ - Accuracy: 0.2481
22
 
23
  ## Model description
24
 
 
37
  ### Training hyperparameters
38
 
39
  The following hyperparameters were used during training:
40
+ - learning_rate: 5e-05
41
  - train_batch_size: 20
42
  - eval_batch_size: 20
43
  - seed: 42
44
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
45
  - lr_scheduler_type: linear
46
+ - num_epochs: 3
47
 
48
  ### Training results
49
 
50
  | Training Loss | Epoch | Step | Validation Loss | Accuracy |
51
  |:-------------:|:-----:|:----:|:---------------:|:--------:|
52
+ | 2.0701 | 0.43 | 120 | 2.0673 | 0.16 |
53
+ | 2.0385 | 0.86 | 240 | 2.0445 | 0.1825 |
54
+ | 1.94 | 1.29 | 360 | 2.0320 | 0.2156 |
55
+ | 1.8606 | 1.71 | 480 | 2.0290 | 0.225 |
56
+ | 1.7839 | 2.14 | 600 | 2.0392 | 0.2431 |
57
+ | 1.5807 | 2.57 | 720 | 2.0641 | 0.24 |
58
+ | 1.5676 | 3.0 | 840 | 2.0632 | 0.2481 |
59
 
60
 
61
  ### Framework versions
config.json CHANGED
@@ -1,57 +1,45 @@
1
  {
2
- "_name_or_path": "bert-base-uncased",
 
3
  "architectures": [
4
- "BertForSequenceClassification"
5
  ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "classifier_dropout": null,
8
- "gradient_checkpointing": false,
9
- "hidden_act": "gelu",
10
- "hidden_dropout_prob": 0.1,
11
- "hidden_size": 768,
12
  "id2label": {
13
  "0": "Andy",
14
  "1": "Angela",
15
- "2": "Darryl",
16
- "3": "Dwight",
17
- "4": "Erin",
18
- "5": "Jim",
19
- "6": "Kelly",
20
- "7": "Kevin",
21
- "8": "Michael",
22
- "9": "Oscar",
23
- "10": "Pam",
24
- "11": "Phyllis",
25
- "12": "Ryan"
26
  },
27
  "initializer_range": 0.02,
28
- "intermediate_size": 3072,
29
  "label2id": {
30
  "Andy": 0,
31
  "Angela": 1,
32
- "Darryl": 2,
33
- "Dwight": 3,
34
- "Erin": 4,
35
- "Jim": 5,
36
- "Kelly": 6,
37
- "Kevin": 7,
38
- "Michael": 8,
39
- "Oscar": 9,
40
- "Pam": 10,
41
- "Phyllis": 11,
42
- "Ryan": 12
43
  },
44
- "layer_norm_eps": 1e-12,
45
  "max_position_embeddings": 512,
46
- "model_type": "bert",
47
- "num_attention_heads": 12,
48
- "num_hidden_layers": 12,
49
  "pad_token_id": 0,
50
- "position_embedding_type": "absolute",
51
  "problem_type": "single_label_classification",
 
 
 
 
52
  "torch_dtype": "float32",
53
  "transformers_version": "4.32.1",
54
- "type_vocab_size": 2,
55
- "use_cache": true,
56
  "vocab_size": 30522
57
  }
 
1
  {
2
+ "_name_or_path": "distilbert-base-uncased",
3
+ "activation": "gelu",
4
  "architectures": [
5
+ "DistilBertForSequenceClassification"
6
  ],
7
+ "attention_dropout": 0.1,
8
+ "dim": 768,
9
+ "dropout": 0.1,
10
+ "hidden_dim": 3072,
 
 
11
  "id2label": {
12
  "0": "Andy",
13
  "1": "Angela",
14
+ "2": "Dwight",
15
+ "3": "Jim",
16
+ "4": "Kevin",
17
+ "5": "Michael",
18
+ "6": "Oscar",
19
+ "7": "Pam"
 
 
 
 
 
20
  },
21
  "initializer_range": 0.02,
 
22
  "label2id": {
23
  "Andy": 0,
24
  "Angela": 1,
25
+ "Dwight": 2,
26
+ "Jim": 3,
27
+ "Kevin": 4,
28
+ "Michael": 5,
29
+ "Oscar": 6,
30
+ "Pam": 7
 
 
 
 
 
31
  },
 
32
  "max_position_embeddings": 512,
33
+ "model_type": "distilbert",
34
+ "n_heads": 12,
35
+ "n_layers": 6,
36
  "pad_token_id": 0,
 
37
  "problem_type": "single_label_classification",
38
+ "qa_dropout": 0.1,
39
+ "seq_classif_dropout": 0.2,
40
+ "sinusoidal_pos_embds": false,
41
+ "tie_weights_": true,
42
  "torch_dtype": "float32",
43
  "transformers_version": "4.32.1",
 
 
44
  "vocab_size": 30522
45
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e7439284d09a45f3cfe964f3bfe671e8c033d6ead5d39084ec717da854201195
3
- size 437992484
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e35e8f79cd2a7f47b004f270f7add5e927359a3694b804c90af09162f93e3cb
3
+ size 267851024
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cd55bd9630a3e593db1b61e77b3caf29661dbd33dd3736a23bb39271e7ed029c
3
  size 4027
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0de371ea6a4d604e9bb82ebc2fd3fc78dca32ef2c6eeec57aab9d1f46631a8b8
3
  size 4027