eacortes committed on
Commit
297a8a9
·
verified ·
1 Parent(s): b6e82c6

Upload 19 files

Browse files
Files changed (19) hide show
  1. README.md +313 -3
  2. config.json +473 -0
  3. configuration_modchembert.py +84 -0
  4. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_222031.log +349 -0
  5. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_224419.log +393 -0
  6. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250919_005825.log +369 -0
  7. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250922_102847.log +339 -0
  8. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250919_003207.log +361 -0
  9. logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_231229.log +343 -0
  10. logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_223910.log +329 -0
  11. logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250919_000714.log +327 -0
  12. logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_222034.log +373 -0
  13. logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_230313.log +353 -0
  14. logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_231637.log +373 -0
  15. model.safetensors +3 -0
  16. modeling_modchembert.py +554 -0
  17. special_tokens_map.json +37 -0
  18. tokenizer.json +2554 -0
  19. tokenizer_config.json +60 -0
README.md CHANGED
@@ -1,3 +1,313 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: Derify/ModChemBERT-MLM
4
+ datasets:
5
+ - Derify/augmented_canonical_druglike_QED_Pfizer_15M
6
+ metrics:
7
+ - roc_auc
8
+ - rmse
9
+ library_name: transformers
10
+ tags:
11
+ - modernbert
12
+ - ModChemBERT
13
+ - cheminformatics
14
+ - chemical-language-model
15
+ - molecular-property-prediction
16
+ pipeline_tag: fill-mask
17
+ model-index:
18
+ - name: Derify/ModChemBERT-MLM-DAPT
19
+ results:
20
+ - task:
21
+ type: text-classification
22
+ name: Classification (ROC AUC)
23
+ dataset:
24
+ name: BACE
25
+ type: BACE
26
+ metrics:
27
+ - type: roc_auc
28
+ value: 0.8224
29
+ - task:
30
+ type: text-classification
31
+ name: Classification (ROC AUC)
32
+ dataset:
33
+ name: BBBP
34
+ type: BBBP
35
+ metrics:
36
+ - type: roc_auc
37
+ value: 0.7402
38
+ - task:
39
+ type: text-classification
40
+ name: Classification (ROC AUC)
41
+ dataset:
42
+ name: CLINTOX
43
+ type: CLINTOX
44
+ metrics:
45
+ - type: roc_auc
46
+ value: 0.9820
47
+ - task:
48
+ type: text-classification
49
+ name: Classification (ROC AUC)
50
+ dataset:
51
+ name: HIV
52
+ type: HIV
53
+ metrics:
54
+ - type: roc_auc
55
+ value: 0.7702
56
+ - task:
57
+ type: text-classification
58
+ name: Classification (ROC AUC)
59
+ dataset:
60
+ name: SIDER
61
+ type: SIDER
62
+ metrics:
63
+ - type: roc_auc
64
+ value: 0.6303
65
+ - task:
66
+ type: text-classification
67
+ name: Classification (ROC AUC)
68
+ dataset:
69
+ name: TOX21
70
+ type: TOX21
71
+ metrics:
72
+ - type: roc_auc
73
+ value: 0.7360
74
+ - task:
75
+ type: regression
76
+ name: Regression (RMSE)
77
+ dataset:
78
+ name: BACE
79
+ type: BACE
80
+ metrics:
81
+ - type: rmse
82
+ value: 0.9931
83
+ - task:
84
+ type: regression
85
+ name: Regression (RMSE)
86
+ dataset:
87
+ name: CLEARANCE
88
+ type: CLEARANCE
89
+ metrics:
90
+ - type: rmse
91
+ value: 45.4951
92
+ - task:
93
+ type: regression
94
+ name: Regression (RMSE)
95
+ dataset:
96
+ name: ESOL
97
+ type: ESOL
98
+ metrics:
99
+ - type: rmse
100
+ value: 0.9319
101
+ - task:
102
+ type: regression
103
+ name: Regression (RMSE)
104
+ dataset:
105
+ name: FREESOLV
106
+ type: FREESOLV
107
+ metrics:
108
+ - type: rmse
109
+ value: 0.6049
110
+ - task:
111
+ type: regression
112
+ name: Regression (RMSE)
113
+ dataset:
114
+ name: LIPO
115
+ type: LIPO
116
+ metrics:
117
+ - type: rmse
118
+ value: 0.6874
119
+ ---
120
+
121
+ # ModChemBERT: ModernBERT as a Chemical Language Model
122
+ ModChemBERT is a ModernBERT-based chemical language model (CLM), trained on SMILES strings for masked language modeling (MLM) and downstream molecular property prediction (classification & regression).
123
+
124
+ ## Usage
125
+ ### Load Model
126
+ ```python
127
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
128
+
129
+ model_id = "Derify/ModChemBERT-MLM-DAPT"
130
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
131
+ model = AutoModelForMaskedLM.from_pretrained(
132
+ model_id,
133
+ trust_remote_code=True,
134
+ dtype="float16",
135
+ device_map="auto",
136
+ )
137
+ ```
138
+
139
+ ### Fill-Mask Pipeline
140
+ ```python
141
+ from transformers import pipeline
142
+
143
+ fill = pipeline("fill-mask", model=model, tokenizer=tokenizer)
144
+ print(fill("c1ccccc1[MASK]"))
145
+ ```
146
+
147
+ ## Intended Use
148
+ * Primary: Research and development for molecular property prediction, experimentation with pooling strategies, and as a foundational model for downstream applications.
149
+ * Appropriate for: Binary / multi-class classification (e.g., toxicity, activity) and single-task or multi-task regression (e.g., solubility, clearance) after fine-tuning.
150
+ * Not intended for generating novel molecules.
151
+
152
+ ## Limitations
153
+ - Out-of-domain performance may degrade for very long (>128 token) SMILES, inorganic / organometallic compounds, polymers, and charged / enumerated tautomers, as these are not well represented in the training data.
154
+ - No guarantee of synthesizability, safety, or biological efficacy.
155
+
156
+ ## Ethical Considerations & Responsible Use
157
+ - Potential biases arise from training corpora skewed to drug-like space.
158
+ - Do not deploy in clinical or regulatory settings without rigorous, domain-specific validation.
159
+
160
+ ## Architecture
161
+ - Backbone: ModernBERT
162
+ - Hidden size: 768
163
+ - Intermediate size: 1152
164
+ - Encoder Layers: 22
165
+ - Attention heads: 12
166
+ - Max sequence length: 256 tokens (MLM primarily trained with 128-token sequences)
167
+ - Vocabulary: BPE tokenizer using [MolFormer's vocab](https://github.com/emapco/ModChemBERT/blob/main/modchembert/tokenizers/molformer/vocab.json) (2362 tokens)
168
+
169
+ ## Pooling (Classifier / Regressor Head)
170
+ Kallergis et al. [1] demonstrated that the CLM embedding method prior to the prediction head can significantly impact downstream performance.
171
+
172
+ Behrendt et al. [2] noted that the last few layers contain task-specific information and that pooling methods leveraging information from multiple layers can enhance model performance. Their results further demonstrated that the `max_seq_mha` pooling method was particularly effective in low-data regimes, which is often the case for molecular property prediction tasks.
173
+
174
+ Multiple pooling strategies are supported by ModChemBERT to explore their impact on downstream performance:
175
+ - `cls`: Last layer [CLS]
176
+ - `mean`: Mean over last hidden layer
177
+ - `max_cls`: Max over last k layers of [CLS]
178
+ - `cls_mha`: MHA with [CLS] as query
179
+ - `max_seq_mha`: MHA with max pooled sequence as KV and max pooled [CLS] as query
180
+ - `sum_mean`: Sum over all layers then mean tokens
181
+ - `sum_sum`: Sum over all layers then sum tokens
182
+ - `mean_mean`: Mean over all layers then mean tokens
183
+ - `mean_sum`: Mean over all layers then sum tokens
184
+ - `max_seq_mean`: Max over last k layers then mean tokens
185
+
186
+ ## Training Pipeline
187
+ <div align="center">
188
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/656892962693fa22e18b5331/bxNbpgMkU8m60ypyEJoWQ.png" alt="ModChemBERT Training Pipeline" width="650"/>
189
+ </div>
190
+
191
+ ### Rationale for MTR Stage
192
+ Following Sultan et al. [3], multi-task regression (physicochemical properties) biases the latent space toward ADME-related representations prior to narrow TAFT specialization. Sultan et al. observed that MLM + DAPT (MTR) outperforms MLM-only, MTR-only, and MTR + DAPT (MTR).
193
+
194
+ ### Checkpoint Averaging Motivation
195
+ Inspired by ModernBERT [4], JaColBERTv2.5 [5], and Llama 3.1 [6], whose results show that model merging can enhance generalization or performance while mitigating overfitting to any single fine-tune or annealing checkpoint.
196
+
197
+ ## Datasets
198
+ - Pretraining: [Derify/augmented_canonical_druglike_QED_Pfizer_15M](https://huggingface.co/datasets/Derify/augmented_canonical_druglike_QED_Pfizer_15M)
199
+ - Domain Adaptive Pretraining (DAPT) & Task Adaptive Fine-tuning (TAFT): ADME + AstraZeneca datasets (10 tasks) with scaffold splits from DA4MT pipeline (see [domain-adaptation-molecular-transformers](https://github.com/emapco/ModChemBERT/tree/main/domain-adaptation-molecular-transformers))
200
+ - Benchmarking: ChemBERTa-3 [7] tasks (BACE, BBBP, TOX21, HIV, SIDER, CLINTOX for classification; ESOL, FREESOLV, LIPO, BACE, CLEARANCE for regression)
201
+
202
+ ## Benchmarking
203
+ Benchmarks were conducted with the ChemBERTa-3 framework using DeepChem scaffold splits. Each task was trained for 100 epochs with 3 random seeds.
204
+
205
+ ### Evaluation Methodology
206
+ - Classification Metric: ROC AUC.
207
+ - Regression Metric: RMSE.
208
+ - Aggregation: Mean ± standard deviation of the triplicate results.
209
+ - Input Constraints: SMILES truncated / filtered to ≤200 tokens, following the MolFormer paper's recommendation.
210
+
211
+ ### Results
212
+ <details><summary>Click to expand</summary>
213
+
214
+ #### Classification Datasets (ROC AUC - Higher is better)
215
+
216
+ | Model | BACE↑ | BBBP↑ | CLINTOX↑ | HIV↑ | SIDER↑ | TOX21↑ | AVG† |
217
+ | ---------------------------------------------------------------------------- | ----------------- | ----------------- | --------------------- | --------------------- | --------------------- | ----------------- | ------ |
218
+ | **Tasks** | 1 | 1 | 2 | 1 | 27 | 12 | |
219
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 0.781 ± 0.019 | 0.700 ± 0.027 | 0.979 ± 0.022 | 0.740 ± 0.013 | 0.611 ± 0.002 | 0.718 ± 0.011 | 0.7548 |
220
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 0.819 ± 0.019 | 0.735 ± 0.019 | 0.839 ± 0.013 | 0.762 ± 0.005 | 0.618 ± 0.005 | 0.723 ± 0.012 | 0.7493 |
221
+ | MoLFormer-LHPC* | **0.887 ± 0.004** | **0.908 ± 0.013** | 0.993 ± 0.004 | 0.750 ± 0.003 | 0.622 ± 0.007 | **0.791 ± 0.014** | 0.8252 |
222
+ | ------------------------- | ----------------- | ----------------- | ------------------- | ------------------- | ------------------- | ----------------- | ------ |
223
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 0.8065 ± 0.0103 | 0.7222 ± 0.0150 | 0.9709 ± 0.0227 | ***0.7800 ± 0.0133*** | 0.6419 ± 0.0113 | 0.7400 ± 0.0044 | 0.7769 |
224
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.8224 ± 0.0156 | 0.7402 ± 0.0095 | 0.9820 ± 0.0138 | 0.7702 ± 0.0020 | 0.6303 ± 0.0039 | 0.7360 ± 0.0036 | 0.7802 |
225
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 0.7924 ± 0.0155 | 0.7282 ± 0.0058 | 0.9725 ± 0.0213 | 0.7770 ± 0.0047 | 0.6542 ± 0.0128 | *0.7646 ± 0.0039* | 0.7815 |
226
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.8213 ± 0.0051 | 0.7356 ± 0.0094 | 0.9664 ± 0.0202 | 0.7750 ± 0.0048 | 0.6415 ± 0.0094 | 0.7263 ± 0.0036 | 0.7777 |
227
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | *0.8346 ± 0.0045* | *0.7573 ± 0.0120* | ***0.9938 ± 0.0017*** | 0.7737 ± 0.0034 | ***0.6600 ± 0.0061*** | 0.7518 ± 0.0047 | 0.7952 |
228
+
229
+ #### Regression Datasets (RMSE - Lower is better)
230
+
231
+ | Model | BACE↓ | CLEARANCE↓ | ESOL↓ | FREESOLV↓ | LIPO↓ | AVG‡ |
232
+ | ---------------------------------------------------------------------------- | --------------------- | ---------------------- | --------------------- | --------------------- | --------------------- | ---------------- |
233
+ | **Tasks** | 1 | 1 | 1 | 1 | 1 | |
234
+ | [ChemBERTa-100M-MLM](https://huggingface.co/DeepChem/ChemBERTa-100M-MLM)* | 1.011 ± 0.038 | 51.582 ± 3.079 | 0.920 ± 0.011 | 0.536 ± 0.016 | 0.758 ± 0.013 | 0.8063 / 10.9614 |
235
+ | [c3-MoLFormer-1.1B](https://huggingface.co/DeepChem/MoLFormer-c3-1.1B)* | 1.094 ± 0.126 | 52.058 ± 2.767 | 0.829 ± 0.019 | 0.572 ± 0.023 | 0.728 ± 0.016 | 0.8058 / 11.0562 |
236
+ | MoLFormer-LHPC* | 1.201 ± 0.100 | 45.74 ± 2.637 | 0.848 ± 0.031 | 0.683 ± 0.040 | 0.895 ± 0.080 | 0.9068 / 9.8734 |
237
+ | ------------------------- | ------------------- | -------------------- | ------------------- | ------------------- | ------------------- | ---------------- |
238
+ | [MLM](https://huggingface.co/Derify/ModChemBERT-MLM) | 1.0893 ± 0.1319 | 49.0005 ± 1.2787 | 0.8456 ± 0.0406 | 0.5491 ± 0.0134 | 0.7147 ± 0.0062 | 0.7997 / 10.4398 |
239
+ | [MLM + DAPT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT) | 0.9931 ± 0.0258 | 45.4951 ± 0.7112 | 0.9319 ± 0.0153 | 0.6049 ± 0.0666 | 0.6874 ± 0.0040 | 0.8043 / 9.7425 |
240
+ | [MLM + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-TAFT) | 1.0304 ± 0.1146 | 47.8418 ± 0.4070 | ***0.7669 ± 0.0024*** | 0.5293 ± 0.0267 | 0.6708 ± 0.0074 | 0.7493 / 10.1678 |
241
+ | [MLM + DAPT + TAFT](https://huggingface.co/Derify/ModChemBERT-MLM-DAPT-TAFT) | 0.9713 ± 0.0224 | ***42.8010 ± 3.3475*** | 0.8169 ± 0.0268 | 0.5445 ± 0.0257 | 0.6820 ± 0.0028 | 0.7537 / 9.1631 |
242
+ | [MLM + DAPT + TAFT OPT](https://huggingface.co/Derify/ModChemBERT) | ***0.9665 ± 0.0250*** | 44.0137 ± 1.1110 | 0.8158 ± 0.0115 | ***0.4979 ± 0.0158*** | ***0.6505 ± 0.0126*** | 0.7327 / 9.3889 |
243
+
244
+ **Bold** indicates the best result in the column; *italic* indicates the best result among ModChemBERT checkpoints.<br/>
245
+ \* Published results from the ChemBERTa-3 [7] paper for optimized chemical language models using DeepChem scaffold splits.<br/>
246
+ † AVG column shows the mean score across all classification tasks.<br/>
247
+ ‡ AVG column shows the mean scores across all regression tasks without and with the clearance score.
248
+
249
+ </details>
250
+
251
+ ## Optimized ModChemBERT Hyperparameters
252
+
253
+ <details><summary>Click to expand</summary>
254
+
255
+ ### TAFT Datasets
256
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
257
+
258
+ | Dataset | Learning Rate | Batch Size | Warmup Ratio | Classifier Pooling | Last k Layers |
259
+ | ---------------------- | ------------- | ---------- | ------------ | ------------------ | ------------- |
260
+ | adme_microsom_stab_h | 3e-5 | 8 | 0.0 | max_seq_mean | 5 |
261
+ | adme_microsom_stab_r | 3e-5 | 16 | 0.2 | max_cls | 3 |
262
+ | adme_permeability | 3e-5 | 8 | 0.0 | max_cls | 3 |
263
+ | adme_ppb_h | 1e-5 | 32 | 0.1 | max_seq_mean | 5 |
264
+ | adme_ppb_r | 1e-5 | 32 | 0.0 | sum_mean | N/A |
265
+ | adme_solubility | 3e-5 | 32 | 0.0 | sum_mean | N/A |
266
+ | astrazeneca_CL | 3e-5 | 8 | 0.1 | max_seq_mha | 3 |
267
+ | astrazeneca_LogD74 | 1e-5 | 8 | 0.0 | max_seq_mean | 5 |
268
+ | astrazeneca_PPB | 1e-5 | 32 | 0.0 | max_cls | 3 |
269
+ | astrazeneca_Solubility | 1e-5 | 32 | 0.0 | max_seq_mean | 5 |
270
+
271
+ ### Benchmarking Datasets
272
+ Optimal parameters (per dataset) for the `MLM + DAPT + TAFT OPT` merged model:
273
+
274
+ | Dataset | Batch Size | Classifier Pooling | Last k Layers | Pooling Attention Dropout | Classifier Dropout | Embedding Dropout |
275
+ | ------------------- | ---------- | ------------------ | ------------- | ------------------------- | ------------------ | ----------------- |
276
+ | bace_classification | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
277
+ | bbbp | 64 | max_cls | 3 | 0.1 | 0.0 | 0.0 |
278
+ | clintox | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
279
+ | hiv | 32 | max_seq_mha | 3 | 0.0 | 0.0 | 0.0 |
280
+ | sider | 32 | mean | N/A | 0.1 | 0.0 | 0.1 |
281
+ | tox21 | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
282
+ | bace_regression | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
283
+ | clearance | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
284
+ | esol | 64 | sum_mean | N/A | 0.1 | 0.0 | 0.1 |
285
+ | freesolv | 32 | max_seq_mha | 5 | 0.1 | 0.0 | 0.0 |
286
+ | lipo | 32 | max_seq_mha | 3 | 0.1 | 0.1 | 0.1 |
287
+
288
+ </details>
289
+
290
+ ## Hardware
291
+ Training and experiments were performed on 2 NVIDIA RTX 3090 GPUs.
292
+
293
+ ## Citation
294
+ If you use ModChemBERT in your research, please cite the checkpoint and the following:
295
+ ```
296
+ @software{cortes-2025-modchembert,
297
+ author = {Emmanuel Cortes},
298
+ title = {ModChemBERT: ModernBERT as a Chemical Language Model},
299
+ year = {2025},
300
+ publisher = {GitHub},
301
+ howpublished = {GitHub repository},
302
+ url = {https://github.com/emapco/ModChemBERT}
303
+ }
304
+ ```
305
+
306
+ ## References
307
+ 1. Kallergis, Georgios, et al. "Domain adaptable language modeling of chemical compounds identifies potent pathoblockers for Pseudomonas aeruginosa." Communications Chemistry 8.1 (2025): 114.
308
+ 2. Behrendt, Maike, Stefan Sylvius Wagner, and Stefan Harmeling. "MaxPoolBERT: Enhancing BERT Classification via Layer-and Token-Wise Aggregation." arXiv preprint arXiv:2505.15696 (2025).
309
+ 3. Sultan, Afnan, et al. "Transformers for molecular property prediction: Domain adaptation efficiently improves performance." arXiv preprint arXiv:2503.03360 (2025).
310
+ 4. Warner, Benjamin, et al. "Smarter, better, faster, longer: A modern bidirectional encoder for fast, memory efficient, and long context finetuning and inference." arXiv preprint arXiv:2412.13663 (2024).
311
+ 5. Clavié, Benjamin. "JaColBERTv2.5: Optimising Multi-Vector Retrievers to Create State-of-the-Art Japanese Retrievers with Constrained Resources." Journal of Natural Language Processing 32.1 (2025): 176-218.
312
+ 6. Grattafiori, Aaron, et al. "The llama 3 herd of models." arXiv preprint arXiv:2407.21783 (2024).
313
+ 7. Singh, Riya, et al. "ChemBERTa-3: An Open Source Training Framework for Chemical Foundation Models." (2025).
config.json ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ModChemBertForMaskedLM",
4
+ "ModChemBertForSequenceClassification"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.1,
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_modchembert.ModChemBertConfig",
10
+ "AutoModelForMaskedLM": "modeling_modchembert.ModChemBertForMaskedLM",
11
+ "AutoModelForSequenceClassification": "modeling_modchembert.ModChemBertForSequenceClassification"
12
+ },
13
+ "bos_token_id": 0,
14
+ "classifier_activation": "gelu",
15
+ "classifier_bias": false,
16
+ "classifier_dropout": 0.0,
17
+ "classifier_pooling": "max_seq_mha",
18
+ "classifier_pooling_attention_dropout": 0.1,
19
+ "classifier_pooling_last_k": 3,
20
+ "classifier_pooling_num_attention_heads": 4,
21
+ "cls_token_id": 0,
22
+ "decoder_bias": true,
23
+ "deterministic_flash_attn": false,
24
+ "dtype": "bfloat16",
25
+ "embedding_dropout": 0.1,
26
+ "eos_token_id": 1,
27
+ "global_attn_every_n_layers": 3,
28
+ "global_rope_theta": 160000.0,
29
+ "hidden_activation": "gelu",
30
+ "hidden_size": 768,
31
+ "id2label": {
32
+ "0": "MaxAbsEStateIndex",
33
+ "1": "MaxEStateIndex",
34
+ "2": "MinAbsEStateIndex",
35
+ "3": "MinEStateIndex",
36
+ "4": "qed",
37
+ "5": "SPS",
38
+ "6": "MolWt",
39
+ "7": "HeavyAtomMolWt",
40
+ "8": "ExactMolWt",
41
+ "9": "NumValenceElectrons",
42
+ "10": "MaxPartialCharge",
43
+ "11": "MinPartialCharge",
44
+ "12": "MaxAbsPartialCharge",
45
+ "13": "MinAbsPartialCharge",
46
+ "14": "FpDensityMorgan1",
47
+ "15": "FpDensityMorgan2",
48
+ "16": "FpDensityMorgan3",
49
+ "17": "BCUT2D_MWHI",
50
+ "18": "BCUT2D_MWLOW",
51
+ "19": "BCUT2D_CHGHI",
52
+ "20": "BCUT2D_CHGLO",
53
+ "21": "BCUT2D_LOGPHI",
54
+ "22": "BCUT2D_LOGPLOW",
55
+ "23": "BCUT2D_MRHI",
56
+ "24": "BCUT2D_MRLOW",
57
+ "25": "AvgIpc",
58
+ "26": "BalabanJ",
59
+ "27": "BertzCT",
60
+ "28": "Chi0",
61
+ "29": "Chi0n",
62
+ "30": "Chi0v",
63
+ "31": "Chi1",
64
+ "32": "Chi1n",
65
+ "33": "Chi1v",
66
+ "34": "Chi2n",
67
+ "35": "Chi2v",
68
+ "36": "Chi3n",
69
+ "37": "Chi3v",
70
+ "38": "Chi4n",
71
+ "39": "Chi4v",
72
+ "40": "HallKierAlpha",
73
+ "41": "Kappa1",
74
+ "42": "Kappa2",
75
+ "43": "Kappa3",
76
+ "44": "LabuteASA",
77
+ "45": "PEOE_VSA1",
78
+ "46": "PEOE_VSA10",
79
+ "47": "PEOE_VSA11",
80
+ "48": "PEOE_VSA12",
81
+ "49": "PEOE_VSA13",
82
+ "50": "PEOE_VSA14",
83
+ "51": "PEOE_VSA2",
84
+ "52": "PEOE_VSA3",
85
+ "53": "PEOE_VSA4",
86
+ "54": "PEOE_VSA5",
87
+ "55": "PEOE_VSA6",
88
+ "56": "PEOE_VSA7",
89
+ "57": "PEOE_VSA8",
90
+ "58": "PEOE_VSA9",
91
+ "59": "SMR_VSA1",
92
+ "60": "SMR_VSA10",
93
+ "61": "SMR_VSA2",
94
+ "62": "SMR_VSA3",
95
+ "63": "SMR_VSA4",
96
+ "64": "SMR_VSA5",
97
+ "65": "SMR_VSA6",
98
+ "66": "SMR_VSA7",
99
+ "67": "SMR_VSA9",
100
+ "68": "SlogP_VSA1",
101
+ "69": "SlogP_VSA10",
102
+ "70": "SlogP_VSA11",
103
+ "71": "SlogP_VSA12",
104
+ "72": "SlogP_VSA2",
105
+ "73": "SlogP_VSA3",
106
+ "74": "SlogP_VSA4",
107
+ "75": "SlogP_VSA5",
108
+ "76": "SlogP_VSA6",
109
+ "77": "SlogP_VSA7",
110
+ "78": "SlogP_VSA8",
111
+ "79": "TPSA",
112
+ "80": "EState_VSA1",
113
+ "81": "EState_VSA10",
114
+ "82": "EState_VSA11",
115
+ "83": "EState_VSA2",
116
+ "84": "EState_VSA3",
117
+ "85": "EState_VSA4",
118
+ "86": "EState_VSA5",
119
+ "87": "EState_VSA6",
120
+ "88": "EState_VSA7",
121
+ "89": "EState_VSA8",
122
+ "90": "EState_VSA9",
123
+ "91": "VSA_EState1",
124
+ "92": "VSA_EState10",
125
+ "93": "VSA_EState2",
126
+ "94": "VSA_EState3",
127
+ "95": "VSA_EState4",
128
+ "96": "VSA_EState5",
129
+ "97": "VSA_EState6",
130
+ "98": "VSA_EState7",
131
+ "99": "VSA_EState8",
132
+ "100": "VSA_EState9",
133
+ "101": "FractionCSP3",
134
+ "102": "HeavyAtomCount",
135
+ "103": "NHOHCount",
136
+ "104": "NOCount",
137
+ "105": "NumAliphaticCarbocycles",
138
+ "106": "NumAliphaticHeterocycles",
139
+ "107": "NumAliphaticRings",
140
+ "108": "NumAmideBonds",
141
+ "109": "NumAromaticCarbocycles",
142
+ "110": "NumAromaticHeterocycles",
143
+ "111": "NumAromaticRings",
144
+ "112": "NumAtomStereoCenters",
145
+ "113": "NumBridgeheadAtoms",
146
+ "114": "NumHAcceptors",
147
+ "115": "NumHDonors",
148
+ "116": "NumHeteroatoms",
149
+ "117": "NumHeterocycles",
150
+ "118": "NumRotatableBonds",
151
+ "119": "NumSaturatedCarbocycles",
152
+ "120": "NumSaturatedHeterocycles",
153
+ "121": "NumSaturatedRings",
154
+ "122": "NumSpiroAtoms",
155
+ "123": "NumUnspecifiedAtomStereoCenters",
156
+ "124": "Phi",
157
+ "125": "RingCount",
158
+ "126": "MolLogP",
159
+ "127": "MolMR",
160
+ "128": "fr_Al_COO",
161
+ "129": "fr_Al_OH",
162
+ "130": "fr_Al_OH_noTert",
163
+ "131": "fr_ArN",
164
+ "132": "fr_Ar_COO",
165
+ "133": "fr_Ar_N",
166
+ "134": "fr_Ar_NH",
167
+ "135": "fr_Ar_OH",
168
+ "136": "fr_COO",
169
+ "137": "fr_COO2",
170
+ "138": "fr_C_O",
171
+ "139": "fr_C_O_noCOO",
172
+ "140": "fr_C_S",
173
+ "141": "fr_HOCCN",
174
+ "142": "fr_Imine",
175
+ "143": "fr_NH0",
176
+ "144": "fr_NH1",
177
+ "145": "fr_NH2",
178
+ "146": "fr_N_O",
179
+ "147": "fr_Ndealkylation1",
180
+ "148": "fr_Ndealkylation2",
181
+ "149": "fr_Nhpyrrole",
182
+ "150": "fr_SH",
183
+ "151": "fr_aldehyde",
184
+ "152": "fr_alkyl_carbamate",
185
+ "153": "fr_alkyl_halide",
186
+ "154": "fr_allylic_oxid",
187
+ "155": "fr_amide",
188
+ "156": "fr_amidine",
189
+ "157": "fr_aniline",
190
+ "158": "fr_aryl_methyl",
191
+ "159": "fr_azide",
192
+ "160": "fr_azo",
193
+ "161": "fr_barbitur",
194
+ "162": "fr_benzene",
195
+ "163": "fr_benzodiazepine",
196
+ "164": "fr_bicyclic",
197
+ "165": "fr_dihydropyridine",
198
+ "166": "fr_epoxide",
199
+ "167": "fr_ester",
200
+ "168": "fr_ether",
201
+ "169": "fr_furan",
202
+ "170": "fr_guanido",
203
+ "171": "fr_halogen",
204
+ "172": "fr_hdrzine",
205
+ "173": "fr_hdrzone",
206
+ "174": "fr_imidazole",
207
+ "175": "fr_imide",
208
+ "176": "fr_ketone",
209
+ "177": "fr_ketone_Topliss",
210
+ "178": "fr_lactam",
211
+ "179": "fr_lactone",
212
+ "180": "fr_methoxy",
213
+ "181": "fr_morpholine",
214
+ "182": "fr_nitrile",
215
+ "183": "fr_nitro",
216
+ "184": "fr_nitro_arom",
217
+ "185": "fr_nitro_arom_nonortho",
218
+ "186": "fr_oxazole",
219
+ "187": "fr_oxime",
220
+ "188": "fr_para_hydroxylation",
221
+ "189": "fr_phenol",
222
+ "190": "fr_phenol_noOrthoHbond",
223
+ "191": "fr_phos_acid",
224
+ "192": "fr_phos_ester",
225
+ "193": "fr_piperdine",
226
+ "194": "fr_piperzine",
227
+ "195": "fr_priamide",
228
+ "196": "fr_pyridine",
229
+ "197": "fr_quatN",
230
+ "198": "fr_sulfide",
231
+ "199": "fr_sulfonamd",
232
+ "200": "fr_sulfone",
233
+ "201": "fr_term_acetylene",
234
+ "202": "fr_tetrazole",
235
+ "203": "fr_thiazole",
236
+ "204": "fr_thiophene",
237
+ "205": "fr_unbrch_alkane",
238
+ "206": "fr_urea"
239
+ },
240
+ "initializer_cutoff_factor": 2.0,
241
+ "initializer_range": 0.02,
242
+ "intermediate_size": 1152,
243
+ "label2id": {
244
+ "AvgIpc": 25,
245
+ "BCUT2D_CHGHI": 19,
246
+ "BCUT2D_CHGLO": 20,
247
+ "BCUT2D_LOGPHI": 21,
248
+ "BCUT2D_LOGPLOW": 22,
249
+ "BCUT2D_MRHI": 23,
250
+ "BCUT2D_MRLOW": 24,
251
+ "BCUT2D_MWHI": 17,
252
+ "BCUT2D_MWLOW": 18,
253
+ "BalabanJ": 26,
254
+ "BertzCT": 27,
255
+ "Chi0": 28,
256
+ "Chi0n": 29,
257
+ "Chi0v": 30,
258
+ "Chi1": 31,
259
+ "Chi1n": 32,
260
+ "Chi1v": 33,
261
+ "Chi2n": 34,
262
+ "Chi2v": 35,
263
+ "Chi3n": 36,
264
+ "Chi3v": 37,
265
+ "Chi4n": 38,
266
+ "Chi4v": 39,
267
+ "EState_VSA1": 80,
268
+ "EState_VSA10": 81,
269
+ "EState_VSA11": 82,
270
+ "EState_VSA2": 83,
271
+ "EState_VSA3": 84,
272
+ "EState_VSA4": 85,
273
+ "EState_VSA5": 86,
274
+ "EState_VSA6": 87,
275
+ "EState_VSA7": 88,
276
+ "EState_VSA8": 89,
277
+ "EState_VSA9": 90,
278
+ "ExactMolWt": 8,
279
+ "FpDensityMorgan1": 14,
280
+ "FpDensityMorgan2": 15,
281
+ "FpDensityMorgan3": 16,
282
+ "FractionCSP3": 101,
283
+ "HallKierAlpha": 40,
284
+ "HeavyAtomCount": 102,
285
+ "HeavyAtomMolWt": 7,
286
+ "Kappa1": 41,
287
+ "Kappa2": 42,
288
+ "Kappa3": 43,
289
+ "LabuteASA": 44,
290
+ "MaxAbsEStateIndex": 0,
291
+ "MaxAbsPartialCharge": 12,
292
+ "MaxEStateIndex": 1,
293
+ "MaxPartialCharge": 10,
294
+ "MinAbsEStateIndex": 2,
295
+ "MinAbsPartialCharge": 13,
296
+ "MinEStateIndex": 3,
297
+ "MinPartialCharge": 11,
298
+ "MolLogP": 126,
299
+ "MolMR": 127,
300
+ "MolWt": 6,
301
+ "NHOHCount": 103,
302
+ "NOCount": 104,
303
+ "NumAliphaticCarbocycles": 105,
304
+ "NumAliphaticHeterocycles": 106,
305
+ "NumAliphaticRings": 107,
306
+ "NumAmideBonds": 108,
307
+ "NumAromaticCarbocycles": 109,
308
+ "NumAromaticHeterocycles": 110,
309
+ "NumAromaticRings": 111,
310
+ "NumAtomStereoCenters": 112,
311
+ "NumBridgeheadAtoms": 113,
312
+ "NumHAcceptors": 114,
313
+ "NumHDonors": 115,
314
+ "NumHeteroatoms": 116,
315
+ "NumHeterocycles": 117,
316
+ "NumRotatableBonds": 118,
317
+ "NumSaturatedCarbocycles": 119,
318
+ "NumSaturatedHeterocycles": 120,
319
+ "NumSaturatedRings": 121,
320
+ "NumSpiroAtoms": 122,
321
+ "NumUnspecifiedAtomStereoCenters": 123,
322
+ "NumValenceElectrons": 9,
323
+ "PEOE_VSA1": 45,
324
+ "PEOE_VSA10": 46,
325
+ "PEOE_VSA11": 47,
326
+ "PEOE_VSA12": 48,
327
+ "PEOE_VSA13": 49,
328
+ "PEOE_VSA14": 50,
329
+ "PEOE_VSA2": 51,
330
+ "PEOE_VSA3": 52,
331
+ "PEOE_VSA4": 53,
332
+ "PEOE_VSA5": 54,
333
+ "PEOE_VSA6": 55,
334
+ "PEOE_VSA7": 56,
335
+ "PEOE_VSA8": 57,
336
+ "PEOE_VSA9": 58,
337
+ "Phi": 124,
338
+ "RingCount": 125,
339
+ "SMR_VSA1": 59,
340
+ "SMR_VSA10": 60,
341
+ "SMR_VSA2": 61,
342
+ "SMR_VSA3": 62,
343
+ "SMR_VSA4": 63,
344
+ "SMR_VSA5": 64,
345
+ "SMR_VSA6": 65,
346
+ "SMR_VSA7": 66,
347
+ "SMR_VSA9": 67,
348
+ "SPS": 5,
349
+ "SlogP_VSA1": 68,
350
+ "SlogP_VSA10": 69,
351
+ "SlogP_VSA11": 70,
352
+ "SlogP_VSA12": 71,
353
+ "SlogP_VSA2": 72,
354
+ "SlogP_VSA3": 73,
355
+ "SlogP_VSA4": 74,
356
+ "SlogP_VSA5": 75,
357
+ "SlogP_VSA6": 76,
358
+ "SlogP_VSA7": 77,
359
+ "SlogP_VSA8": 78,
360
+ "TPSA": 79,
361
+ "VSA_EState1": 91,
362
+ "VSA_EState10": 92,
363
+ "VSA_EState2": 93,
364
+ "VSA_EState3": 94,
365
+ "VSA_EState4": 95,
366
+ "VSA_EState5": 96,
367
+ "VSA_EState6": 97,
368
+ "VSA_EState7": 98,
369
+ "VSA_EState8": 99,
370
+ "VSA_EState9": 100,
371
+ "fr_Al_COO": 128,
372
+ "fr_Al_OH": 129,
373
+ "fr_Al_OH_noTert": 130,
374
+ "fr_ArN": 131,
375
+ "fr_Ar_COO": 132,
376
+ "fr_Ar_N": 133,
377
+ "fr_Ar_NH": 134,
378
+ "fr_Ar_OH": 135,
379
+ "fr_COO": 136,
380
+ "fr_COO2": 137,
381
+ "fr_C_O": 138,
382
+ "fr_C_O_noCOO": 139,
383
+ "fr_C_S": 140,
384
+ "fr_HOCCN": 141,
385
+ "fr_Imine": 142,
386
+ "fr_NH0": 143,
387
+ "fr_NH1": 144,
388
+ "fr_NH2": 145,
389
+ "fr_N_O": 146,
390
+ "fr_Ndealkylation1": 147,
391
+ "fr_Ndealkylation2": 148,
392
+ "fr_Nhpyrrole": 149,
393
+ "fr_SH": 150,
394
+ "fr_aldehyde": 151,
395
+ "fr_alkyl_carbamate": 152,
396
+ "fr_alkyl_halide": 153,
397
+ "fr_allylic_oxid": 154,
398
+ "fr_amide": 155,
399
+ "fr_amidine": 156,
400
+ "fr_aniline": 157,
401
+ "fr_aryl_methyl": 158,
402
+ "fr_azide": 159,
403
+ "fr_azo": 160,
404
+ "fr_barbitur": 161,
405
+ "fr_benzene": 162,
406
+ "fr_benzodiazepine": 163,
407
+ "fr_bicyclic": 164,
408
+ "fr_dihydropyridine": 165,
409
+ "fr_epoxide": 166,
410
+ "fr_ester": 167,
411
+ "fr_ether": 168,
412
+ "fr_furan": 169,
413
+ "fr_guanido": 170,
414
+ "fr_halogen": 171,
415
+ "fr_hdrzine": 172,
416
+ "fr_hdrzone": 173,
417
+ "fr_imidazole": 174,
418
+ "fr_imide": 175,
419
+ "fr_ketone": 176,
420
+ "fr_ketone_Topliss": 177,
421
+ "fr_lactam": 178,
422
+ "fr_lactone": 179,
423
+ "fr_methoxy": 180,
424
+ "fr_morpholine": 181,
425
+ "fr_nitrile": 182,
426
+ "fr_nitro": 183,
427
+ "fr_nitro_arom": 184,
428
+ "fr_nitro_arom_nonortho": 185,
429
+ "fr_oxazole": 186,
430
+ "fr_oxime": 187,
431
+ "fr_para_hydroxylation": 188,
432
+ "fr_phenol": 189,
433
+ "fr_phenol_noOrthoHbond": 190,
434
+ "fr_phos_acid": 191,
435
+ "fr_phos_ester": 192,
436
+ "fr_piperdine": 193,
437
+ "fr_piperzine": 194,
438
+ "fr_priamide": 195,
439
+ "fr_pyridine": 196,
440
+ "fr_quatN": 197,
441
+ "fr_sulfide": 198,
442
+ "fr_sulfonamd": 199,
443
+ "fr_sulfone": 200,
444
+ "fr_term_acetylene": 201,
445
+ "fr_tetrazole": 202,
446
+ "fr_thiazole": 203,
447
+ "fr_thiophene": 204,
448
+ "fr_unbrch_alkane": 205,
449
+ "fr_urea": 206,
450
+ "qed": 4
451
+ },
452
+ "layer_norm_eps": 1e-05,
453
+ "local_attention": 8,
454
+ "local_rope_theta": 10000.0,
455
+ "max_position_embeddings": 256,
456
+ "mlp_bias": false,
457
+ "mlp_dropout": 0.1,
458
+ "model_type": "modchembert",
459
+ "norm_bias": false,
460
+ "norm_eps": 1e-05,
461
+ "num_attention_heads": 12,
462
+ "num_hidden_layers": 22,
463
+ "num_labels": 207,
464
+ "pad_token_id": 2,
465
+ "position_embedding_type": "absolute",
466
+ "problem_type": "regression",
467
+ "repad_logits_with_grad": false,
468
+ "sep_token_id": 1,
469
+ "sparse_pred_ignore_index": -100,
470
+ "sparse_prediction": false,
471
+ "transformers_version": "4.56.1",
472
+ "vocab_size": 2362
473
+ }
configuration_modchembert.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Literal
16
+
17
+ from transformers.models.modernbert.configuration_modernbert import ModernBertConfig
18
+
19
+
20
+ class ModChemBertConfig(ModernBertConfig):
21
+ """
22
+ Configuration class for ModChemBert models.
23
+
24
+ This configuration class extends ModernBertConfig with additional parameters specific to
25
+ chemical molecule modeling and custom pooling strategies for classification/regression tasks.
26
+ It accepts all arguments and keyword arguments from ModernBertConfig.
27
+
28
+ Args:
29
+ classifier_pooling (str, optional): Pooling strategy for sequence classification.
30
+ Available options:
31
+ - "cls": Use CLS token representation
32
+ - "mean": Attention-weighted average pooling
33
+ - "sum_mean": Sum all hidden states across layers, then mean pool over sequence (ChemLM approach)
34
+ - "sum_sum": Sum all hidden states across layers, then sum pool over sequence
35
+ - "mean_mean": Mean all hidden states across layers, then mean pool over sequence
36
+ - "mean_sum": Mean all hidden states across layers, then sum pool over sequence
37
+ - "max_cls": Element-wise max pooling over last k hidden states, then take CLS token
38
+ - "cls_mha": Multi-head attention with CLS token as query and full sequence as keys/values
39
+ - "max_seq_mha": Max pooling over last k states + multi-head attention with CLS as query
40
+ - "max_seq_mean": Max pooling over last k hidden states, then mean pooling over sequence
41
+ Defaults to "max_seq_mha".
42
+ classifier_pooling_num_attention_heads (int, optional): Number of attention heads for multi-head attention
43
+ pooling strategies (cls_mha, max_seq_mha). Defaults to 4.
44
+ classifier_pooling_attention_dropout (float, optional): Dropout probability for multi-head attention
45
+ pooling strategies (cls_mha, max_seq_mha). Defaults to 0.0.
46
+ classifier_pooling_last_k (int, optional): Number of last hidden layers to use for max pooling
47
+ strategies (max_cls, max_seq_mha, max_seq_mean). Defaults to 8.
48
+ *args: Variable length argument list passed to ModernBertConfig.
49
+ **kwargs: Arbitrary keyword arguments passed to ModernBertConfig.
50
+
51
+ Note:
52
+ This class inherits all configuration parameters from ModernBertConfig including
53
+ hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, etc.
54
+ """
55
+
56
+ model_type = "modchembert"
57
+
58
+ def __init__(
59
+ self,
60
+ *args,
61
+ classifier_pooling: Literal[
62
+ "cls",
63
+ "mean",
64
+ "sum_mean",
65
+ "sum_sum",
66
+ "mean_mean",
67
+ "mean_sum",
68
+ "max_cls",
69
+ "cls_mha",
70
+ "max_seq_mha",
71
+ "max_seq_mean",
72
+ ] = "max_seq_mha",
73
+ classifier_pooling_num_attention_heads: int = 4,
74
+ classifier_pooling_attention_dropout: float = 0.0,
75
+ classifier_pooling_last_k: int = 8,
76
+ **kwargs,
77
+ ):
78
+ # Pass classifier_pooling="cls" to circumvent ValueError in ModernBertConfig init
79
+ super().__init__(*args, classifier_pooling="cls", **kwargs)
80
+ # Override with custom value
81
+ self.classifier_pooling = classifier_pooling
82
+ self.classifier_pooling_num_attention_heads = classifier_pooling_num_attention_heads
83
+ self.classifier_pooling_attention_dropout = classifier_pooling_attention_dropout
84
+ self.classifier_pooling_last_k = classifier_pooling_last_k
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bace_classification_epochs100_batch_size32_20250918_222031.log ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 22:20:31,631 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_classification
2
+ 2025-09-18 22:20:31,631 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - dataset: bace_classification, tasks: ['Class'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 22:20:31,645 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_classification at 2025-09-18_22-20-31
4
+ 2025-09-18 22:20:41,426 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5197 | Val mean-roc_auc_score: 0.6669
5
+ 2025-09-18 22:20:41,426 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 22:20:41,954 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6669
7
+ 2025-09-18 22:20:45,937 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3618 | Val mean-roc_auc_score: 0.6726
8
+ 2025-09-18 22:20:46,109 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
9
+ 2025-09-18 22:20:46,636 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.6726
10
+ 2025-09-18 22:20:50,411 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3192 | Val mean-roc_auc_score: 0.6882
11
+ 2025-09-18 22:20:50,588 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
12
+ 2025-09-18 22:20:51,106 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.6882
13
+ 2025-09-18 22:20:54,683 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2845 | Val mean-roc_auc_score: 0.6795
14
+ 2025-09-18 22:20:58,312 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2434 | Val mean-roc_auc_score: 0.6685
15
+ 2025-09-18 22:21:02,050 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2299 | Val mean-roc_auc_score: 0.7061
16
+ 2025-09-18 22:21:02,496 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 228
17
+ 2025-09-18 22:21:03,021 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7061
18
+ 2025-09-18 22:21:07,036 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2336 | Val mean-roc_auc_score: 0.7144
19
+ 2025-09-18 22:21:07,216 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 266
20
+ 2025-09-18 22:21:07,735 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.7144
21
+ 2025-09-18 22:21:11,602 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2451 | Val mean-roc_auc_score: 0.6727
22
+ 2025-09-18 22:21:15,459 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1653 | Val mean-roc_auc_score: 0.6930
23
+ 2025-09-18 22:21:19,246 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1776 | Val mean-roc_auc_score: 0.7197
24
+ 2025-09-18 22:21:19,389 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 380
25
+ 2025-09-18 22:21:19,912 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.7197
26
+ 2025-09-18 22:21:23,682 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1632 | Val mean-roc_auc_score: 0.6940
27
+ 2025-09-18 22:21:28,008 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1505 | Val mean-roc_auc_score: 0.6967
28
+ 2025-09-18 22:21:31,997 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1151 | Val mean-roc_auc_score: 0.6990
29
+ 2025-09-18 22:21:35,951 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0947 | Val mean-roc_auc_score: 0.7029
30
+ 2025-09-18 22:21:39,803 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1135 | Val mean-roc_auc_score: 0.6894
31
+ 2025-09-18 22:21:43,621 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1221 | Val mean-roc_auc_score: 0.7330
32
+ 2025-09-18 22:21:44,075 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 608
33
+ 2025-09-18 22:21:44,591 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val mean-roc_auc_score: 0.7330
34
+ 2025-09-18 22:21:48,564 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0921 | Val mean-roc_auc_score: 0.7215
35
+ 2025-09-18 22:21:52,290 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0765 | Val mean-roc_auc_score: 0.6803
36
+ 2025-09-18 22:21:56,142 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.7094
37
+ 2025-09-18 22:22:00,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0925 | Val mean-roc_auc_score: 0.7203
38
+ 2025-09-18 22:22:04,169 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0506 | Val mean-roc_auc_score: 0.7138
39
+ 2025-09-18 22:22:08,221 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0553 | Val mean-roc_auc_score: 0.6977
40
+ 2025-09-18 22:22:12,105 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0670 | Val mean-roc_auc_score: 0.7022
41
+ 2025-09-18 22:22:16,111 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0326 | Val mean-roc_auc_score: 0.7026
42
+ 2025-09-18 22:22:20,250 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0863 | Val mean-roc_auc_score: 0.6991
43
+ 2025-09-18 22:22:24,280 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.7241
44
+ 2025-09-18 22:22:29,616 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0409 | Val mean-roc_auc_score: 0.7021
45
+ 2025-09-18 22:22:33,624 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0329 | Val mean-roc_auc_score: 0.7067
46
+ 2025-09-18 22:22:37,446 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0674 | Val mean-roc_auc_score: 0.7117
47
+ 2025-09-18 22:22:41,264 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0744 | Val mean-roc_auc_score: 0.7181
48
+ 2025-09-18 22:22:45,196 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0419 | Val mean-roc_auc_score: 0.7059
49
+ 2025-09-18 22:22:49,548 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0223 | Val mean-roc_auc_score: 0.7031
50
+ 2025-09-18 22:22:53,600 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0164 | Val mean-roc_auc_score: 0.7118
51
+ 2025-09-18 22:22:57,478 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0162 | Val mean-roc_auc_score: 0.7039
52
+ 2025-09-18 22:23:01,406 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.7048
53
+ 2025-09-18 22:23:05,479 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0298 | Val mean-roc_auc_score: 0.6971
54
+ 2025-09-18 22:23:09,797 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1283 | Val mean-roc_auc_score: 0.7367
55
+ 2025-09-18 22:23:09,940 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 1406
56
+ 2025-09-18 22:23:10,454 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 37 with val mean-roc_auc_score: 0.7367
57
+ 2025-09-18 22:23:14,306 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.7380
58
+ 2025-09-18 22:23:14,485 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 1444
59
+ 2025-09-18 22:23:15,007 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 38 with val mean-roc_auc_score: 0.7380
60
+ 2025-09-18 22:23:19,072 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0504 | Val mean-roc_auc_score: 0.7203
61
+ 2025-09-18 22:23:22,944 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0216 | Val mean-roc_auc_score: 0.7040
62
+ 2025-09-18 22:23:26,880 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0200 | Val mean-roc_auc_score: 0.6888
63
+ 2025-09-18 22:23:31,248 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.7095
64
+ 2025-09-18 22:23:35,338 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0685 | Val mean-roc_auc_score: 0.7096
65
+ 2025-09-18 22:23:39,267 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0559 | Val mean-roc_auc_score: 0.7073
66
+ 2025-09-18 22:23:43,247 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0387 | Val mean-roc_auc_score: 0.7010
67
+ 2025-09-18 22:23:47,400 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.6900
68
+ 2025-09-18 22:23:51,708 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.6907
69
+ 2025-09-18 22:23:55,657 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.6872
70
+ 2025-09-18 22:23:59,692 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0153 | Val mean-roc_auc_score: 0.7002
71
+ 2025-09-18 22:24:03,787 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0196 | Val mean-roc_auc_score: 0.7034
72
+ 2025-09-18 22:24:07,888 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.6963
73
+ 2025-09-18 22:24:12,083 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.6935
74
+ 2025-09-18 22:24:16,801 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0180 | Val mean-roc_auc_score: 0.6976
75
+ 2025-09-18 22:24:20,801 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.6929
76
+ 2025-09-18 22:24:24,740 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.6989
77
+ 2025-09-18 22:24:28,589 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0112 | Val mean-roc_auc_score: 0.7018
78
+ 2025-09-18 22:24:32,846 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0130 | Val mean-roc_auc_score: 0.7082
79
+ 2025-09-18 22:24:36,996 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7028
80
+ 2025-09-18 22:24:41,078 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.7027
81
+ 2025-09-18 22:24:44,979 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.7034
82
+ 2025-09-18 22:24:49,033 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7130
83
+ 2025-09-18 22:24:53,493 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0188 | Val mean-roc_auc_score: 0.6783
84
+ 2025-09-18 22:24:57,444 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0401 | Val mean-roc_auc_score: 0.6827
85
+ 2025-09-18 22:25:01,342 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0815 | Val mean-roc_auc_score: 0.6886
86
+ 2025-09-18 22:25:05,321 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0273 | Val mean-roc_auc_score: 0.6893
87
+ 2025-09-18 22:25:09,427 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0266 | Val mean-roc_auc_score: 0.6927
88
+ 2025-09-18 22:25:13,739 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.6946
89
+ 2025-09-18 22:25:17,726 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0139 | Val mean-roc_auc_score: 0.6963
90
+ 2025-09-18 22:25:21,839 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0127 | Val mean-roc_auc_score: 0.6969
91
+ 2025-09-18 22:25:25,958 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.6891
92
+ 2025-09-18 22:25:30,039 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.6907
93
+ 2025-09-18 22:25:34,277 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.6980
94
+ 2025-09-18 22:25:38,382 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.6898
95
+ 2025-09-18 22:25:42,529 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.6931
96
+ 2025-09-18 22:25:46,556 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.6948
97
+ 2025-09-18 22:25:50,434 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.6981
98
+ 2025-09-18 22:25:54,857 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.6977
99
+ 2025-09-18 22:25:58,945 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.6989
100
+ 2025-09-18 22:26:03,907 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.6974
101
+ 2025-09-18 22:26:08,062 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.7015
102
+ 2025-09-18 22:26:12,205 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.7003
103
+ 2025-09-18 22:26:16,501 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0177 | Val mean-roc_auc_score: 0.6928
104
+ 2025-09-18 22:26:20,469 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0187 | Val mean-roc_auc_score: 0.6951
105
+ 2025-09-18 22:26:24,498 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.6857
106
+ 2025-09-18 22:26:28,521 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.6885
107
+ 2025-09-18 22:26:32,308 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0191 | Val mean-roc_auc_score: 0.7012
108
+ 2025-09-18 22:26:36,752 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.7031
109
+ 2025-09-18 22:26:40,869 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7050
110
+ 2025-09-18 22:26:44,820 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.6979
111
+ 2025-09-18 22:26:48,638 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.6986
112
+ 2025-09-18 22:26:52,754 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.7009
113
+ 2025-09-18 22:26:57,113 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.6998
114
+ 2025-09-18 22:27:01,168 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.7012
115
+ 2025-09-18 22:27:05,260 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.7008
116
+ 2025-09-18 22:27:09,604 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.6996
117
+ 2025-09-18 22:27:13,687 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7029
118
+ 2025-09-18 22:27:18,161 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0168 | Val mean-roc_auc_score: 0.7004
119
+ 2025-09-18 22:27:22,647 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.7077
120
+ 2025-09-18 22:27:27,378 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0226 | Val mean-roc_auc_score: 0.7012
121
+ 2025-09-18 22:27:32,569 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7029
122
+ 2025-09-18 22:27:33,358 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8370
123
+ 2025-09-18 22:27:33,802 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_classification at 2025-09-18_22-27-33
124
+ 2025-09-18 22:27:38,925 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5296 | Val mean-roc_auc_score: 0.6792
125
+ 2025-09-18 22:27:38,925 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
126
+ 2025-09-18 22:27:39,457 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6792
127
+ 2025-09-18 22:27:43,864 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3914 | Val mean-roc_auc_score: 0.6745
128
+ 2025-09-18 22:27:48,687 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3147 | Val mean-roc_auc_score: 0.6899
129
+ 2025-09-18 22:27:48,845 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
130
+ 2025-09-18 22:27:49,403 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.6899
131
+ 2025-09-18 22:27:53,776 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2829 | Val mean-roc_auc_score: 0.6746
132
+ 2025-09-18 22:27:59,833 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2253 | Val mean-roc_auc_score: 0.6874
133
+ 2025-09-18 22:28:05,554 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2388 | Val mean-roc_auc_score: 0.6776
134
+ 2025-09-18 22:28:12,336 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.2015 | Val mean-roc_auc_score: 0.6937
135
+ 2025-09-18 22:28:12,478 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 266
136
+ 2025-09-18 22:28:13,043 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.6937
137
+ 2025-09-18 22:28:17,921 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1699 | Val mean-roc_auc_score: 0.7109
138
+ 2025-09-18 22:28:18,173 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 304
139
+ 2025-09-18 22:28:18,706 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.7109
140
+ 2025-09-18 22:28:24,883 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1669 | Val mean-roc_auc_score: 0.7259
141
+ 2025-09-18 22:28:25,060 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 342
142
+ 2025-09-18 22:28:25,628 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.7259
143
+ 2025-09-18 22:28:31,770 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1628 | Val mean-roc_auc_score: 0.6897
144
+ 2025-09-18 22:28:38,115 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1380 | Val mean-roc_auc_score: 0.6860
145
+ 2025-09-18 22:28:44,697 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1637 | Val mean-roc_auc_score: 0.7106
146
+ 2025-09-18 22:28:49,962 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1151 | Val mean-roc_auc_score: 0.6997
147
+ 2025-09-18 22:28:56,308 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1079 | Val mean-roc_auc_score: 0.6751
148
+ 2025-09-18 22:29:01,453 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1094 | Val mean-roc_auc_score: 0.7238
149
+ 2025-09-18 22:29:07,621 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1992 | Val mean-roc_auc_score: 0.6574
150
+ 2025-09-18 22:29:13,211 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1365 | Val mean-roc_auc_score: 0.7054
151
+ 2025-09-18 22:29:19,316 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0905 | Val mean-roc_auc_score: 0.6701
152
+ 2025-09-18 22:29:23,531 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0661 | Val mean-roc_auc_score: 0.6873
153
+ 2025-09-18 22:29:27,621 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0530 | Val mean-roc_auc_score: 0.6865
154
+ 2025-09-18 22:29:31,688 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0691 | Val mean-roc_auc_score: 0.6895
155
+ 2025-09-18 22:29:36,338 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0690 | Val mean-roc_auc_score: 0.6981
156
+ 2025-09-18 22:29:40,586 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0424 | Val mean-roc_auc_score: 0.6972
157
+ 2025-09-18 22:29:44,596 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0374 | Val mean-roc_auc_score: 0.6971
158
+ 2025-09-18 22:29:48,900 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0263 | Val mean-roc_auc_score: 0.7012
159
+ 2025-09-18 22:29:53,154 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0510 | Val mean-roc_auc_score: 0.6908
160
+ 2025-09-18 22:29:58,584 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1322 | Val mean-roc_auc_score: 0.7015
161
+ 2025-09-18 22:30:02,583 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.7057
162
+ 2025-09-18 22:30:06,923 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.7118
163
+ 2025-09-18 22:30:11,230 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0278 | Val mean-roc_auc_score: 0.7002
164
+ 2025-09-18 22:30:15,390 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0403 | Val mean-roc_auc_score: 0.6977
165
+ 2025-09-18 22:30:19,783 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1953 | Val mean-roc_auc_score: 0.6819
166
+ 2025-09-18 22:30:24,089 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1061 | Val mean-roc_auc_score: 0.6817
167
+ 2025-09-18 22:30:28,464 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0387 | Val mean-roc_auc_score: 0.6815
168
+ 2025-09-18 22:30:32,889 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0283 | Val mean-roc_auc_score: 0.6714
169
+ 2025-09-18 22:30:37,375 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0238 | Val mean-roc_auc_score: 0.6768
170
+ 2025-09-18 22:30:42,026 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.6737
171
+ 2025-09-18 22:30:46,354 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.6825
172
+ 2025-09-18 22:30:50,842 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.6804
173
+ 2025-09-18 22:30:55,541 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.6823
174
+ 2025-09-18 22:31:00,326 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.6795
175
+ 2025-09-18 22:31:05,613 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.6795
176
+ 2025-09-18 22:31:10,616 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.6875
177
+ 2025-09-18 22:31:15,598 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0176 | Val mean-roc_auc_score: 0.6808
178
+ 2025-09-18 22:31:20,424 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.6702
179
+ 2025-09-18 22:31:24,642 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.6773
180
+ 2025-09-18 22:31:29,236 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0125 | Val mean-roc_auc_score: 0.6856
181
+ 2025-09-18 22:31:33,603 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.6871
182
+ 2025-09-18 22:31:37,826 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0152 | Val mean-roc_auc_score: 0.6772
183
+ 2025-09-18 22:31:42,067 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0387 | Val mean-roc_auc_score: 0.6752
184
+ 2025-09-18 22:31:46,208 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0288 | Val mean-roc_auc_score: 0.6900
185
+ 2025-09-18 22:31:50,680 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.6803
186
+ 2025-09-18 22:31:55,480 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0139 | Val mean-roc_auc_score: 0.6781
187
+ 2025-09-18 22:31:59,479 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.6826
188
+ 2025-09-18 22:32:03,517 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.6789
189
+ 2025-09-18 22:32:07,665 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.6787
190
+ 2025-09-18 22:32:11,981 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.6783
191
+ 2025-09-18 22:32:16,003 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0260 | Val mean-roc_auc_score: 0.6843
192
+ 2025-09-18 22:32:20,131 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.6891
193
+ 2025-09-18 22:32:24,293 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.6784
194
+ 2025-09-18 22:32:28,488 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.6757
195
+ 2025-09-18 22:32:32,869 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.6776
196
+ 2025-09-18 22:32:36,957 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.6779
197
+ 2025-09-18 22:32:40,727 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.6750
198
+ 2025-09-18 22:32:44,491 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0286 | Val mean-roc_auc_score: 0.6669
199
+ 2025-09-18 22:32:48,287 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.6621
200
+ 2025-09-18 22:32:52,585 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.6913
201
+ 2025-09-18 22:32:56,464 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0280 | Val mean-roc_auc_score: 0.6857
202
+ 2025-09-18 22:33:00,434 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0146 | Val mean-roc_auc_score: 0.6690
203
+ 2025-09-18 22:33:04,657 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.6768
204
+ 2025-09-18 22:33:08,908 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0153 | Val mean-roc_auc_score: 0.6814
205
+ 2025-09-18 22:33:13,414 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0512 | Val mean-roc_auc_score: 0.6680
206
+ 2025-09-18 22:33:17,745 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0205 | Val mean-roc_auc_score: 0.6640
207
+ 2025-09-18 22:33:22,054 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0358 | Val mean-roc_auc_score: 0.6902
208
+ 2025-09-18 22:33:27,231 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0350 | Val mean-roc_auc_score: 0.6915
209
+ 2025-09-18 22:33:32,889 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0282 | Val mean-roc_auc_score: 0.6616
210
+ 2025-09-18 22:33:38,593 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.6774
211
+ 2025-09-18 22:33:44,498 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.6803
212
+ 2025-09-18 22:33:51,554 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0114 | Val mean-roc_auc_score: 0.6796
213
+ 2025-09-18 22:33:56,772 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.6795
214
+ 2025-09-18 22:34:02,362 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.6787
215
+ 2025-09-18 22:34:08,809 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.6784
216
+ 2025-09-18 22:34:13,901 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.6796
217
+ 2025-09-18 22:34:19,799 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.6827
218
+ 2025-09-18 22:34:25,979 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.6815
219
+ 2025-09-18 22:34:31,123 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.6780
220
+ 2025-09-18 22:34:36,959 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.6751
221
+ 2025-09-18 22:34:41,253 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.6786
222
+ 2025-09-18 22:34:45,431 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.6783
223
+ 2025-09-18 22:34:49,756 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.6787
224
+ 2025-09-18 22:34:54,026 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.6796
225
+ 2025-09-18 22:34:58,456 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.6813
226
+ 2025-09-18 22:35:02,760 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.6863
227
+ 2025-09-18 22:35:06,987 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.6847
228
+ 2025-09-18 22:35:11,433 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.6839
229
+ 2025-09-18 22:35:15,758 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.6831
230
+ 2025-09-18 22:35:20,327 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.6842
231
+ 2025-09-18 22:35:24,545 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.6853
232
+ 2025-09-18 22:35:29,036 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.6863
233
+ 2025-09-18 22:35:33,472 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.6861
234
+ 2025-09-18 22:35:33,896 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8009
235
+ 2025-09-18 22:35:34,251 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_classification at 2025-09-18_22-35-34
236
+ 2025-09-18 22:35:38,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5493 | Val mean-roc_auc_score: 0.6859
237
+ 2025-09-18 22:35:38,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 38
238
+ 2025-09-18 22:35:38,708 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.6859
239
+ 2025-09-18 22:35:42,970 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3832 | Val mean-roc_auc_score: 0.6935
240
+ 2025-09-18 22:35:43,164 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 76
241
+ 2025-09-18 22:35:43,740 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.6935
242
+ 2025-09-18 22:35:48,208 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.3728 | Val mean-roc_auc_score: 0.7070
243
+ 2025-09-18 22:35:48,388 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 114
244
+ 2025-09-18 22:35:48,948 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7070
245
+ 2025-09-18 22:35:53,185 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2845 | Val mean-roc_auc_score: 0.7181
246
+ 2025-09-18 22:35:53,379 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 152
247
+ 2025-09-18 22:35:53,944 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7181
248
+ 2025-09-18 22:35:58,433 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2730 | Val mean-roc_auc_score: 0.7398
249
+ 2025-09-18 22:35:58,613 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 190
250
+ 2025-09-18 22:35:59,202 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7398
251
+ 2025-09-18 22:36:05,828 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2210 | Val mean-roc_auc_score: 0.6948
252
+ 2025-09-18 22:36:12,404 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1850 | Val mean-roc_auc_score: 0.6931
253
+ 2025-09-18 22:36:17,151 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2578 | Val mean-roc_auc_score: 0.7002
254
+ 2025-09-18 22:36:23,473 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1719 | Val mean-roc_auc_score: 0.7203
255
+ 2025-09-18 22:36:29,658 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1678 | Val mean-roc_auc_score: 0.7061
256
+ 2025-09-18 22:36:34,879 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1675 | Val mean-roc_auc_score: 0.7464
257
+ 2025-09-18 22:36:35,311 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Global step of best model: 418
258
+ 2025-09-18 22:36:35,865 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.7464
259
+ 2025-09-18 22:36:41,794 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1118 | Val mean-roc_auc_score: 0.6958
260
+ 2025-09-18 22:36:47,974 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1382 | Val mean-roc_auc_score: 0.7297
261
+ 2025-09-18 22:36:54,192 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1182 | Val mean-roc_auc_score: 0.7062
262
+ 2025-09-18 22:37:00,290 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0979 | Val mean-roc_auc_score: 0.7221
263
+ 2025-09-18 22:37:05,883 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1099 | Val mean-roc_auc_score: 0.7272
264
+ 2025-09-18 22:37:12,402 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0773 | Val mean-roc_auc_score: 0.7061
265
+ 2025-09-18 22:37:18,754 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1299 | Val mean-roc_auc_score: 0.7065
266
+ 2025-09-18 22:37:23,831 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0717 | Val mean-roc_auc_score: 0.6919
267
+ 2025-09-18 22:37:30,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7047
268
+ 2025-09-18 22:37:36,407 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0658 | Val mean-roc_auc_score: 0.6881
269
+ 2025-09-18 22:37:42,342 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0484 | Val mean-roc_auc_score: 0.7000
270
+ 2025-09-18 22:37:48,669 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0966 | Val mean-roc_auc_score: 0.7240
271
+ 2025-09-18 22:37:54,815 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7053
272
+ 2025-09-18 22:38:01,168 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0629 | Val mean-roc_auc_score: 0.7105
273
+ 2025-09-18 22:38:07,510 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0376 | Val mean-roc_auc_score: 0.7151
274
+ 2025-09-18 22:38:14,965 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0367 | Val mean-roc_auc_score: 0.7225
275
+ 2025-09-18 22:38:21,401 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0380 | Val mean-roc_auc_score: 0.7241
276
+ 2025-09-18 22:38:27,124 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0356 | Val mean-roc_auc_score: 0.7174
277
+ 2025-09-18 22:38:33,419 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0362 | Val mean-roc_auc_score: 0.7134
278
+ 2025-09-18 22:38:39,820 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0483 | Val mean-roc_auc_score: 0.7186
279
+ 2025-09-18 22:38:44,872 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0251 | Val mean-roc_auc_score: 0.7026
280
+ 2025-09-18 22:38:49,286 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0174 | Val mean-roc_auc_score: 0.7120
281
+ 2025-09-18 22:38:53,613 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0171 | Val mean-roc_auc_score: 0.7160
282
+ 2025-09-18 22:38:57,940 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0156 | Val mean-roc_auc_score: 0.7001
283
+ 2025-09-18 22:39:02,444 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0261 | Val mean-roc_auc_score: 0.7145
284
+ 2025-09-18 22:39:07,065 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.7138
285
+ 2025-09-18 22:39:11,383 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0393 | Val mean-roc_auc_score: 0.7262
286
+ 2025-09-18 22:39:16,559 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0740 | Val mean-roc_auc_score: 0.6862
287
+ 2025-09-18 22:39:23,609 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0527 | Val mean-roc_auc_score: 0.7176
288
+ 2025-09-18 22:39:29,241 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0224 | Val mean-roc_auc_score: 0.7251
289
+ 2025-09-18 22:39:36,924 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0142 | Val mean-roc_auc_score: 0.7187
290
+ 2025-09-18 22:39:43,652 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.7175
291
+ 2025-09-18 22:39:49,807 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7174
292
+ 2025-09-18 22:39:56,225 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0091 | Val mean-roc_auc_score: 0.7186
293
+ 2025-09-18 22:40:03,131 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.7183
294
+ 2025-09-18 22:40:10,275 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.7183
295
+ 2025-09-18 22:40:16,419 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.7237
296
+ 2025-09-18 22:40:21,901 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0339 | Val mean-roc_auc_score: 0.7321
297
+ 2025-09-18 22:40:28,726 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0450 | Val mean-roc_auc_score: 0.7058
298
+ 2025-09-18 22:40:33,477 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0343 | Val mean-roc_auc_score: 0.7010
299
+ 2025-09-18 22:40:38,586 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0237 | Val mean-roc_auc_score: 0.7098
300
+ 2025-09-18 22:40:44,473 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0642 | Val mean-roc_auc_score: 0.7195
301
+ 2025-09-18 22:40:49,015 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0405 | Val mean-roc_auc_score: 0.7283
302
+ 2025-09-18 22:40:53,622 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0487 | Val mean-roc_auc_score: 0.7235
303
+ 2025-09-18 22:40:58,231 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0300 | Val mean-roc_auc_score: 0.7127
304
+ 2025-09-18 22:41:03,181 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0180 | Val mean-roc_auc_score: 0.7262
305
+ 2025-09-18 22:41:07,763 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.7181
306
+ 2025-09-18 22:41:11,973 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.7266
307
+ 2025-09-18 22:41:16,475 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0119 | Val mean-roc_auc_score: 0.7304
308
+ 2025-09-18 22:41:20,920 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.7312
309
+ 2025-09-18 22:41:25,758 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.7293
310
+ 2025-09-18 22:41:30,283 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7267
311
+ 2025-09-18 22:41:34,534 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.7310
312
+ 2025-09-18 22:41:39,324 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0395 | Val mean-roc_auc_score: 0.7294
313
+ 2025-09-18 22:41:44,046 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0447 | Val mean-roc_auc_score: 0.7254
314
+ 2025-09-18 22:41:48,791 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0248 | Val mean-roc_auc_score: 0.7209
315
+ 2025-09-18 22:41:53,349 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.7341
316
+ 2025-09-18 22:41:57,710 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.7265
317
+ 2025-09-18 22:42:02,261 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.7254
318
+ 2025-09-18 22:42:06,848 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.7217
319
+ 2025-09-18 22:42:11,619 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0100 | Val mean-roc_auc_score: 0.7289
320
+ 2025-09-18 22:42:16,165 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.7277
321
+ 2025-09-18 22:42:20,463 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.7278
322
+ 2025-09-18 22:42:24,953 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.7236
323
+ 2025-09-18 22:42:29,448 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7254
324
+ 2025-09-18 22:42:34,287 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.7254
325
+ 2025-09-18 22:42:38,821 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7211
326
+ 2025-09-18 22:42:44,095 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.7259
327
+ 2025-09-18 22:42:48,583 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.7267
328
+ 2025-09-18 22:42:52,986 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.7261
329
+ 2025-09-18 22:42:57,633 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.7234
330
+ 2025-09-18 22:43:02,082 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.7225
331
+ 2025-09-18 22:43:06,460 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.7163
332
+ 2025-09-18 22:43:10,993 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.7140
333
+ 2025-09-18 22:43:15,515 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.7148
334
+ 2025-09-18 22:43:20,248 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.7227
335
+ 2025-09-18 22:43:24,715 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.7191
336
+ 2025-09-18 22:43:29,053 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.7217
337
+ 2025-09-18 22:43:33,410 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.7236
338
+ 2025-09-18 22:43:37,618 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.7220
339
+ 2025-09-18 22:43:42,491 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7213
340
+ 2025-09-18 22:43:47,067 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.7209
341
+ 2025-09-18 22:43:51,545 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.7201
342
+ 2025-09-18 22:43:56,108 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.7193
343
+ 2025-09-18 22:44:00,616 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.7205
344
+ 2025-09-18 22:44:05,448 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.7201
345
+ 2025-09-18 22:44:09,944 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.7224
346
+ 2025-09-18 22:44:14,242 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.7164
347
+ 2025-09-18 22:44:18,773 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0155 | Val mean-roc_auc_score: 0.7153
348
+ 2025-09-18 22:44:19,391 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.8295
349
+ 2025-09-18 22:44:19,733 - logs_modchembert_bace_classification_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.8224, Std Dev: 0.0156
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bbbp_epochs100_batch_size32_20250918_224419.log ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 22:44:19,734 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Running benchmark for dataset: bbbp
2
+ 2025-09-18 22:44:19,734 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - dataset: bbbp, tasks: ['p_np'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 22:44:19,747 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bbbp at 2025-09-18_22-44-19
4
+ 2025-09-18 22:44:24,859 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2608 | Val mean-roc_auc_score: 0.9795
5
+ 2025-09-18 22:44:24,859 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
6
+ 2025-09-18 22:44:25,400 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9795
7
+ 2025-09-18 22:44:31,290 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1035 | Val mean-roc_auc_score: 0.9826
8
+ 2025-09-18 22:44:31,471 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
9
+ 2025-09-18 22:44:32,021 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9826
10
+ 2025-09-18 22:44:37,522 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0986 | Val mean-roc_auc_score: 0.9878
11
+ 2025-09-18 22:44:37,706 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 156
12
+ 2025-09-18 22:44:38,244 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9878
13
+ 2025-09-18 22:44:43,710 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1035 | Val mean-roc_auc_score: 0.9888
14
+ 2025-09-18 22:44:43,896 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 208
15
+ 2025-09-18 22:44:44,445 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9888
16
+ 2025-09-18 22:44:50,308 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0532 | Val mean-roc_auc_score: 0.9900
17
+ 2025-09-18 22:44:50,487 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 260
18
+ 2025-09-18 22:44:51,031 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9900
19
+ 2025-09-18 22:44:56,616 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0524 | Val mean-roc_auc_score: 0.9834
20
+ 2025-09-18 22:45:02,182 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0448 | Val mean-roc_auc_score: 0.9883
21
+ 2025-09-18 22:45:07,846 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0284 | Val mean-roc_auc_score: 0.9817
22
+ 2025-09-18 22:45:13,845 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0769 | Val mean-roc_auc_score: 0.9852
23
+ 2025-09-18 22:45:19,499 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0396 | Val mean-roc_auc_score: 0.9910
24
+ 2025-09-18 22:45:19,680 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 520
25
+ 2025-09-18 22:45:20,223 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9910
26
+ 2025-09-18 22:45:25,534 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0215 | Val mean-roc_auc_score: 0.9907
27
+ 2025-09-18 22:45:31,539 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.9907
28
+ 2025-09-18 22:45:37,300 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.9914
29
+ 2025-09-18 22:45:37,447 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 676
30
+ 2025-09-18 22:45:38,010 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 13 with val mean-roc_auc_score: 0.9914
31
+ 2025-09-18 22:45:43,763 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9909
32
+ 2025-09-18 22:45:49,107 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9887
33
+ 2025-09-18 22:45:54,775 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0187 | Val mean-roc_auc_score: 0.9916
34
+ 2025-09-18 22:45:55,258 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 832
35
+ 2025-09-18 22:45:55,808 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val mean-roc_auc_score: 0.9916
36
+ 2025-09-18 22:46:01,729 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9911
37
+ 2025-09-18 22:46:07,341 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9915
38
+ 2025-09-18 22:46:12,744 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9912
39
+ 2025-09-18 22:46:19,809 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9916
40
+ 2025-09-18 22:46:19,991 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1040
41
+ 2025-09-18 22:46:20,539 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val mean-roc_auc_score: 0.9916
42
+ 2025-09-18 22:46:26,127 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0183 | Val mean-roc_auc_score: 0.9894
43
+ 2025-09-18 22:46:32,048 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0240 | Val mean-roc_auc_score: 0.9837
44
+ 2025-09-18 22:46:37,688 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0236 | Val mean-roc_auc_score: 0.9796
45
+ 2025-09-18 22:46:43,847 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.9898
46
+ 2025-09-18 22:46:49,843 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0236 | Val mean-roc_auc_score: 0.9927
47
+ 2025-09-18 22:46:50,051 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 1300
48
+ 2025-09-18 22:46:50,628 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 25 with val mean-roc_auc_score: 0.9927
49
+ 2025-09-18 22:46:56,391 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.9896
50
+ 2025-09-18 22:47:02,587 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9906
51
+ 2025-09-18 22:47:08,488 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9906
52
+ 2025-09-18 22:47:14,119 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9911
53
+ 2025-09-18 22:47:19,254 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9909
54
+ 2025-09-18 22:47:24,667 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9913
55
+ 2025-09-18 22:47:30,150 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9909
56
+ 2025-09-18 22:47:35,335 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9907
57
+ 2025-09-18 22:47:40,953 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9916
58
+ 2025-09-18 22:47:46,797 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9908
59
+ 2025-09-18 22:47:52,621 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9915
60
+ 2025-09-18 22:47:58,602 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9903
61
+ 2025-09-18 22:48:04,624 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9908
62
+ 2025-09-18 22:48:10,719 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9904
63
+ 2025-09-18 22:48:16,546 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9905
64
+ 2025-09-18 22:48:22,162 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9908
65
+ 2025-09-18 22:48:27,976 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9912
66
+ 2025-09-18 22:48:33,734 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9902
67
+ 2025-09-18 22:48:39,636 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9909
68
+ 2025-09-18 22:48:45,447 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9910
69
+ 2025-09-18 22:48:51,057 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9912
70
+ 2025-09-18 22:48:56,809 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9910
71
+ 2025-09-18 22:49:02,398 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9908
72
+ 2025-09-18 22:49:08,262 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9908
73
+ 2025-09-18 22:49:14,033 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9897
74
+ 2025-09-18 22:49:19,914 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9898
75
+ 2025-09-18 22:49:25,528 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0293 | Val mean-roc_auc_score: 0.9900
76
+ 2025-09-18 22:49:31,327 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9896
77
+ 2025-09-18 22:49:37,287 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9901
78
+ 2025-09-18 22:49:43,662 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9905
79
+ 2025-09-18 22:49:49,452 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9906
80
+ 2025-09-18 22:49:55,590 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9909
81
+ 2025-09-18 22:50:02,109 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0205 | Val mean-roc_auc_score: 0.9888
82
+ 2025-09-18 22:50:07,780 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0715 | Val mean-roc_auc_score: 0.9854
83
+ 2025-09-18 22:50:13,131 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0328 | Val mean-roc_auc_score: 0.9846
84
+ 2025-09-18 22:50:18,737 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.9920
85
+ 2025-09-18 22:50:24,863 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9914
86
+ 2025-09-18 22:50:30,896 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9927
87
+ 2025-09-18 22:50:36,572 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9929
88
+ 2025-09-18 22:50:36,719 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3328
89
+ 2025-09-18 22:50:37,279 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 64 with val mean-roc_auc_score: 0.9929
90
+ 2025-09-18 22:50:42,847 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9931
91
+ 2025-09-18 22:50:43,031 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3380
92
+ 2025-09-18 22:50:43,576 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 65 with val mean-roc_auc_score: 0.9931
93
+ 2025-09-18 22:50:49,512 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9930
94
+ 2025-09-18 22:50:55,434 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9934
95
+ 2025-09-18 22:50:55,617 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3484
96
+ 2025-09-18 22:50:56,150 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 67 with val mean-roc_auc_score: 0.9934
97
+ 2025-09-18 22:51:01,477 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9934
98
+ 2025-09-18 22:51:07,149 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9934
99
+ 2025-09-18 22:51:13,086 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9935
100
+ 2025-09-18 22:51:13,235 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3640
101
+ 2025-09-18 22:51:13,779 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 70 with val mean-roc_auc_score: 0.9935
102
+ 2025-09-18 22:51:19,352 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9934
103
+ 2025-09-18 22:51:24,968 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9935
104
+ 2025-09-18 22:51:30,297 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9933
105
+ 2025-09-18 22:51:35,830 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9933
106
+ 2025-09-18 22:51:41,642 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9933
107
+ 2025-09-18 22:51:47,511 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9933
108
+ 2025-09-18 22:51:54,278 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9934
109
+ 2025-09-18 22:52:00,059 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9934
110
+ 2025-09-18 22:52:05,908 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9934
111
+ 2025-09-18 22:52:11,336 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9933
112
+ 2025-09-18 22:52:16,931 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9934
113
+ 2025-09-18 22:52:22,667 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9933
114
+ 2025-09-18 22:52:28,537 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9933
115
+ 2025-09-18 22:52:34,393 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9932
116
+ 2025-09-18 22:52:40,039 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9935
117
+ 2025-09-18 22:52:45,717 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9935
118
+ 2025-09-18 22:52:46,164 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 4472
119
+ 2025-09-18 22:52:46,712 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 86 with val mean-roc_auc_score: 0.9935
120
+ 2025-09-18 22:52:52,580 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9933
121
+ 2025-09-18 22:52:58,472 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9934
122
+ 2025-09-18 22:53:04,068 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9934
123
+ 2025-09-18 22:53:09,622 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9934
124
+ 2025-09-18 22:53:15,112 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9934
125
+ 2025-09-18 22:53:21,309 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9934
126
+ 2025-09-18 22:53:27,107 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9935
127
+ 2025-09-18 22:53:32,769 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9935
128
+ 2025-09-18 22:53:37,743 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9935
129
+ 2025-09-18 22:53:43,263 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9933
130
+ 2025-09-18 22:53:50,186 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9935
131
+ 2025-09-18 22:53:55,750 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9936
132
+ 2025-09-18 22:53:55,900 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 5096
133
+ 2025-09-18 22:53:56,438 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 98 with val mean-roc_auc_score: 0.9936
134
+ 2025-09-18 22:54:02,038 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9935
135
+ 2025-09-18 22:54:07,908 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.9935
136
+ 2025-09-18 22:54:08,294 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7426
137
+ 2025-09-18 22:54:08,661 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bbbp at 2025-09-18_22-54-08
138
+ 2025-09-18 22:54:13,951 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2392 | Val mean-roc_auc_score: 0.9697
139
+ 2025-09-18 22:54:13,952 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
140
+ 2025-09-18 22:54:14,476 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9697
141
+ 2025-09-18 22:54:20,108 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1699 | Val mean-roc_auc_score: 0.9835
142
+ 2025-09-18 22:54:20,279 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
143
+ 2025-09-18 22:54:20,816 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9835
144
+ 2025-09-18 22:54:26,640 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1160 | Val mean-roc_auc_score: 0.9892
145
+ 2025-09-18 22:54:26,821 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 156
146
+ 2025-09-18 22:54:27,373 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9892
147
+ 2025-09-18 22:54:32,907 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1318 | Val mean-roc_auc_score: 0.9857
148
+ 2025-09-18 22:54:38,553 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0697 | Val mean-roc_auc_score: 0.9847
149
+ 2025-09-18 22:54:44,048 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0788 | Val mean-roc_auc_score: 0.9885
150
+ 2025-09-18 22:54:50,183 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0359 | Val mean-roc_auc_score: 0.9869
151
+ 2025-09-18 22:54:56,097 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0311 | Val mean-roc_auc_score: 0.9848
152
+ 2025-09-18 22:55:01,672 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0368 | Val mean-roc_auc_score: 0.9873
153
+ 2025-09-18 22:55:07,334 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0212 | Val mean-roc_auc_score: 0.9876
154
+ 2025-09-18 22:55:12,811 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.9884
155
+ 2025-09-18 22:55:18,812 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9884
156
+ 2025-09-18 22:55:24,643 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9880
157
+ 2025-09-18 22:55:30,444 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9880
158
+ 2025-09-18 22:55:36,305 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.9863
159
+ 2025-09-18 22:55:41,918 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.9890
160
+ 2025-09-18 22:55:47,179 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0185 | Val mean-roc_auc_score: 0.9747
161
+ 2025-09-18 22:55:52,676 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0252 | Val mean-roc_auc_score: 0.9824
162
+ 2025-09-18 22:55:58,295 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0703 | Val mean-roc_auc_score: 0.9810
163
+ 2025-09-18 22:56:04,536 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0434 | Val mean-roc_auc_score: 0.9821
164
+ 2025-09-18 22:56:09,974 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0218 | Val mean-roc_auc_score: 0.9866
165
+ 2025-09-18 22:56:15,794 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0104 | Val mean-roc_auc_score: 0.9856
166
+ 2025-09-18 22:56:21,313 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0191 | Val mean-roc_auc_score: 0.9824
167
+ 2025-09-18 22:56:27,111 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9857
168
+ 2025-09-18 22:56:32,976 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9881
169
+ 2025-09-18 22:56:38,624 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9867
170
+ 2025-09-18 22:56:44,553 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9868
171
+ 2025-09-18 22:56:49,773 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9879
172
+ 2025-09-18 22:56:54,926 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9871
173
+ 2025-09-18 22:56:59,768 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9875
174
+ 2025-09-18 22:57:05,184 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9874
175
+ 2025-09-18 22:57:11,339 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9872
176
+ 2025-09-18 22:57:17,134 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9869
177
+ 2025-09-18 22:57:22,397 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9867
178
+ 2025-09-18 22:57:27,842 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9866
179
+ 2025-09-18 22:57:33,442 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9867
180
+ 2025-09-18 22:57:39,237 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9867
181
+ 2025-09-18 22:57:45,096 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9868
182
+ 2025-09-18 22:57:51,352 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9866
183
+ 2025-09-18 22:57:56,912 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9869
184
+ 2025-09-18 22:58:02,350 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9872
185
+ 2025-09-18 22:58:08,673 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9869
186
+ 2025-09-18 22:58:14,549 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9863
187
+ 2025-09-18 22:58:20,181 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9867
188
+ 2025-09-18 22:58:25,792 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9862
189
+ 2025-09-18 22:58:31,338 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9858
190
+ 2025-09-18 22:58:37,452 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9874
191
+ 2025-09-18 22:58:43,307 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0096 | Val mean-roc_auc_score: 0.9877
192
+ 2025-09-18 22:58:49,038 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9848
193
+ 2025-09-18 22:58:54,812 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9817
194
+ 2025-09-18 22:59:00,165 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0089 | Val mean-roc_auc_score: 0.9877
195
+ 2025-09-18 22:59:06,377 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9871
196
+ 2025-09-18 22:59:12,288 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9865
197
+ 2025-09-18 22:59:17,937 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9855
198
+ 2025-09-18 22:59:23,282 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9859
199
+ 2025-09-18 22:59:28,726 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9869
200
+ 2025-09-18 22:59:34,611 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9890
201
+ 2025-09-18 22:59:40,824 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0403 | Val mean-roc_auc_score: 0.9857
202
+ 2025-09-18 22:59:46,293 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0308 | Val mean-roc_auc_score: 0.9843
203
+ 2025-09-18 22:59:51,910 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0758 | Val mean-roc_auc_score: 0.9899
204
+ 2025-09-18 22:59:52,052 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3120
205
+ 2025-09-18 22:59:52,580 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 60 with val mean-roc_auc_score: 0.9899
206
+ 2025-09-18 22:59:58,396 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0251 | Val mean-roc_auc_score: 0.9899
207
+ 2025-09-18 22:59:58,923 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3172
208
+ 2025-09-18 22:59:59,468 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 61 with val mean-roc_auc_score: 0.9899
209
+ 2025-09-18 23:00:05,117 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0120 | Val mean-roc_auc_score: 0.9901
210
+ 2025-09-18 23:00:05,296 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3224
211
+ 2025-09-18 23:00:05,821 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 62 with val mean-roc_auc_score: 0.9901
212
+ 2025-09-18 23:00:11,143 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9905
213
+ 2025-09-18 23:00:11,324 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3276
214
+ 2025-09-18 23:00:11,857 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 63 with val mean-roc_auc_score: 0.9905
215
+ 2025-09-18 23:00:17,684 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9903
216
+ 2025-09-18 23:00:23,369 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9906
217
+ 2025-09-18 23:00:23,548 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3380
218
+ 2025-09-18 23:00:24,073 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 65 with val mean-roc_auc_score: 0.9906
219
+ 2025-09-18 23:00:29,678 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9900
220
+ 2025-09-18 23:00:35,839 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9902
221
+ 2025-09-18 23:00:41,760 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9908
222
+ 2025-09-18 23:00:41,940 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3536
223
+ 2025-09-18 23:00:42,473 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 68 with val mean-roc_auc_score: 0.9908
224
+ 2025-09-18 23:00:47,987 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9909
225
+ 2025-09-18 23:00:48,166 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3588
226
+ 2025-09-18 23:00:48,689 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 69 with val mean-roc_auc_score: 0.9909
227
+ 2025-09-18 23:00:54,479 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9904
228
+ 2025-09-18 23:01:00,314 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9907
229
+ 2025-09-18 23:01:06,214 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9903
230
+ 2025-09-18 23:01:11,746 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9909
231
+ 2025-09-18 23:01:17,625 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9903
232
+ 2025-09-18 23:01:23,561 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9904
233
+ 2025-09-18 23:01:29,242 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9906
234
+ 2025-09-18 23:01:35,567 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9919
235
+ 2025-09-18 23:01:35,712 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 4004
236
+ 2025-09-18 23:01:36,247 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 77 with val mean-roc_auc_score: 0.9919
237
+ 2025-09-18 23:01:42,087 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9908
238
+ 2025-09-18 23:01:47,941 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9909
239
+ 2025-09-18 23:01:53,528 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9910
240
+ 2025-09-18 23:01:59,057 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9912
241
+ 2025-09-18 23:02:04,930 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9909
242
+ 2025-09-18 23:02:10,799 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9910
243
+ 2025-09-18 23:02:16,676 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9909
244
+ 2025-09-18 23:02:22,261 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.9909
245
+ 2025-09-18 23:02:27,890 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9911
246
+ 2025-09-18 23:02:33,700 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9909
247
+ 2025-09-18 23:02:39,627 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.9906
248
+ 2025-09-18 23:02:45,586 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9909
249
+ 2025-09-18 23:02:51,285 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9908
250
+ 2025-09-18 23:02:56,712 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9909
251
+ 2025-09-18 23:03:02,900 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9908
252
+ 2025-09-18 23:03:08,855 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9913
253
+ 2025-09-18 23:03:14,272 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9906
254
+ 2025-09-18 23:03:18,914 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9907
255
+ 2025-09-18 23:03:23,559 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9910
256
+ 2025-09-18 23:03:29,502 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9907
257
+ 2025-09-18 23:03:34,079 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9907
258
+ 2025-09-18 23:03:38,827 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9909
259
+ 2025-09-18 23:03:43,516 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9911
260
+ 2025-09-18 23:03:43,855 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7504
261
+ 2025-09-18 23:03:44,216 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bbbp at 2025-09-18_23-03-44
262
+ 2025-09-18 23:03:48,548 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.2200 | Val mean-roc_auc_score: 0.9855
263
+ 2025-09-18 23:03:48,548 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 52
264
+ 2025-09-18 23:03:49,074 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9855
265
+ 2025-09-18 23:03:53,795 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.4062 | Val mean-roc_auc_score: 0.9882
266
+ 2025-09-18 23:03:53,944 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 104
267
+ 2025-09-18 23:03:54,464 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9882
268
+ 2025-09-18 23:03:59,088 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1028 | Val mean-roc_auc_score: 0.9847
269
+ 2025-09-18 23:04:03,717 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1006 | Val mean-roc_auc_score: 0.9895
270
+ 2025-09-18 23:04:03,895 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 208
271
+ 2025-09-18 23:04:04,418 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9895
272
+ 2025-09-18 23:04:08,992 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.9832
273
+ 2025-09-18 23:04:13,588 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0817 | Val mean-roc_auc_score: 0.9882
274
+ 2025-09-18 23:04:18,573 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0589 | Val mean-roc_auc_score: 0.9898
275
+ 2025-09-18 23:04:18,754 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 364
276
+ 2025-09-18 23:04:19,289 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.9898
277
+ 2025-09-18 23:04:23,911 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0242 | Val mean-roc_auc_score: 0.9895
278
+ 2025-09-18 23:04:28,533 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0178 | Val mean-roc_auc_score: 0.9907
279
+ 2025-09-18 23:04:28,710 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 468
280
+ 2025-09-18 23:04:29,235 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.9907
281
+ 2025-09-18 23:04:34,472 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9883
282
+ 2025-09-18 23:04:39,687 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.9914
283
+ 2025-09-18 23:04:40,196 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 572
284
+ 2025-09-18 23:04:40,717 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9914
285
+ 2025-09-18 23:04:46,237 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0133 | Val mean-roc_auc_score: 0.9908
286
+ 2025-09-18 23:04:51,742 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9910
287
+ 2025-09-18 23:04:57,065 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9908
288
+ 2025-09-18 23:05:02,590 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0331 | Val mean-roc_auc_score: 0.9807
289
+ 2025-09-18 23:05:08,050 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0486 | Val mean-roc_auc_score: 0.9920
290
+ 2025-09-18 23:05:08,551 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 832
291
+ 2025-09-18 23:05:09,082 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 16 with val mean-roc_auc_score: 0.9920
292
+ 2025-09-18 23:05:14,344 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0308 | Val mean-roc_auc_score: 0.9923
293
+ 2025-09-18 23:05:14,519 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 884
294
+ 2025-09-18 23:05:15,049 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val mean-roc_auc_score: 0.9923
295
+ 2025-09-18 23:05:20,644 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0194 | Val mean-roc_auc_score: 0.9911
296
+ 2025-09-18 23:05:25,981 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.9885
297
+ 2025-09-18 23:05:32,558 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9908
298
+ 2025-09-18 23:05:37,773 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9906
299
+ 2025-09-18 23:05:43,385 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9908
300
+ 2025-09-18 23:05:48,681 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9891
301
+ 2025-09-18 23:05:53,900 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9918
302
+ 2025-09-18 23:05:59,510 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9917
303
+ 2025-09-18 23:06:05,027 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9922
304
+ 2025-09-18 23:06:10,670 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9921
305
+ 2025-09-18 23:06:16,032 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9923
306
+ 2025-09-18 23:06:21,146 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9911
307
+ 2025-09-18 23:06:26,527 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0150 | Val mean-roc_auc_score: 0.9879
308
+ 2025-09-18 23:06:31,744 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0299 | Val mean-roc_auc_score: 0.9847
309
+ 2025-09-18 23:06:37,466 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9855
310
+ 2025-09-18 23:06:42,764 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.9880
311
+ 2025-09-18 23:06:48,090 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9890
312
+ 2025-09-18 23:06:53,386 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9869
313
+ 2025-09-18 23:06:58,682 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9876
314
+ 2025-09-18 23:07:04,306 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9884
315
+ 2025-09-18 23:07:09,556 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9888
316
+ 2025-09-18 23:07:15,351 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9890
317
+ 2025-09-18 23:07:19,911 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.9897
318
+ 2025-09-18 23:07:24,393 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9900
319
+ 2025-09-18 23:07:29,280 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9905
320
+ 2025-09-18 23:07:33,798 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9905
321
+ 2025-09-18 23:07:38,341 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9893
322
+ 2025-09-18 23:07:42,973 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9892
323
+ 2025-09-18 23:07:47,493 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9898
324
+ 2025-09-18 23:07:52,605 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9897
325
+ 2025-09-18 23:07:57,564 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9901
326
+ 2025-09-18 23:08:04,433 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9894
327
+ 2025-09-18 23:08:09,191 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.9900
328
+ 2025-09-18 23:08:15,935 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9883
329
+ 2025-09-18 23:08:22,137 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.9919
330
+ 2025-09-18 23:08:28,078 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9910
331
+ 2025-09-18 23:08:34,852 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9906
332
+ 2025-09-18 23:08:39,792 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.9917
333
+ 2025-09-18 23:08:44,413 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.9859
334
+ 2025-09-18 23:08:49,239 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0520 | Val mean-roc_auc_score: 0.9909
335
+ 2025-09-18 23:08:54,808 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0165 | Val mean-roc_auc_score: 0.9908
336
+ 2025-09-18 23:08:59,447 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9923
337
+ 2025-09-18 23:09:03,987 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9921
338
+ 2025-09-18 23:09:08,698 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9923
339
+ 2025-09-18 23:09:13,581 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9924
340
+ 2025-09-18 23:09:13,724 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3224
341
+ 2025-09-18 23:09:14,281 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 62 with val mean-roc_auc_score: 0.9924
342
+ 2025-09-18 23:09:18,921 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9925
343
+ 2025-09-18 23:09:19,106 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3276
344
+ 2025-09-18 23:09:19,646 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 63 with val mean-roc_auc_score: 0.9925
345
+ 2025-09-18 23:09:24,182 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9926
346
+ 2025-09-18 23:09:24,365 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3328
347
+ 2025-09-18 23:09:24,906 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 64 with val mean-roc_auc_score: 0.9926
348
+ 2025-09-18 23:09:29,385 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9927
349
+ 2025-09-18 23:09:29,563 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3380
350
+ 2025-09-18 23:09:30,107 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 65 with val mean-roc_auc_score: 0.9927
351
+ 2025-09-18 23:09:34,566 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9929
352
+ 2025-09-18 23:09:35,068 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3432
353
+ 2025-09-18 23:09:35,627 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 66 with val mean-roc_auc_score: 0.9929
354
+ 2025-09-18 23:09:40,283 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9927
355
+ 2025-09-18 23:09:44,941 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9929
356
+ 2025-09-18 23:09:49,609 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9926
357
+ 2025-09-18 23:09:54,088 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.9928
358
+ 2025-09-18 23:09:58,583 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9928
359
+ 2025-09-18 23:10:03,417 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9928
360
+ 2025-09-18 23:10:07,904 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9928
361
+ 2025-09-18 23:10:12,471 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9928
362
+ 2025-09-18 23:10:17,105 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9936
363
+ 2025-09-18 23:10:17,251 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3900
364
+ 2025-09-18 23:10:17,791 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 75 with val mean-roc_auc_score: 0.9936
365
+ 2025-09-18 23:10:22,421 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9937
366
+ 2025-09-18 23:10:22,940 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Global step of best model: 3952
367
+ 2025-09-18 23:10:23,501 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Best model saved at epoch 76 with val mean-roc_auc_score: 0.9937
368
+ 2025-09-18 23:10:28,940 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0008 | Val mean-roc_auc_score: 0.9935
369
+ 2025-09-18 23:10:33,397 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.9934
370
+ 2025-09-18 23:10:37,899 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9935
371
+ 2025-09-18 23:10:42,435 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.9934
372
+ 2025-09-18 23:10:47,042 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9931
373
+ 2025-09-18 23:10:51,917 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9933
374
+ 2025-09-18 23:10:56,658 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.9933
375
+ 2025-09-18 23:11:01,223 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9930
376
+ 2025-09-18 23:11:05,957 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9933
377
+ 2025-09-18 23:11:10,805 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9930
378
+ 2025-09-18 23:11:16,090 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9928
379
+ 2025-09-18 23:11:21,889 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.9930
380
+ 2025-09-18 23:11:27,567 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0014 | Val mean-roc_auc_score: 0.9931
381
+ 2025-09-18 23:11:33,064 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9932
382
+ 2025-09-18 23:11:38,673 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9932
383
+ 2025-09-18 23:11:44,458 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9930
384
+ 2025-09-18 23:11:49,906 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9930
385
+ 2025-09-18 23:11:55,282 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.9933
386
+ 2025-09-18 23:12:00,287 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9930
387
+ 2025-09-18 23:12:05,646 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.9930
388
+ 2025-09-18 23:12:12,210 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9932
389
+ 2025-09-18 23:12:17,656 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.9932
390
+ 2025-09-18 23:12:23,200 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.9932
391
+ 2025-09-18 23:12:28,638 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.9933
392
+ 2025-09-18 23:12:29,075 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7275
393
+ 2025-09-18 23:12:29,455 - logs_modchembert_bbbp_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7402, Std Dev: 0.0095
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_clintox_epochs100_batch_size32_20250919_005825.log ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-19 00:58:25,257 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Running benchmark for dataset: clintox
2
+ 2025-09-19 00:58:25,257 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - dataset: clintox, tasks: ['FDA_APPROVED', 'CT_TOX'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-19 00:58:25,270 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clintox at 2025-09-19_00-58-25
4
+ 2025-09-19 00:58:30,448 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1115 | Val mean-roc_auc_score: 0.9711
5
+ 2025-09-19 00:58:30,448 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
6
+ 2025-09-19 00:58:30,974 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9711
7
+ 2025-09-19 00:58:36,759 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0380 | Val mean-roc_auc_score: 0.9856
8
+ 2025-09-19 00:58:36,905 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
9
+ 2025-09-19 00:58:37,433 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9856
10
+ 2025-09-19 00:58:43,060 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0266 | Val mean-roc_auc_score: 0.9656
11
+ 2025-09-19 00:58:49,082 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0298 | Val mean-roc_auc_score: 0.9766
12
+ 2025-09-19 00:58:55,105 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0283 | Val mean-roc_auc_score: 0.9835
13
+ 2025-09-19 00:59:01,267 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0180 | Val mean-roc_auc_score: 0.9857
14
+ 2025-09-19 00:59:01,920 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
15
+ 2025-09-19 00:59:02,497 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9857
16
+ 2025-09-19 00:59:08,420 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0209 | Val mean-roc_auc_score: 0.9846
17
+ 2025-09-19 00:59:14,315 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0179 | Val mean-roc_auc_score: 0.9851
18
+ 2025-09-19 00:59:20,090 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0157 | Val mean-roc_auc_score: 0.9852
19
+ 2025-09-19 00:59:26,477 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.9863
20
+ 2025-09-19 00:59:26,620 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 370
21
+ 2025-09-19 00:59:27,148 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9863
22
+ 2025-09-19 00:59:32,898 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9887
23
+ 2025-09-19 00:59:33,507 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 407
24
+ 2025-09-19 00:59:34,054 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val mean-roc_auc_score: 0.9887
25
+ 2025-09-19 00:59:39,671 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9869
26
+ 2025-09-19 00:59:45,455 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9850
27
+ 2025-09-19 00:59:51,495 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0143 | Val mean-roc_auc_score: 0.9875
28
+ 2025-09-19 00:59:57,699 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0175 | Val mean-roc_auc_score: 0.9875
29
+ 2025-09-19 01:00:03,902 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0272 | Val mean-roc_auc_score: 0.9800
30
+ 2025-09-19 01:00:10,352 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.9765
31
+ 2025-09-19 01:00:16,401 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0268 | Val mean-roc_auc_score: 0.9858
32
+ 2025-09-19 01:00:22,719 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0286 | Val mean-roc_auc_score: 0.9859
33
+ 2025-09-19 01:00:28,849 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.9869
34
+ 2025-09-19 01:00:34,970 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.9881
35
+ 2025-09-19 01:00:41,316 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.9881
36
+ 2025-09-19 01:00:47,459 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.9893
37
+ 2025-09-19 01:00:47,603 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 851
38
+ 2025-09-19 01:00:48,143 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 23 with val mean-roc_auc_score: 0.9893
39
+ 2025-09-19 01:00:53,835 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0184 | Val mean-roc_auc_score: 0.9858
40
+ 2025-09-19 01:00:59,934 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9881
41
+ 2025-09-19 01:01:05,935 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9887
42
+ 2025-09-19 01:01:13,090 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.9893
43
+ 2025-09-19 01:01:18,686 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0103 | Val mean-roc_auc_score: 0.9903
44
+ 2025-09-19 01:01:18,830 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1036
45
+ 2025-09-19 01:01:19,373 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 28 with val mean-roc_auc_score: 0.9903
46
+ 2025-09-19 01:01:24,943 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0113 | Val mean-roc_auc_score: 0.9897
47
+ 2025-09-19 01:01:30,418 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0170 | Val mean-roc_auc_score: 0.9891
48
+ 2025-09-19 01:01:36,707 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9903
49
+ 2025-09-19 01:01:43,959 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9901
50
+ 2025-09-19 01:01:50,775 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0093 | Val mean-roc_auc_score: 0.9914
51
+ 2025-09-19 01:01:50,921 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1221
52
+ 2025-09-19 01:01:51,488 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 33 with val mean-roc_auc_score: 0.9914
53
+ 2025-09-19 01:01:57,123 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9887
54
+ 2025-09-19 01:02:02,414 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0198 | Val mean-roc_auc_score: 0.9937
55
+ 2025-09-19 01:02:02,559 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1295
56
+ 2025-09-19 01:02:03,118 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 35 with val mean-roc_auc_score: 0.9937
57
+ 2025-09-19 01:02:09,301 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.9898
58
+ 2025-09-19 01:02:15,984 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9931
59
+ 2025-09-19 01:02:22,074 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9887
60
+ 2025-09-19 01:02:27,557 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.9926
61
+ 2025-09-19 01:02:33,224 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9893
62
+ 2025-09-19 01:02:39,337 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9864
63
+ 2025-09-19 01:02:46,346 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0099 | Val mean-roc_auc_score: 0.9915
64
+ 2025-09-19 01:02:51,853 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9898
65
+ 2025-09-19 01:02:57,626 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.9894
66
+ 2025-09-19 01:03:03,734 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9894
67
+ 2025-09-19 01:03:10,178 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9925
68
+ 2025-09-19 01:03:17,011 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0225 | Val mean-roc_auc_score: 0.9920
69
+ 2025-09-19 01:03:22,850 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0194 | Val mean-roc_auc_score: 0.9903
70
+ 2025-09-19 01:03:28,736 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9909
71
+ 2025-09-19 01:03:34,411 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9915
72
+ 2025-09-19 01:03:40,889 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9883
73
+ 2025-09-19 01:03:47,513 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9878
74
+ 2025-09-19 01:03:53,011 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9921
75
+ 2025-09-19 01:03:59,007 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9888
76
+ 2025-09-19 01:04:05,679 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9899
77
+ 2025-09-19 01:04:12,146 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9887
78
+ 2025-09-19 01:04:19,152 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0007 | Val mean-roc_auc_score: 0.9872
79
+ 2025-09-19 01:04:25,073 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9861
80
+ 2025-09-19 01:04:30,728 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9872
81
+ 2025-09-19 01:04:36,673 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.9893
82
+ 2025-09-19 01:04:43,086 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0186 | Val mean-roc_auc_score: 0.9909
83
+ 2025-09-19 01:04:49,845 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9895
84
+ 2025-09-19 01:04:55,965 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9890
85
+ 2025-09-19 01:05:01,634 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9879
86
+ 2025-09-19 01:05:07,859 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9868
87
+ 2025-09-19 01:05:14,058 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9865
88
+ 2025-09-19 01:05:20,800 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9865
89
+ 2025-09-19 01:05:27,086 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9863
90
+ 2025-09-19 01:05:33,272 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9863
91
+ 2025-09-19 01:05:38,897 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9728
92
+ 2025-09-19 01:05:45,099 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9857
93
+ 2025-09-19 01:05:51,398 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9923
94
+ 2025-09-19 01:05:57,690 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0004 | Val mean-roc_auc_score: 0.9879
95
+ 2025-09-19 01:06:04,792 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9871
96
+ 2025-09-19 01:06:11,372 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9825
97
+ 2025-09-19 01:06:17,440 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9874
98
+ 2025-09-19 01:06:23,916 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9869
99
+ 2025-09-19 01:06:29,790 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9870
100
+ 2025-09-19 01:06:36,392 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9863
101
+ 2025-09-19 01:06:43,166 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9770
102
+ 2025-09-19 01:06:49,058 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9880
103
+ 2025-09-19 01:06:56,875 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9901
104
+ 2025-09-19 01:07:02,545 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9682
105
+ 2025-09-19 01:07:08,570 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9886
106
+ 2025-09-19 01:07:15,044 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9872
107
+ 2025-09-19 01:07:21,599 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9767
108
+ 2025-09-19 01:07:28,033 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9445
109
+ 2025-09-19 01:07:33,701 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9784
110
+ 2025-09-19 01:07:39,769 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0113 | Val mean-roc_auc_score: 0.9922
111
+ 2025-09-19 01:07:46,183 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0189 | Val mean-roc_auc_score: 0.9883
112
+ 2025-09-19 01:07:52,851 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0154 | Val mean-roc_auc_score: 0.9890
113
+ 2025-09-19 01:08:00,158 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.9912
114
+ 2025-09-19 01:08:05,864 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9904
115
+ 2025-09-19 01:08:11,510 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9896
116
+ 2025-09-19 01:08:17,510 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9896
117
+ 2025-09-19 01:08:23,991 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9901
118
+ 2025-09-19 01:08:30,890 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9886
119
+ 2025-09-19 01:08:37,022 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9833
120
+ 2025-09-19 01:08:42,499 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9880
121
+ 2025-09-19 01:08:48,753 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9862
122
+ 2025-09-19 01:08:49,876 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9912
123
+ 2025-09-19 01:08:50,389 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clintox at 2025-09-19_01-08-50
124
+ 2025-09-19 01:08:55,467 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1233 | Val mean-roc_auc_score: 0.9758
125
+ 2025-09-19 01:08:55,467 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
126
+ 2025-09-19 01:08:56,016 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9758
127
+ 2025-09-19 01:09:02,295 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0389 | Val mean-roc_auc_score: 0.9795
128
+ 2025-09-19 01:09:02,427 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
129
+ 2025-09-19 01:09:02,945 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9795
130
+ 2025-09-19 01:09:09,095 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0263 | Val mean-roc_auc_score: 0.9841
131
+ 2025-09-19 01:09:09,268 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 111
132
+ 2025-09-19 01:09:09,806 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.9841
133
+ 2025-09-19 01:09:15,733 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0244 | Val mean-roc_auc_score: 0.9827
134
+ 2025-09-19 01:09:21,861 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0260 | Val mean-roc_auc_score: 0.9852
135
+ 2025-09-19 01:09:22,040 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 185
136
+ 2025-09-19 01:09:22,582 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9852
137
+ 2025-09-19 01:09:28,486 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0210 | Val mean-roc_auc_score: 0.9898
138
+ 2025-09-19 01:09:29,094 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 222
139
+ 2025-09-19 01:09:29,640 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.9898
140
+ 2025-09-19 01:09:35,195 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0167 | Val mean-roc_auc_score: 0.9881
141
+ 2025-09-19 01:09:41,017 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.9850
142
+ 2025-09-19 01:09:47,299 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0145 | Val mean-roc_auc_score: 0.9859
143
+ 2025-09-19 01:09:53,610 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0128 | Val mean-roc_auc_score: 0.9882
144
+ 2025-09-19 01:10:00,204 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0130 | Val mean-roc_auc_score: 0.9865
145
+ 2025-09-19 01:10:06,870 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0116 | Val mean-roc_auc_score: 0.9877
146
+ 2025-09-19 01:10:12,366 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.9893
147
+ 2025-09-19 01:10:18,153 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0171 | Val mean-roc_auc_score: 0.9893
148
+ 2025-09-19 01:10:24,426 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.9887
149
+ 2025-09-19 01:10:31,027 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.9877
150
+ 2025-09-19 01:10:38,046 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0241 | Val mean-roc_auc_score: 0.9909
151
+ 2025-09-19 01:10:38,202 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 629
152
+ 2025-09-19 01:10:38,745 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 17 with val mean-roc_auc_score: 0.9909
153
+ 2025-09-19 01:10:44,864 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0378 | Val mean-roc_auc_score: 0.9904
154
+ 2025-09-19 01:10:50,760 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1009 | Val mean-roc_auc_score: 0.9877
155
+ 2025-09-19 01:10:56,493 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0228 | Val mean-roc_auc_score: 0.9910
156
+ 2025-09-19 01:10:56,651 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 740
157
+ 2025-09-19 01:10:57,189 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val mean-roc_auc_score: 0.9910
158
+ 2025-09-19 01:11:02,720 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0129 | Val mean-roc_auc_score: 0.9894
159
+ 2025-09-19 01:11:09,388 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0176 | Val mean-roc_auc_score: 0.9909
160
+ 2025-09-19 01:11:15,445 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9905
161
+ 2025-09-19 01:11:21,427 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9900
162
+ 2025-09-19 01:11:27,487 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0084 | Val mean-roc_auc_score: 0.9899
163
+ 2025-09-19 01:11:33,308 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9894
164
+ 2025-09-19 01:11:40,619 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9911
165
+ 2025-09-19 01:11:40,778 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 999
166
+ 2025-09-19 01:11:41,339 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val mean-roc_auc_score: 0.9911
167
+ 2025-09-19 01:11:47,104 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9878
168
+ 2025-09-19 01:11:53,421 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9844
169
+ 2025-09-19 01:12:00,079 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.9877
170
+ 2025-09-19 01:12:05,733 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9889
171
+ 2025-09-19 01:12:11,931 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.9916
172
+ 2025-09-19 01:12:12,076 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1184
173
+ 2025-09-19 01:12:12,620 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 32 with val mean-roc_auc_score: 0.9916
174
+ 2025-09-19 01:12:18,145 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.9880
175
+ 2025-09-19 01:12:23,946 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9882
176
+ 2025-09-19 01:12:30,361 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9889
177
+ 2025-09-19 01:12:36,333 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.9905
178
+ 2025-09-19 01:12:42,136 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9931
179
+ 2025-09-19 01:12:42,283 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1369
180
+ 2025-09-19 01:12:42,822 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 37 with val mean-roc_auc_score: 0.9931
181
+ 2025-09-19 01:12:48,605 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9910
182
+ 2025-09-19 01:12:54,997 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9899
183
+ 2025-09-19 01:13:01,635 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9900
184
+ 2025-09-19 01:13:07,701 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0206 | Val mean-roc_auc_score: 0.9938
185
+ 2025-09-19 01:13:08,332 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1517
186
+ 2025-09-19 01:13:08,877 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 41 with val mean-roc_auc_score: 0.9938
187
+ 2025-09-19 01:13:14,969 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.9938
188
+ 2025-09-19 01:13:20,735 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.9910
189
+ 2025-09-19 01:13:26,675 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.9877
190
+ 2025-09-19 01:13:32,766 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0097 | Val mean-roc_auc_score: 0.9883
191
+ 2025-09-19 01:13:39,389 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.9888
192
+ 2025-09-19 01:13:45,932 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.9883
193
+ 2025-09-19 01:13:51,356 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9888
194
+ 2025-09-19 01:13:57,580 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9888
195
+ 2025-09-19 01:14:03,803 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9872
196
+ 2025-09-19 01:14:10,216 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9882
197
+ 2025-09-19 01:14:16,882 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9883
198
+ 2025-09-19 01:14:22,842 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9872
199
+ 2025-09-19 01:14:28,793 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9883
200
+ 2025-09-19 01:14:35,465 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9883
201
+ 2025-09-19 01:14:41,951 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9921
202
+ 2025-09-19 01:14:48,675 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.9939
203
+ 2025-09-19 01:14:48,818 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2109
204
+ 2025-09-19 01:14:49,355 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 57 with val mean-roc_auc_score: 0.9939
205
+ 2025-09-19 01:14:55,790 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9911
206
+ 2025-09-19 01:15:01,571 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.9916
207
+ 2025-09-19 01:15:07,613 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9894
208
+ 2025-09-19 01:15:13,544 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.9916
209
+ 2025-09-19 01:15:20,109 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.9805
210
+ 2025-09-19 01:15:26,466 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0297 | Val mean-roc_auc_score: 0.9829
211
+ 2025-09-19 01:15:33,211 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0202 | Val mean-roc_auc_score: 0.9930
212
+ 2025-09-19 01:15:39,099 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0178 | Val mean-roc_auc_score: 0.9908
213
+ 2025-09-19 01:15:44,985 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.9884
214
+ 2025-09-19 01:15:51,328 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.9908
215
+ 2025-09-19 01:15:57,924 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0110 | Val mean-roc_auc_score: 0.9916
216
+ 2025-09-19 01:16:04,824 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.9888
217
+ 2025-09-19 01:16:11,824 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.9894
218
+ 2025-09-19 01:16:18,369 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9899
219
+ 2025-09-19 01:16:24,590 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9900
220
+ 2025-09-19 01:16:30,821 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9905
221
+ 2025-09-19 01:16:36,992 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9904
222
+ 2025-09-19 01:16:43,496 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9815
223
+ 2025-09-19 01:16:49,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.9763
224
+ 2025-09-19 01:16:55,824 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9794
225
+ 2025-09-19 01:17:01,888 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.9935
226
+ 2025-09-19 01:17:07,701 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.9894
227
+ 2025-09-19 01:17:14,436 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9884
228
+ 2025-09-19 01:17:21,271 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9870
229
+ 2025-09-19 01:17:28,649 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9884
230
+ 2025-09-19 01:17:35,109 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.9879
231
+ 2025-09-19 01:17:41,836 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.9889
232
+ 2025-09-19 01:17:48,036 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9862
233
+ 2025-09-19 01:17:53,788 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.9928
234
+ 2025-09-19 01:18:00,278 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9894
235
+ 2025-09-19 01:18:06,283 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9863
236
+ 2025-09-19 01:18:12,988 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9939
237
+ 2025-09-19 01:18:19,680 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9895
238
+ 2025-09-19 01:18:25,836 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9885
239
+ 2025-09-19 01:18:31,758 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9873
240
+ 2025-09-19 01:18:37,873 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9873
241
+ 2025-09-19 01:18:44,140 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9894
242
+ 2025-09-19 01:18:50,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9862
243
+ 2025-09-19 01:18:57,270 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.9878
244
+ 2025-09-19 01:19:03,710 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9873
245
+ 2025-09-19 01:19:09,312 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9853
246
+ 2025-09-19 01:19:15,311 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9875
247
+ 2025-09-19 01:19:21,788 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9836
248
+ 2025-09-19 01:19:22,985 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9626
249
+ 2025-09-19 01:19:23,498 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clintox at 2025-09-19_01-19-23
250
+ 2025-09-19 01:19:28,907 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1149 | Val mean-roc_auc_score: 0.9696
251
+ 2025-09-19 01:19:28,907 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 37
252
+ 2025-09-19 01:19:29,426 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.9696
253
+ 2025-09-19 01:19:36,029 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0367 | Val mean-roc_auc_score: 0.9815
254
+ 2025-09-19 01:19:36,180 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 74
255
+ 2025-09-19 01:19:36,737 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.9815
256
+ 2025-09-19 01:19:42,884 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0236 | Val mean-roc_auc_score: 0.9808
257
+ 2025-09-19 01:19:48,696 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0268 | Val mean-roc_auc_score: 0.9857
258
+ 2025-09-19 01:19:48,882 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 148
259
+ 2025-09-19 01:19:49,430 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.9857
260
+ 2025-09-19 01:19:55,140 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0289 | Val mean-roc_auc_score: 0.9886
261
+ 2025-09-19 01:19:55,319 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 185
262
+ 2025-09-19 01:19:55,858 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.9886
263
+ 2025-09-19 01:20:01,254 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.9868
264
+ 2025-09-19 01:20:07,444 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0188 | Val mean-roc_auc_score: 0.9827
265
+ 2025-09-19 01:20:13,628 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0170 | Val mean-roc_auc_score: 0.9874
266
+ 2025-09-19 01:20:20,297 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0195 | Val mean-roc_auc_score: 0.9863
267
+ 2025-09-19 01:20:26,314 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.9887
268
+ 2025-09-19 01:20:26,499 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 370
269
+ 2025-09-19 01:20:27,039 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.9887
270
+ 2025-09-19 01:20:32,558 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.9881
271
+ 2025-09-19 01:20:38,573 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0202 | Val mean-roc_auc_score: 0.9870
272
+ 2025-09-19 01:20:44,682 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0157 | Val mean-roc_auc_score: 0.9875
273
+ 2025-09-19 01:20:51,381 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0170 | Val mean-roc_auc_score: 0.9871
274
+ 2025-09-19 01:20:57,672 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0121 | Val mean-roc_auc_score: 0.9849
275
+ 2025-09-19 01:21:03,372 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0136 | Val mean-roc_auc_score: 0.9839
276
+ 2025-09-19 01:21:09,337 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0086 | Val mean-roc_auc_score: 0.9867
277
+ 2025-09-19 01:21:15,614 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0149 | Val mean-roc_auc_score: 0.9928
278
+ 2025-09-19 01:21:15,761 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 666
279
+ 2025-09-19 01:21:16,303 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 18 with val mean-roc_auc_score: 0.9928
280
+ 2025-09-19 01:21:22,733 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0417 | Val mean-roc_auc_score: 0.9887
281
+ 2025-09-19 01:21:29,171 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0147 | Val mean-roc_auc_score: 0.9881
282
+ 2025-09-19 01:21:34,658 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0118 | Val mean-roc_auc_score: 0.9887
283
+ 2025-09-19 01:21:40,637 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0140 | Val mean-roc_auc_score: 0.9880
284
+ 2025-09-19 01:21:46,714 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.9854
285
+ 2025-09-19 01:21:53,303 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0192 | Val mean-roc_auc_score: 0.9782
286
+ 2025-09-19 01:21:59,378 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0197 | Val mean-roc_auc_score: 0.9824
287
+ 2025-09-19 01:22:05,065 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0186 | Val mean-roc_auc_score: 0.9725
288
+ 2025-09-19 01:22:12,438 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9575
289
+ 2025-09-19 01:22:18,086 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.9761
290
+ 2025-09-19 01:22:24,463 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0083 | Val mean-roc_auc_score: 0.9801
291
+ 2025-09-19 01:22:31,098 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9843
292
+ 2025-09-19 01:22:37,157 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.9796
293
+ 2025-09-19 01:22:43,361 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0156 | Val mean-roc_auc_score: 0.9867
294
+ 2025-09-19 01:22:49,679 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0143 | Val mean-roc_auc_score: 0.9859
295
+ 2025-09-19 01:22:55,831 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0092 | Val mean-roc_auc_score: 0.9864
296
+ 2025-09-19 01:23:02,299 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.9900
297
+ 2025-09-19 01:23:09,094 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.9894
298
+ 2025-09-19 01:23:16,014 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.9847
299
+ 2025-09-19 01:23:21,984 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.9878
300
+ 2025-09-19 01:23:27,785 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.9829
301
+ 2025-09-19 01:23:34,031 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9822
302
+ 2025-09-19 01:23:40,373 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.9803
303
+ 2025-09-19 01:23:47,265 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.9849
304
+ 2025-09-19 01:23:53,486 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0126 | Val mean-roc_auc_score: 0.9847
305
+ 2025-09-19 01:23:58,936 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0170 | Val mean-roc_auc_score: 0.9892
306
+ 2025-09-19 01:24:05,104 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0228 | Val mean-roc_auc_score: 0.9920
307
+ 2025-09-19 01:24:11,310 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.9931
308
+ 2025-09-19 01:24:11,916 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 1702
309
+ 2025-09-19 01:24:12,463 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 46 with val mean-roc_auc_score: 0.9931
310
+ 2025-09-19 01:24:18,550 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9920
311
+ 2025-09-19 01:24:24,791 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9893
312
+ 2025-09-19 01:24:30,975 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.9925
313
+ 2025-09-19 01:24:36,411 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.9920
314
+ 2025-09-19 01:24:42,519 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.9904
315
+ 2025-09-19 01:24:48,821 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9903
316
+ 2025-09-19 01:24:54,302 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9892
317
+ 2025-09-19 01:24:59,493 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9904
318
+ 2025-09-19 01:25:06,077 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.9902
319
+ 2025-09-19 01:25:11,915 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9875
320
+ 2025-09-19 01:25:18,174 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.9899
321
+ 2025-09-19 01:25:23,327 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.9924
322
+ 2025-09-19 01:25:27,948 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.9929
323
+ 2025-09-19 01:25:32,986 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.9915
324
+ 2025-09-19 01:25:38,565 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.9893
325
+ 2025-09-19 01:25:44,627 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.9920
326
+ 2025-09-19 01:25:49,980 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9914
327
+ 2025-09-19 01:25:54,891 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.9925
328
+ 2025-09-19 01:25:59,763 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9925
329
+ 2025-09-19 01:26:04,715 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9925
330
+ 2025-09-19 01:26:10,915 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9903
331
+ 2025-09-19 01:26:16,381 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0135 | Val mean-roc_auc_score: 0.9891
332
+ 2025-09-19 01:26:21,277 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0129 | Val mean-roc_auc_score: 0.9904
333
+ 2025-09-19 01:26:26,082 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.9931
334
+ 2025-09-19 01:26:31,328 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.9909
335
+ 2025-09-19 01:26:37,350 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.9907
336
+ 2025-09-19 01:26:43,081 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9909
337
+ 2025-09-19 01:26:47,647 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.9909
338
+ 2025-09-19 01:26:52,461 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.9898
339
+ 2025-09-19 01:26:57,723 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9877
340
+ 2025-09-19 01:27:04,014 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.9895
341
+ 2025-09-19 01:27:09,624 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0107 | Val mean-roc_auc_score: 0.9922
342
+ 2025-09-19 01:27:14,630 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0106 | Val mean-roc_auc_score: 0.9949
343
+ 2025-09-19 01:27:14,782 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2923
344
+ 2025-09-19 01:27:15,331 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 79 with val mean-roc_auc_score: 0.9949
345
+ 2025-09-19 01:27:19,932 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.9891
346
+ 2025-09-19 01:27:25,053 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0081 | Val mean-roc_auc_score: 0.9953
347
+ 2025-09-19 01:27:25,713 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Global step of best model: 2997
348
+ 2025-09-19 01:27:26,265 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Best model saved at epoch 81 with val mean-roc_auc_score: 0.9953
349
+ 2025-09-19 01:27:32,417 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0213 | Val mean-roc_auc_score: 0.9900
350
+ 2025-09-19 01:27:37,213 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.9887
351
+ 2025-09-19 01:27:42,313 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.9893
352
+ 2025-09-19 01:27:48,021 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.9904
353
+ 2025-09-19 01:27:53,594 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.9844
354
+ 2025-09-19 01:27:59,449 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.9894
355
+ 2025-09-19 01:28:04,506 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9810
356
+ 2025-09-19 01:28:09,505 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0073 | Val mean-roc_auc_score: 0.9909
357
+ 2025-09-19 01:28:14,367 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9904
358
+ 2025-09-19 01:28:19,892 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.9904
359
+ 2025-09-19 01:28:26,302 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0002 | Val mean-roc_auc_score: 0.9904
360
+ 2025-09-19 01:28:31,252 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.9904
361
+ 2025-09-19 01:28:35,905 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.9899
362
+ 2025-09-19 01:28:41,290 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.9887
363
+ 2025-09-19 01:28:47,043 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.9857
364
+ 2025-09-19 01:28:52,864 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.9853
365
+ 2025-09-19 01:28:57,694 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.9899
366
+ 2025-09-19 01:29:02,560 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.9899
367
+ 2025-09-19 01:29:07,874 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.9904
368
+ 2025-09-19 01:29:08,700 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.9924
369
+ 2025-09-19 01:29:09,192 - logs_modchembert_clintox_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.9820, Std Dev: 0.0138
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_hiv_epochs100_batch_size32_20250922_102847.log ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-22 10:28:47,515 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Running benchmark for dataset: hiv
2
+ 2025-09-22 10:28:47,516 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - dataset: hiv, tasks: ['HIV_active'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-22 10:28:47,521 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset hiv at 2025-09-22_10-28-47
4
+ 2025-09-22 10:30:26,847 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1308 | Val mean-roc_auc_score: 0.8130
5
+ 2025-09-22 10:30:26,847 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
6
+ 2025-09-22 10:30:27,367 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8130
7
+ 2025-09-22 10:32:07,633 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1047 | Val mean-roc_auc_score: 0.8126
8
+ 2025-09-22 10:33:46,520 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1250 | Val mean-roc_auc_score: 0.8233
9
+ 2025-09-22 10:33:46,664 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 3081
10
+ 2025-09-22 10:33:47,193 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.8233
11
+ 2025-09-22 10:35:28,160 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.8289
12
+ 2025-09-22 10:35:28,316 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 4108
13
+ 2025-09-22 10:35:28,870 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.8289
14
+ 2025-09-22 10:37:09,152 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0679 | Val mean-roc_auc_score: 0.8282
15
+ 2025-09-22 10:38:49,226 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0532 | Val mean-roc_auc_score: 0.8285
16
+ 2025-09-22 10:40:29,299 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0448 | Val mean-roc_auc_score: 0.8204
17
+ 2025-09-22 10:42:10,394 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0264 | Val mean-roc_auc_score: 0.8392
18
+ 2025-09-22 10:42:10,554 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 8216
19
+ 2025-09-22 10:42:11,130 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.8392
20
+ 2025-09-22 10:43:51,497 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0371 | Val mean-roc_auc_score: 0.8091
21
+ 2025-09-22 10:45:31,755 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0301 | Val mean-roc_auc_score: 0.8061
22
+ 2025-09-22 10:47:12,010 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0164 | Val mean-roc_auc_score: 0.8106
23
+ 2025-09-22 10:48:53,721 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0158 | Val mean-roc_auc_score: 0.8339
24
+ 2025-09-22 10:50:34,309 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0176 | Val mean-roc_auc_score: 0.8149
25
+ 2025-09-22 10:52:15,421 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.8255
26
+ 2025-09-22 10:53:55,924 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.8169
27
+ 2025-09-22 10:55:36,322 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.8321
28
+ 2025-09-22 10:57:17,675 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0211 | Val mean-roc_auc_score: 0.8391
29
+ 2025-09-22 10:58:56,865 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0169 | Val mean-roc_auc_score: 0.8147
30
+ 2025-09-22 11:00:36,745 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.8232
31
+ 2025-09-22 11:02:17,436 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.8143
32
+ 2025-09-22 11:03:58,500 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0117 | Val mean-roc_auc_score: 0.8302
33
+ 2025-09-22 11:05:38,078 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.8194
34
+ 2025-09-22 11:07:19,535 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.8160
35
+ 2025-09-22 11:08:59,346 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.8038
36
+ 2025-09-22 11:10:39,656 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0087 | Val mean-roc_auc_score: 0.8096
37
+ 2025-09-22 11:12:19,718 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0148 | Val mean-roc_auc_score: 0.8096
38
+ 2025-09-22 11:13:59,772 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.8184
39
+ 2025-09-22 11:15:40,937 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.8169
40
+ 2025-09-22 11:17:20,673 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.8112
41
+ 2025-09-22 11:19:01,076 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8182
42
+ 2025-09-22 11:20:41,084 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8185
43
+ 2025-09-22 11:22:22,096 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0068 | Val mean-roc_auc_score: 0.8086
44
+ 2025-09-22 11:24:01,721 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8158
45
+ 2025-09-22 11:25:42,250 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.8118
46
+ 2025-09-22 11:27:23,529 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8074
47
+ 2025-09-22 11:29:05,244 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8153
48
+ 2025-09-22 11:30:46,310 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8089
49
+ 2025-09-22 11:32:26,932 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.8105
50
+ 2025-09-22 11:34:07,145 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0079 | Val mean-roc_auc_score: 0.8060
51
+ 2025-09-22 11:35:46,959 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8006
52
+ 2025-09-22 11:37:27,411 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8148
53
+ 2025-09-22 11:39:07,188 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8113
54
+ 2025-09-22 11:40:48,805 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.8119
55
+ 2025-09-22 11:42:28,197 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8169
56
+ 2025-09-22 11:44:09,094 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8104
57
+ 2025-09-22 11:45:49,185 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8095
58
+ 2025-09-22 11:47:30,243 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8118
59
+ 2025-09-22 11:49:09,811 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8176
60
+ 2025-09-22 11:50:50,790 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8141
61
+ 2025-09-22 11:52:30,630 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8157
62
+ 2025-09-22 11:54:11,805 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8139
63
+ 2025-09-22 11:55:52,135 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.8182
64
+ 2025-09-22 11:57:31,960 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8175
65
+ 2025-09-22 11:59:12,698 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.8202
66
+ 2025-09-22 12:00:52,123 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8174
67
+ 2025-09-22 12:02:33,453 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.8224
68
+ 2025-09-22 12:04:13,206 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8165
69
+ 2025-09-22 12:05:53,554 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8192
70
+ 2025-09-22 12:07:34,455 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8211
71
+ 2025-09-22 12:09:15,241 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.8195
72
+ 2025-09-22 12:10:54,325 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8166
73
+ 2025-09-22 12:12:35,654 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8147
74
+ 2025-09-22 12:14:16,152 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0000 | Val mean-roc_auc_score: 0.8137
75
+ 2025-09-22 12:15:56,776 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8220
76
+ 2025-09-22 12:17:36,962 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8179
77
+ 2025-09-22 12:19:17,148 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8205
78
+ 2025-09-22 12:20:58,632 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.8183
79
+ 2025-09-22 12:22:37,527 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8120
80
+ 2025-09-22 12:24:18,071 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8155
81
+ 2025-09-22 12:25:57,969 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8137
82
+ 2025-09-22 12:27:38,666 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.8158
83
+ 2025-09-22 12:29:18,225 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8133
84
+ 2025-09-22 12:30:58,508 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.8154
85
+ 2025-09-22 12:32:38,351 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8175
86
+ 2025-09-22 12:34:19,625 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0015 | Val mean-roc_auc_score: 0.8181
87
+ 2025-09-22 12:35:59,437 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8174
88
+ 2025-09-22 12:37:40,288 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8164
89
+ 2025-09-22 12:39:19,553 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8166
90
+ 2025-09-22 12:41:00,212 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8147
91
+ 2025-09-22 12:42:40,528 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.8187
92
+ 2025-09-22 12:44:19,857 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8209
93
+ 2025-09-22 12:46:00,682 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.8167
94
+ 2025-09-22 12:47:40,767 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0047 | Val mean-roc_auc_score: 0.8212
95
+ 2025-09-22 12:49:21,865 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.8187
96
+ 2025-09-22 12:50:59,842 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8171
97
+ 2025-09-22 12:52:41,015 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.8184
98
+ 2025-09-22 12:54:21,726 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.8232
99
+ 2025-09-22 12:56:02,729 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8172
100
+ 2025-09-22 12:57:42,659 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0010 | Val mean-roc_auc_score: 0.8196
101
+ 2025-09-22 12:59:23,518 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8177
102
+ 2025-09-22 13:01:03,314 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8171
103
+ 2025-09-22 13:02:43,528 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0027 | Val mean-roc_auc_score: 0.8192
104
+ 2025-09-22 13:04:23,611 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8171
105
+ 2025-09-22 13:06:03,759 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8153
106
+ 2025-09-22 13:07:43,858 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8154
107
+ 2025-09-22 13:09:22,883 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8158
108
+ 2025-09-22 13:11:04,022 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0077 | Val mean-roc_auc_score: 0.8160
109
+ 2025-09-22 13:12:43,685 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8192
110
+ 2025-09-22 13:14:24,012 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8159
111
+ 2025-09-22 13:16:03,615 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8169
112
+ 2025-09-22 13:16:08,999 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7677
113
+ 2025-09-22 13:16:09,471 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset hiv at 2025-09-22_13-16-09
114
+ 2025-09-22 13:17:45,478 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1157 | Val mean-roc_auc_score: 0.8026
115
+ 2025-09-22 13:17:45,478 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
116
+ 2025-09-22 13:17:45,992 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.8026
117
+ 2025-09-22 13:19:25,684 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1111 | Val mean-roc_auc_score: 0.8059
118
+ 2025-09-22 13:19:25,828 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
119
+ 2025-09-22 13:19:26,367 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8059
120
+ 2025-09-22 13:21:06,914 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.8097
121
+ 2025-09-22 13:21:07,057 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 3081
122
+ 2025-09-22 13:21:07,583 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.8097
123
+ 2025-09-22 13:22:46,904 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1504 | Val mean-roc_auc_score: 0.8205
124
+ 2025-09-22 13:22:47,047 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 4108
125
+ 2025-09-22 13:22:47,581 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.8205
126
+ 2025-09-22 13:24:28,914 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.7900
127
+ 2025-09-22 13:26:09,001 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.8336
128
+ 2025-09-22 13:26:09,645 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 6162
129
+ 2025-09-22 13:26:10,184 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.8336
130
+ 2025-09-22 13:27:50,841 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0499 | Val mean-roc_auc_score: 0.8175
131
+ 2025-09-22 13:29:30,381 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0344 | Val mean-roc_auc_score: 0.8373
132
+ 2025-09-22 13:29:30,539 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 8216
133
+ 2025-09-22 13:29:31,093 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.8373
134
+ 2025-09-22 13:31:11,598 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0338 | Val mean-roc_auc_score: 0.8194
135
+ 2025-09-22 13:32:52,742 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0301 | Val mean-roc_auc_score: 0.8212
136
+ 2025-09-22 13:34:33,186 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0137 | Val mean-roc_auc_score: 0.8272
137
+ 2025-09-22 13:36:13,887 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0166 | Val mean-roc_auc_score: 0.8332
138
+ 2025-09-22 13:37:52,699 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.8245
139
+ 2025-09-22 13:39:33,974 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0144 | Val mean-roc_auc_score: 0.8309
140
+ 2025-09-22 13:41:12,749 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0045 | Val mean-roc_auc_score: 0.8312
141
+ 2025-09-22 13:42:53,724 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0115 | Val mean-roc_auc_score: 0.8268
142
+ 2025-09-22 13:44:33,643 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0122 | Val mean-roc_auc_score: 0.8294
143
+ 2025-09-22 13:46:14,742 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0132 | Val mean-roc_auc_score: 0.8188
144
+ 2025-09-22 13:47:54,130 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0094 | Val mean-roc_auc_score: 0.8207
145
+ 2025-09-22 13:49:34,414 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8258
146
+ 2025-09-22 13:51:14,761 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0061 | Val mean-roc_auc_score: 0.8288
147
+ 2025-09-22 13:52:55,663 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8302
148
+ 2025-09-22 13:54:35,668 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0138 | Val mean-roc_auc_score: 0.8370
149
+ 2025-09-22 13:56:14,713 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0088 | Val mean-roc_auc_score: 0.8269
150
+ 2025-09-22 13:57:55,010 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.8310
151
+ 2025-09-22 13:59:34,329 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.8271
152
+ 2025-09-22 14:01:14,480 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0072 | Val mean-roc_auc_score: 0.8242
153
+ 2025-09-22 14:02:54,532 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0102 | Val mean-roc_auc_score: 0.8290
154
+ 2025-09-22 14:04:34,953 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8255
155
+ 2025-09-22 14:06:14,736 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8242
156
+ 2025-09-22 14:07:55,634 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.8244
157
+ 2025-09-22 14:09:35,016 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8229
158
+ 2025-09-22 14:11:15,860 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.8267
159
+ 2025-09-22 14:12:56,644 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8238
160
+ 2025-09-22 14:14:37,042 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.8258
161
+ 2025-09-22 14:16:17,314 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8228
162
+ 2025-09-22 14:17:58,279 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0052 | Val mean-roc_auc_score: 0.8233
163
+ 2025-09-22 14:19:38,871 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0074 | Val mean-roc_auc_score: 0.8207
164
+ 2025-09-22 14:21:18,556 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8181
165
+ 2025-09-22 14:22:58,426 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8182
166
+ 2025-09-22 14:24:38,713 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0003 | Val mean-roc_auc_score: 0.8202
167
+ 2025-09-22 14:26:19,312 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0048 | Val mean-roc_auc_score: 0.8156
168
+ 2025-09-22 14:27:58,859 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8163
169
+ 2025-09-22 14:29:39,452 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0056 | Val mean-roc_auc_score: 0.8225
170
+ 2025-09-22 14:31:18,436 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8205
171
+ 2025-09-22 14:32:59,123 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8189
172
+ 2025-09-22 14:34:39,163 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8168
173
+ 2025-09-22 14:36:19,466 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8146
174
+ 2025-09-22 14:37:59,246 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.8174
175
+ 2025-09-22 14:39:39,036 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0046 | Val mean-roc_auc_score: 0.8179
176
+ 2025-09-22 14:41:20,279 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.8191
177
+ 2025-09-22 14:43:00,407 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.8178
178
+ 2025-09-22 14:44:40,398 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.8175
179
+ 2025-09-22 14:46:20,387 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8178
180
+ 2025-09-22 14:48:01,302 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8185
181
+ 2025-09-22 14:49:36,212 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8233
182
+ 2025-09-22 14:50:35,578 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.8221
183
+ 2025-09-22 14:51:34,456 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8190
184
+ 2025-09-22 14:52:33,856 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0049 | Val mean-roc_auc_score: 0.8242
185
+ 2025-09-22 14:53:32,679 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8200
186
+ 2025-09-22 14:54:31,949 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0018 | Val mean-roc_auc_score: 0.8232
187
+ 2025-09-22 14:55:31,443 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0019 | Val mean-roc_auc_score: 0.8206
188
+ 2025-09-22 14:56:30,009 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0000 | Val mean-roc_auc_score: 0.8252
189
+ 2025-09-22 14:57:29,433 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0016 | Val mean-roc_auc_score: 0.8211
190
+ 2025-09-22 14:58:28,615 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8190
191
+ 2025-09-22 14:59:27,655 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.8169
192
+ 2025-09-22 15:00:26,907 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8197
193
+ 2025-09-22 15:01:25,996 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8188
194
+ 2025-09-22 15:02:25,171 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8171
195
+ 2025-09-22 15:03:23,912 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8183
196
+ 2025-09-22 15:04:23,078 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8203
197
+ 2025-09-22 15:05:22,308 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8164
198
+ 2025-09-22 15:06:21,359 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8185
199
+ 2025-09-22 15:07:20,679 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8203
200
+ 2025-09-22 15:08:20,617 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8199
201
+ 2025-09-22 15:09:19,587 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0037 | Val mean-roc_auc_score: 0.8211
202
+ 2025-09-22 15:10:19,036 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8185
203
+ 2025-09-22 15:11:17,744 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.8201
204
+ 2025-09-22 15:12:17,060 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0054 | Val mean-roc_auc_score: 0.8188
205
+ 2025-09-22 15:13:16,105 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0065 | Val mean-roc_auc_score: 0.8201
206
+ 2025-09-22 15:14:15,617 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8183
207
+ 2025-09-22 15:15:14,913 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8198
208
+ 2025-09-22 15:16:14,084 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8158
209
+ 2025-09-22 15:17:13,560 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8194
210
+ 2025-09-22 15:18:12,367 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8206
211
+ 2025-09-22 15:19:12,056 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0063 | Val mean-roc_auc_score: 0.8179
212
+ 2025-09-22 15:20:11,493 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8207
213
+ 2025-09-22 15:21:10,546 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8181
214
+ 2025-09-22 15:22:10,121 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0001 | Val mean-roc_auc_score: 0.8173
215
+ 2025-09-22 15:23:08,936 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0090 | Val mean-roc_auc_score: 0.8154
216
+ 2025-09-22 15:24:08,334 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0022 | Val mean-roc_auc_score: 0.8147
217
+ 2025-09-22 15:25:07,664 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8160
218
+ 2025-09-22 15:26:06,767 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8170
219
+ 2025-09-22 15:27:06,306 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8169
220
+ 2025-09-22 15:28:05,195 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8173
221
+ 2025-09-22 15:29:04,642 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8128
222
+ 2025-09-22 15:30:03,806 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0069 | Val mean-roc_auc_score: 0.8158
223
+ 2025-09-22 15:31:02,681 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8114
224
+ 2025-09-22 15:32:01,951 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.8152
225
+ 2025-09-22 15:33:00,855 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8147
226
+ 2025-09-22 15:33:04,051 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7704
227
+ 2025-09-22 15:33:04,686 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset hiv at 2025-09-22_15-33-04
228
+ 2025-09-22 15:34:00,315 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1082 | Val mean-roc_auc_score: 0.7705
229
+ 2025-09-22 15:34:00,315 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 1027
230
+ 2025-09-22 15:34:00,836 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7705
231
+ 2025-09-22 15:35:00,044 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1042 | Val mean-roc_auc_score: 0.8042
232
+ 2025-09-22 15:35:00,193 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 2054
233
+ 2025-09-22 15:35:00,723 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.8042
234
+ 2025-09-22 15:35:59,626 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1196 | Val mean-roc_auc_score: 0.8023
235
+ 2025-09-22 15:36:58,097 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0391 | Val mean-roc_auc_score: 0.8289
236
+ 2025-09-22 15:36:58,240 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 4108
237
+ 2025-09-22 15:36:58,767 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.8289
238
+ 2025-09-22 15:37:58,166 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.8127
239
+ 2025-09-22 15:38:56,624 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0519 | Val mean-roc_auc_score: 0.8345
240
+ 2025-09-22 15:38:57,395 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 6162
241
+ 2025-09-22 15:38:57,919 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.8345
242
+ 2025-09-22 15:39:56,857 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0516 | Val mean-roc_auc_score: 0.8306
243
+ 2025-09-22 15:40:55,601 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0366 | Val mean-roc_auc_score: 0.8196
244
+ 2025-09-22 15:41:55,068 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0298 | Val mean-roc_auc_score: 0.8358
245
+ 2025-09-22 15:41:55,218 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Global step of best model: 9243
246
+ 2025-09-22 15:41:55,759 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.8358
247
+ 2025-09-22 15:42:54,310 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0282 | Val mean-roc_auc_score: 0.8338
248
+ 2025-09-22 15:43:54,117 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0216 | Val mean-roc_auc_score: 0.8207
249
+ 2025-09-22 15:44:53,593 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.8276
250
+ 2025-09-22 15:45:52,268 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0201 | Val mean-roc_auc_score: 0.8287
251
+ 2025-09-22 15:46:51,636 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0134 | Val mean-roc_auc_score: 0.8152
252
+ 2025-09-22 15:47:50,295 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8171
253
+ 2025-09-22 15:48:49,764 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0123 | Val mean-roc_auc_score: 0.8275
254
+ 2025-09-22 15:49:49,029 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0101 | Val mean-roc_auc_score: 0.8110
255
+ 2025-09-22 15:50:47,836 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0064 | Val mean-roc_auc_score: 0.8185
256
+ 2025-09-22 15:51:47,723 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8174
257
+ 2025-09-22 15:52:46,415 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0071 | Val mean-roc_auc_score: 0.8228
258
+ 2025-09-22 15:53:46,020 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8158
259
+ 2025-09-22 15:54:45,634 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0105 | Val mean-roc_auc_score: 0.8159
260
+ 2025-09-22 15:55:44,555 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0141 | Val mean-roc_auc_score: 0.8158
261
+ 2025-09-22 15:56:43,940 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0109 | Val mean-roc_auc_score: 0.8135
262
+ 2025-09-22 15:57:42,784 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0082 | Val mean-roc_auc_score: 0.8274
263
+ 2025-09-22 15:58:41,983 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0017 | Val mean-roc_auc_score: 0.8140
264
+ 2025-09-22 15:59:41,449 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0055 | Val mean-roc_auc_score: 0.8211
265
+ 2025-09-22 16:00:40,459 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8228
266
+ 2025-09-22 16:01:39,879 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0075 | Val mean-roc_auc_score: 0.8289
267
+ 2025-09-22 16:02:38,393 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8163
268
+ 2025-09-22 16:03:37,549 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8294
269
+ 2025-09-22 16:04:37,228 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8258
270
+ 2025-09-22 16:05:36,575 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0111 | Val mean-roc_auc_score: 0.8209
271
+ 2025-09-22 16:06:36,082 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8239
272
+ 2025-09-22 16:07:34,801 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0036 | Val mean-roc_auc_score: 0.8203
273
+ 2025-09-22 16:08:34,003 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0059 | Val mean-roc_auc_score: 0.8204
274
+ 2025-09-22 16:09:34,310 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8204
275
+ 2025-09-22 16:10:32,792 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0098 | Val mean-roc_auc_score: 0.8266
276
+ 2025-09-22 16:11:31,990 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0078 | Val mean-roc_auc_score: 0.8248
277
+ 2025-09-22 16:12:30,838 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0058 | Val mean-roc_auc_score: 0.8211
278
+ 2025-09-22 16:13:30,594 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0025 | Val mean-roc_auc_score: 0.8227
279
+ 2025-09-22 16:14:30,495 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8238
280
+ 2025-09-22 16:15:29,419 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8231
281
+ 2025-09-22 16:16:28,847 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8190
282
+ 2025-09-22 16:17:27,735 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0030 | Val mean-roc_auc_score: 0.8210
283
+ 2025-09-22 16:18:27,121 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0124 | Val mean-roc_auc_score: 0.8221
284
+ 2025-09-22 16:19:26,644 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8191
285
+ 2025-09-22 16:20:25,180 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0066 | Val mean-roc_auc_score: 0.8107
286
+ 2025-09-22 16:21:24,882 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0076 | Val mean-roc_auc_score: 0.8171
287
+ 2025-09-22 16:22:23,708 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8191
288
+ 2025-09-22 16:23:22,991 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0033 | Val mean-roc_auc_score: 0.8151
289
+ 2025-09-22 16:24:22,474 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0085 | Val mean-roc_auc_score: 0.8166
290
+ 2025-09-22 16:25:21,282 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8167
291
+ 2025-09-22 16:26:21,142 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0051 | Val mean-roc_auc_score: 0.8209
292
+ 2025-09-22 16:27:19,546 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8183
293
+ 2025-09-22 16:28:18,946 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8214
294
+ 2025-09-22 16:29:18,132 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0023 | Val mean-roc_auc_score: 0.8164
295
+ 2025-09-22 16:30:16,989 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8155
296
+ 2025-09-22 16:31:16,450 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0053 | Val mean-roc_auc_score: 0.8145
297
+ 2025-09-22 16:32:14,921 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8158
298
+ 2025-09-22 16:33:14,556 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0062 | Val mean-roc_auc_score: 0.8127
299
+ 2025-09-22 16:34:14,248 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8115
300
+ 2025-09-22 16:35:12,829 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0005 | Val mean-roc_auc_score: 0.8155
301
+ 2025-09-22 16:36:12,364 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0067 | Val mean-roc_auc_score: 0.8143
302
+ 2025-09-22 16:37:11,063 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8183
303
+ 2025-09-22 16:38:10,302 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8162
304
+ 2025-09-22 16:39:09,823 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8157
305
+ 2025-09-22 16:40:08,737 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0020 | Val mean-roc_auc_score: 0.8156
306
+ 2025-09-22 16:41:08,192 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8151
307
+ 2025-09-22 16:42:06,843 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.8160
308
+ 2025-09-22 16:43:06,077 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8162
309
+ 2025-09-22 16:44:05,774 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0011 | Val mean-roc_auc_score: 0.8167
310
+ 2025-09-22 16:45:04,640 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0039 | Val mean-roc_auc_score: 0.8143
311
+ 2025-09-22 16:46:04,187 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0040 | Val mean-roc_auc_score: 0.8144
312
+ 2025-09-22 16:47:03,855 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0041 | Val mean-roc_auc_score: 0.8176
313
+ 2025-09-22 16:48:03,736 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8164
314
+ 2025-09-22 16:49:02,975 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0021 | Val mean-roc_auc_score: 0.8142
315
+ 2025-09-22 16:50:01,911 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0160 | Val mean-roc_auc_score: 0.8155
316
+ 2025-09-22 16:51:01,157 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0009 | Val mean-roc_auc_score: 0.8146
317
+ 2025-09-22 16:52:00,029 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0035 | Val mean-roc_auc_score: 0.8139
318
+ 2025-09-22 16:52:59,733 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0031 | Val mean-roc_auc_score: 0.8158
319
+ 2025-09-22 16:53:59,191 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0070 | Val mean-roc_auc_score: 0.8175
320
+ 2025-09-22 16:54:57,944 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0029 | Val mean-roc_auc_score: 0.8127
321
+ 2025-09-22 16:55:57,576 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8149
322
+ 2025-09-22 16:56:56,248 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0024 | Val mean-roc_auc_score: 0.8159
323
+ 2025-09-22 16:57:55,843 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0080 | Val mean-roc_auc_score: 0.8198
324
+ 2025-09-22 16:58:55,169 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0050 | Val mean-roc_auc_score: 0.8123
325
+ 2025-09-22 16:59:53,914 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0034 | Val mean-roc_auc_score: 0.8137
326
+ 2025-09-22 17:00:53,426 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0013 | Val mean-roc_auc_score: 0.8127
327
+ 2025-09-22 17:01:52,128 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0060 | Val mean-roc_auc_score: 0.8143
328
+ 2025-09-22 17:02:51,658 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0038 | Val mean-roc_auc_score: 0.8110
329
+ 2025-09-22 17:03:50,933 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0028 | Val mean-roc_auc_score: 0.8144
330
+ 2025-09-22 17:04:49,616 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8134
331
+ 2025-09-22 17:05:49,205 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0042 | Val mean-roc_auc_score: 0.8135
332
+ 2025-09-22 17:06:48,058 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0044 | Val mean-roc_auc_score: 0.8155
333
+ 2025-09-22 17:07:47,849 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0032 | Val mean-roc_auc_score: 0.8141
334
+ 2025-09-22 17:08:47,413 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0012 | Val mean-roc_auc_score: 0.8120
335
+ 2025-09-22 17:09:46,363 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0057 | Val mean-roc_auc_score: 0.8161
336
+ 2025-09-22 17:10:46,113 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0043 | Val mean-roc_auc_score: 0.8115
337
+ 2025-09-22 17:11:44,813 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0026 | Val mean-roc_auc_score: 0.8129
338
+ 2025-09-22 17:11:48,028 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7726
339
+ 2025-09-22 17:11:48,851 - logs_modchembert_hiv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7702, Std Dev: 0.0020
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_sider_epochs100_batch_size32_20250919_003207.log ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-19 00:32:07,326 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Running benchmark for dataset: sider
2
+ 2025-09-19 00:32:07,326 - logs_modchembert_sider_epochs100_batch_size32 - INFO - dataset: sider, tasks: ['Hepatobiliary disorders', 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders', 'Investigations', 'Musculoskeletal and connective tissue disorders', 'Gastrointestinal disorders', 'Social circumstances', 'Immune system disorders', 'Reproductive system and breast disorders', 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)', 'General disorders and administration site conditions', 'Endocrine disorders', 'Surgical and medical procedures', 'Vascular disorders', 'Blood and lymphatic system disorders', 'Skin and subcutaneous tissue disorders', 'Congenital, familial and genetic disorders', 'Infections and infestations', 'Respiratory, thoracic and mediastinal disorders', 'Psychiatric disorders', 'Renal and urinary disorders', 'Pregnancy, puerperium and perinatal conditions', 'Ear and labyrinth disorders', 'Cardiac disorders', 'Nervous system disorders', 'Injury, poisoning and procedural complications'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-19 00:32:07,339 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset sider at 2025-09-19_00-32-07
4
+ 2025-09-19 00:32:10,313 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5286 | Val mean-roc_auc_score: 0.5437
5
+ 2025-09-19 00:32:10,313 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
6
+ 2025-09-19 00:32:10,834 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5437
7
+ 2025-09-19 00:32:14,059 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5036 | Val mean-roc_auc_score: 0.5625
8
+ 2025-09-19 00:32:14,238 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
9
+ 2025-09-19 00:32:14,767 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5625
10
+ 2025-09-19 00:32:17,956 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4875 | Val mean-roc_auc_score: 0.5737
11
+ 2025-09-19 00:32:18,137 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
12
+ 2025-09-19 00:32:18,663 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5737
13
+ 2025-09-19 00:32:21,844 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4679 | Val mean-roc_auc_score: 0.5965
14
+ 2025-09-19 00:32:22,022 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
15
+ 2025-09-19 00:32:22,547 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5965
16
+ 2025-09-19 00:32:25,732 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4464 | Val mean-roc_auc_score: 0.5971
17
+ 2025-09-19 00:32:25,911 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 175
18
+ 2025-09-19 00:32:26,440 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.5971
19
+ 2025-09-19 00:32:29,644 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4250 | Val mean-roc_auc_score: 0.5888
20
+ 2025-09-19 00:32:33,257 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3875 | Val mean-roc_auc_score: 0.5899
21
+ 2025-09-19 00:32:36,437 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3536 | Val mean-roc_auc_score: 0.5791
22
+ 2025-09-19 00:32:39,644 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3438 | Val mean-roc_auc_score: 0.6107
23
+ 2025-09-19 00:32:39,822 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
24
+ 2025-09-19 00:32:40,344 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.6107
25
+ 2025-09-19 00:32:43,535 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3054 | Val mean-roc_auc_score: 0.6120
26
+ 2025-09-19 00:32:43,713 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 350
27
+ 2025-09-19 00:32:44,236 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.6120
28
+ 2025-09-19 00:32:47,398 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2839 | Val mean-roc_auc_score: 0.5961
29
+ 2025-09-19 00:32:50,989 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2656 | Val mean-roc_auc_score: 0.5912
30
+ 2025-09-19 00:32:54,148 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2482 | Val mean-roc_auc_score: 0.5945
31
+ 2025-09-19 00:32:57,378 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2375 | Val mean-roc_auc_score: 0.6023
32
+ 2025-09-19 00:33:00,650 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2325 | Val mean-roc_auc_score: 0.5930
33
+ 2025-09-19 00:33:03,909 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2107 | Val mean-roc_auc_score: 0.6055
34
+ 2025-09-19 00:33:07,970 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2045 | Val mean-roc_auc_score: 0.5943
35
+ 2025-09-19 00:33:11,537 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1896 | Val mean-roc_auc_score: 0.6008
36
+ 2025-09-19 00:33:14,979 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1848 | Val mean-roc_auc_score: 0.5973
37
+ 2025-09-19 00:33:18,605 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1821 | Val mean-roc_auc_score: 0.5950
38
+ 2025-09-19 00:33:22,082 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1723 | Val mean-roc_auc_score: 0.5935
39
+ 2025-09-19 00:33:26,287 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1634 | Val mean-roc_auc_score: 0.6113
40
+ 2025-09-19 00:33:29,913 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1680 | Val mean-roc_auc_score: 0.5977
41
+ 2025-09-19 00:33:33,714 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1536 | Val mean-roc_auc_score: 0.6033
42
+ 2025-09-19 00:33:39,010 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1437 | Val mean-roc_auc_score: 0.6007
43
+ 2025-09-19 00:33:43,664 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1445 | Val mean-roc_auc_score: 0.6014
44
+ 2025-09-19 00:33:48,289 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1384 | Val mean-roc_auc_score: 0.5998
45
+ 2025-09-19 00:33:53,224 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1348 | Val mean-roc_auc_score: 0.5956
46
+ 2025-09-19 00:33:58,875 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1417 | Val mean-roc_auc_score: 0.5921
47
+ 2025-09-19 00:34:03,980 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1295 | Val mean-roc_auc_score: 0.6072
48
+ 2025-09-19 00:34:09,238 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1259 | Val mean-roc_auc_score: 0.6065
49
+ 2025-09-19 00:34:14,593 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1273 | Val mean-roc_auc_score: 0.5937
50
+ 2025-09-19 00:34:19,939 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1241 | Val mean-roc_auc_score: 0.6135
51
+ 2025-09-19 00:34:20,089 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 1155
52
+ 2025-09-19 00:34:20,633 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 33 with val mean-roc_auc_score: 0.6135
53
+ 2025-09-19 00:34:25,742 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.5982
54
+ 2025-09-19 00:34:30,672 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1150 | Val mean-roc_auc_score: 0.6035
55
+ 2025-09-19 00:34:36,030 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1134 | Val mean-roc_auc_score: 0.5931
56
+ 2025-09-19 00:34:41,949 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1098 | Val mean-roc_auc_score: 0.6015
57
+ 2025-09-19 00:34:47,178 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1099 | Val mean-roc_auc_score: 0.6107
58
+ 2025-09-19 00:34:52,457 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1071 | Val mean-roc_auc_score: 0.5998
59
+ 2025-09-19 00:34:57,807 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1045 | Val mean-roc_auc_score: 0.5968
60
+ 2025-09-19 00:35:03,110 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1076 | Val mean-roc_auc_score: 0.6078
61
+ 2025-09-19 00:35:08,680 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1080 | Val mean-roc_auc_score: 0.6051
62
+ 2025-09-19 00:35:13,813 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1203 | Val mean-roc_auc_score: 0.6072
63
+ 2025-09-19 00:35:19,142 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0969 | Val mean-roc_auc_score: 0.6116
64
+ 2025-09-19 00:35:24,013 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0978 | Val mean-roc_auc_score: 0.6088
65
+ 2025-09-19 00:35:29,108 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0961 | Val mean-roc_auc_score: 0.6007
66
+ 2025-09-19 00:35:34,566 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6062
67
+ 2025-09-19 00:35:39,637 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0924 | Val mean-roc_auc_score: 0.5918
68
+ 2025-09-19 00:35:44,868 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0948 | Val mean-roc_auc_score: 0.5986
69
+ 2025-09-19 00:35:49,972 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0911 | Val mean-roc_auc_score: 0.6057
70
+ 2025-09-19 00:35:55,003 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0902 | Val mean-roc_auc_score: 0.5983
71
+ 2025-09-19 00:36:00,569 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0953 | Val mean-roc_auc_score: 0.6078
72
+ 2025-09-19 00:36:06,128 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0924 | Val mean-roc_auc_score: 0.6028
73
+ 2025-09-19 00:36:11,831 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0902 | Val mean-roc_auc_score: 0.6041
74
+ 2025-09-19 00:36:17,519 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0906 | Val mean-roc_auc_score: 0.6104
75
+ 2025-09-19 00:36:23,212 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6086
76
+ 2025-09-19 00:36:29,290 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6064
77
+ 2025-09-19 00:36:35,435 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.5979
78
+ 2025-09-19 00:36:40,435 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.6010
79
+ 2025-09-19 00:36:45,773 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0804 | Val mean-roc_auc_score: 0.6094
80
+ 2025-09-19 00:36:50,926 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.6022
81
+ 2025-09-19 00:36:56,441 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.6088
82
+ 2025-09-19 00:37:01,557 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0859 | Val mean-roc_auc_score: 0.6048
83
+ 2025-09-19 00:37:06,512 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.6044
84
+ 2025-09-19 00:37:11,702 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6037
85
+ 2025-09-19 00:37:16,855 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0867 | Val mean-roc_auc_score: 0.5961
86
+ 2025-09-19 00:37:22,440 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5983
87
+ 2025-09-19 00:37:27,465 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.6066
88
+ 2025-09-19 00:37:32,627 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0833 | Val mean-roc_auc_score: 0.5877
89
+ 2025-09-19 00:37:37,773 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0795 | Val mean-roc_auc_score: 0.5982
90
+ 2025-09-19 00:37:42,972 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.6035
91
+ 2025-09-19 00:37:48,189 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0824 | Val mean-roc_auc_score: 0.5964
92
+ 2025-09-19 00:37:53,351 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.5969
93
+ 2025-09-19 00:37:58,475 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.5934
94
+ 2025-09-19 00:38:03,697 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0766 | Val mean-roc_auc_score: 0.5933
95
+ 2025-09-19 00:38:08,780 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5988
96
+ 2025-09-19 00:38:14,203 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5998
97
+ 2025-09-19 00:38:19,211 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0729 | Val mean-roc_auc_score: 0.5986
98
+ 2025-09-19 00:38:24,714 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.6017
99
+ 2025-09-19 00:38:30,080 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.5979
100
+ 2025-09-19 00:38:35,338 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6030
101
+ 2025-09-19 00:38:40,957 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6033
102
+ 2025-09-19 00:38:46,267 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0922 | Val mean-roc_auc_score: 0.5971
103
+ 2025-09-19 00:38:51,396 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.5977
104
+ 2025-09-19 00:38:56,572 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.5961
105
+ 2025-09-19 00:39:02,945 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0707 | Val mean-roc_auc_score: 0.5906
106
+ 2025-09-19 00:39:08,071 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.6027
107
+ 2025-09-19 00:39:13,114 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.6065
108
+ 2025-09-19 00:39:18,191 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.5928
109
+ 2025-09-19 00:39:23,363 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.5989
110
+ 2025-09-19 00:39:28,572 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.6033
111
+ 2025-09-19 00:39:34,226 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0734 | Val mean-roc_auc_score: 0.6022
112
+ 2025-09-19 00:39:39,306 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.5987
113
+ 2025-09-19 00:39:44,290 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.5919
114
+ 2025-09-19 00:39:49,532 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0700 | Val mean-roc_auc_score: 0.6034
115
+ 2025-09-19 00:39:54,755 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.5979
116
+ 2025-09-19 00:40:00,321 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0688 | Val mean-roc_auc_score: 0.6041
117
+ 2025-09-19 00:40:05,196 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0682 | Val mean-roc_auc_score: 0.5992
118
+ 2025-09-19 00:40:10,363 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0674 | Val mean-roc_auc_score: 0.5977
119
+ 2025-09-19 00:40:15,567 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0679 | Val mean-roc_auc_score: 0.5938
120
+ 2025-09-19 00:40:16,374 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6317
121
+ 2025-09-19 00:40:16,847 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset sider at 2025-09-19_00-40-16
122
+ 2025-09-19 00:40:20,878 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5357 | Val mean-roc_auc_score: 0.5728
123
+ 2025-09-19 00:40:20,878 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
124
+ 2025-09-19 00:40:21,410 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5728
125
+ 2025-09-19 00:40:26,187 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5036 | Val mean-roc_auc_score: 0.5793
126
+ 2025-09-19 00:40:26,331 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
127
+ 2025-09-19 00:40:26,877 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5793
128
+ 2025-09-19 00:40:31,874 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.4875 | Val mean-roc_auc_score: 0.5893
129
+ 2025-09-19 00:40:32,326 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
130
+ 2025-09-19 00:40:32,873 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5893
131
+ 2025-09-19 00:40:37,593 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4714 | Val mean-roc_auc_score: 0.5974
132
+ 2025-09-19 00:40:37,779 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
133
+ 2025-09-19 00:40:38,338 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5974
134
+ 2025-09-19 00:40:43,051 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4554 | Val mean-roc_auc_score: 0.5920
135
+ 2025-09-19 00:40:48,042 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4188 | Val mean-roc_auc_score: 0.6027
136
+ 2025-09-19 00:40:48,679 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 210
137
+ 2025-09-19 00:40:49,207 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.6027
138
+ 2025-09-19 00:40:54,078 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4000 | Val mean-roc_auc_score: 0.6004
139
+ 2025-09-19 00:40:58,894 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3768 | Val mean-roc_auc_score: 0.6074
140
+ 2025-09-19 00:40:59,078 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
141
+ 2025-09-19 00:40:59,627 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.6074
142
+ 2025-09-19 00:41:04,511 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3500 | Val mean-roc_auc_score: 0.6101
143
+ 2025-09-19 00:41:04,692 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
144
+ 2025-09-19 00:41:05,224 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.6101
145
+ 2025-09-19 00:41:09,915 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3107 | Val mean-roc_auc_score: 0.6025
146
+ 2025-09-19 00:41:14,546 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2929 | Val mean-roc_auc_score: 0.6047
147
+ 2025-09-19 00:41:19,884 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2734 | Val mean-roc_auc_score: 0.6058
148
+ 2025-09-19 00:41:24,989 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2536 | Val mean-roc_auc_score: 0.6033
149
+ 2025-09-19 00:41:29,842 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2411 | Val mean-roc_auc_score: 0.6076
150
+ 2025-09-19 00:41:34,856 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2387 | Val mean-roc_auc_score: 0.6225
151
+ 2025-09-19 00:41:35,005 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 525
152
+ 2025-09-19 00:41:35,544 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val mean-roc_auc_score: 0.6225
153
+ 2025-09-19 00:41:40,484 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2268 | Val mean-roc_auc_score: 0.6080
154
+ 2025-09-19 00:41:46,105 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2071 | Val mean-roc_auc_score: 0.6062
155
+ 2025-09-19 00:41:50,707 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1979 | Val mean-roc_auc_score: 0.6096
156
+ 2025-09-19 00:41:55,762 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1884 | Val mean-roc_auc_score: 0.6117
157
+ 2025-09-19 00:42:00,960 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1821 | Val mean-roc_auc_score: 0.6087
158
+ 2025-09-19 00:42:06,299 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1714 | Val mean-roc_auc_score: 0.6078
159
+ 2025-09-19 00:42:11,639 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1723 | Val mean-roc_auc_score: 0.6102
160
+ 2025-09-19 00:42:16,832 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1789 | Val mean-roc_auc_score: 0.6119
161
+ 2025-09-19 00:42:21,836 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1589 | Val mean-roc_auc_score: 0.6011
162
+ 2025-09-19 00:42:27,050 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1518 | Val mean-roc_auc_score: 0.6154
163
+ 2025-09-19 00:42:32,463 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1547 | Val mean-roc_auc_score: 0.6135
164
+ 2025-09-19 00:42:38,464 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1464 | Val mean-roc_auc_score: 0.6080
165
+ 2025-09-19 00:42:43,601 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1420 | Val mean-roc_auc_score: 0.6063
166
+ 2025-09-19 00:42:49,587 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1396 | Val mean-roc_auc_score: 0.6099
167
+ 2025-09-19 00:42:54,695 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1348 | Val mean-roc_auc_score: 0.6039
168
+ 2025-09-19 00:42:59,679 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1295 | Val mean-roc_auc_score: 0.5966
169
+ 2025-09-19 00:43:05,168 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1273 | Val mean-roc_auc_score: 0.6012
170
+ 2025-09-19 00:43:10,211 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1223 | Val mean-roc_auc_score: 0.6085
171
+ 2025-09-19 00:43:15,474 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6018
172
+ 2025-09-19 00:43:20,738 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1200 | Val mean-roc_auc_score: 0.6020
173
+ 2025-09-19 00:43:26,164 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6171
174
+ 2025-09-19 00:43:31,763 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6108
175
+ 2025-09-19 00:43:36,992 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1130 | Val mean-roc_auc_score: 0.6005
176
+ 2025-09-19 00:43:42,251 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1089 | Val mean-roc_auc_score: 0.6061
177
+ 2025-09-19 00:43:47,821 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1103 | Val mean-roc_auc_score: 0.5972
178
+ 2025-09-19 00:43:53,011 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.5935
179
+ 2025-09-19 00:43:58,645 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1058 | Val mean-roc_auc_score: 0.6011
180
+ 2025-09-19 00:44:03,900 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1023 | Val mean-roc_auc_score: 0.6017
181
+ 2025-09-19 00:44:09,027 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1022 | Val mean-roc_auc_score: 0.5983
182
+ 2025-09-19 00:44:14,355 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.1036 | Val mean-roc_auc_score: 0.6045
183
+ 2025-09-19 00:44:19,841 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1023 | Val mean-roc_auc_score: 0.5937
184
+ 2025-09-19 00:44:25,258 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.1004 | Val mean-roc_auc_score: 0.5973
185
+ 2025-09-19 00:44:30,416 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.6010
186
+ 2025-09-19 00:44:35,522 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.6041
187
+ 2025-09-19 00:44:40,868 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0955 | Val mean-roc_auc_score: 0.6051
188
+ 2025-09-19 00:44:45,948 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0924 | Val mean-roc_auc_score: 0.5994
189
+ 2025-09-19 00:44:51,645 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.5979
190
+ 2025-09-19 00:44:57,361 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0942 | Val mean-roc_auc_score: 0.5942
191
+ 2025-09-19 00:45:02,957 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0920 | Val mean-roc_auc_score: 0.6024
192
+ 2025-09-19 00:45:07,926 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0900 | Val mean-roc_auc_score: 0.6013
193
+ 2025-09-19 00:45:12,745 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0897 | Val mean-roc_auc_score: 0.5908
194
+ 2025-09-19 00:45:17,526 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.5939
195
+ 2025-09-19 00:45:23,467 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0896 | Val mean-roc_auc_score: 0.5943
196
+ 2025-09-19 00:45:28,384 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5893
197
+ 2025-09-19 00:45:33,238 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.5994
198
+ 2025-09-19 00:45:38,016 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.6013
199
+ 2025-09-19 00:45:43,313 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0862 | Val mean-roc_auc_score: 0.5969
200
+ 2025-09-19 00:45:48,514 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0930 | Val mean-roc_auc_score: 0.5989
201
+ 2025-09-19 00:45:53,473 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.6012
202
+ 2025-09-19 00:45:58,720 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.5979
203
+ 2025-09-19 00:46:03,996 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0895 | Val mean-roc_auc_score: 0.5950
204
+ 2025-09-19 00:46:09,383 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0826 | Val mean-roc_auc_score: 0.5985
205
+ 2025-09-19 00:46:14,395 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0857 | Val mean-roc_auc_score: 0.5899
206
+ 2025-09-19 00:46:19,544 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0823 | Val mean-roc_auc_score: 0.5959
207
+ 2025-09-19 00:46:24,927 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.5960
208
+ 2025-09-19 00:46:29,871 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.5982
209
+ 2025-09-19 00:46:35,235 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0820 | Val mean-roc_auc_score: 0.5943
210
+ 2025-09-19 00:46:40,273 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.5950
211
+ 2025-09-19 00:46:45,521 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0786 | Val mean-roc_auc_score: 0.5989
212
+ 2025-09-19 00:46:50,287 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0791 | Val mean-roc_auc_score: 0.5999
213
+ 2025-09-19 00:46:55,203 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.5929
214
+ 2025-09-19 00:47:00,650 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0772 | Val mean-roc_auc_score: 0.5935
215
+ 2025-09-19 00:47:05,722 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.5903
216
+ 2025-09-19 00:47:10,734 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.5938
217
+ 2025-09-19 00:47:16,271 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5954
218
+ 2025-09-19 00:47:21,953 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5919
219
+ 2025-09-19 00:47:27,901 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.5959
220
+ 2025-09-19 00:47:33,463 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5916
221
+ 2025-09-19 00:47:38,982 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.5947
222
+ 2025-09-19 00:47:44,491 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.5902
223
+ 2025-09-19 00:47:50,499 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.5892
224
+ 2025-09-19 00:47:56,530 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0754 | Val mean-roc_auc_score: 0.5982
225
+ 2025-09-19 00:48:01,845 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.5958
226
+ 2025-09-19 00:48:06,849 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0745 | Val mean-roc_auc_score: 0.5908
227
+ 2025-09-19 00:48:11,595 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.5975
228
+ 2025-09-19 00:48:16,430 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.5940
229
+ 2025-09-19 00:48:21,767 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0703 | Val mean-roc_auc_score: 0.5989
230
+ 2025-09-19 00:48:26,369 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.6035
231
+ 2025-09-19 00:48:31,403 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.5894
232
+ 2025-09-19 00:48:36,449 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0716 | Val mean-roc_auc_score: 0.5978
233
+ 2025-09-19 00:48:41,507 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.5891
234
+ 2025-09-19 00:48:46,828 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0701 | Val mean-roc_auc_score: 0.5931
235
+ 2025-09-19 00:48:51,857 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.5959
236
+ 2025-09-19 00:48:57,104 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.5929
237
+ 2025-09-19 00:49:02,121 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.5914
238
+ 2025-09-19 00:49:02,801 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6250
239
+ 2025-09-19 00:49:03,270 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset sider at 2025-09-19_00-49-03
240
+ 2025-09-19 00:49:07,448 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5357 | Val mean-roc_auc_score: 0.5507
241
+ 2025-09-19 00:49:07,448 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 35
242
+ 2025-09-19 00:49:07,989 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.5507
243
+ 2025-09-19 00:49:12,819 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.5000 | Val mean-roc_auc_score: 0.5577
244
+ 2025-09-19 00:49:12,988 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 70
245
+ 2025-09-19 00:49:13,510 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.5577
246
+ 2025-09-19 00:49:18,222 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.5031 | Val mean-roc_auc_score: 0.5865
247
+ 2025-09-19 00:49:18,402 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 105
248
+ 2025-09-19 00:49:18,929 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.5865
249
+ 2025-09-19 00:49:23,875 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.4714 | Val mean-roc_auc_score: 0.5869
250
+ 2025-09-19 00:49:24,057 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 140
251
+ 2025-09-19 00:49:24,585 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.5869
252
+ 2025-09-19 00:49:29,316 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.4482 | Val mean-roc_auc_score: 0.5955
253
+ 2025-09-19 00:49:29,507 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 175
254
+ 2025-09-19 00:49:30,050 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.5955
255
+ 2025-09-19 00:49:34,394 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4156 | Val mean-roc_auc_score: 0.5874
256
+ 2025-09-19 00:49:39,961 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.3911 | Val mean-roc_auc_score: 0.6069
257
+ 2025-09-19 00:49:40,139 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 245
258
+ 2025-09-19 00:49:40,661 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val mean-roc_auc_score: 0.6069
259
+ 2025-09-19 00:49:45,032 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3625 | Val mean-roc_auc_score: 0.6095
260
+ 2025-09-19 00:49:45,220 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 280
261
+ 2025-09-19 00:49:45,751 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.6095
262
+ 2025-09-19 00:49:50,720 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3333 | Val mean-roc_auc_score: 0.6280
263
+ 2025-09-19 00:49:50,909 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 315
264
+ 2025-09-19 00:49:51,442 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val mean-roc_auc_score: 0.6280
265
+ 2025-09-19 00:49:56,042 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3018 | Val mean-roc_auc_score: 0.6249
266
+ 2025-09-19 00:50:00,926 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.3000 | Val mean-roc_auc_score: 0.6237
267
+ 2025-09-19 00:50:06,179 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2703 | Val mean-roc_auc_score: 0.6284
268
+ 2025-09-19 00:50:06,323 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 420
269
+ 2025-09-19 00:50:06,849 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val mean-roc_auc_score: 0.6284
270
+ 2025-09-19 00:50:10,932 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2518 | Val mean-roc_auc_score: 0.6253
271
+ 2025-09-19 00:50:16,096 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2339 | Val mean-roc_auc_score: 0.6268
272
+ 2025-09-19 00:50:21,311 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.2250 | Val mean-roc_auc_score: 0.6331
273
+ 2025-09-19 00:50:21,456 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Global step of best model: 525
274
+ 2025-09-19 00:50:21,988 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val mean-roc_auc_score: 0.6331
275
+ 2025-09-19 00:50:26,986 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.2179 | Val mean-roc_auc_score: 0.6066
276
+ 2025-09-19 00:50:32,145 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.2098 | Val mean-roc_auc_score: 0.6252
277
+ 2025-09-19 00:50:36,782 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1927 | Val mean-roc_auc_score: 0.6188
278
+ 2025-09-19 00:50:42,342 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1821 | Val mean-roc_auc_score: 0.6152
279
+ 2025-09-19 00:50:48,028 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1759 | Val mean-roc_auc_score: 0.6184
280
+ 2025-09-19 00:50:53,330 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1723 | Val mean-roc_auc_score: 0.6116
281
+ 2025-09-19 00:50:59,095 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1625 | Val mean-roc_auc_score: 0.6159
282
+ 2025-09-19 00:51:04,546 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1609 | Val mean-roc_auc_score: 0.6239
283
+ 2025-09-19 00:51:09,897 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1562 | Val mean-roc_auc_score: 0.6223
284
+ 2025-09-19 00:51:15,172 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.1509 | Val mean-roc_auc_score: 0.6185
285
+ 2025-09-19 00:51:20,674 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1531 | Val mean-roc_auc_score: 0.6193
286
+ 2025-09-19 00:51:25,882 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.1384 | Val mean-roc_auc_score: 0.6166
287
+ 2025-09-19 00:51:31,373 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1393 | Val mean-roc_auc_score: 0.6243
288
+ 2025-09-19 00:51:37,248 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1375 | Val mean-roc_auc_score: 0.6196
289
+ 2025-09-19 00:51:42,533 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.1304 | Val mean-roc_auc_score: 0.6170
290
+ 2025-09-19 00:51:46,693 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.1250 | Val mean-roc_auc_score: 0.6217
291
+ 2025-09-19 00:51:51,727 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.1297 | Val mean-roc_auc_score: 0.6200
292
+ 2025-09-19 00:51:56,773 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.1196 | Val mean-roc_auc_score: 0.6172
293
+ 2025-09-19 00:52:02,150 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.1179 | Val mean-roc_auc_score: 0.6198
294
+ 2025-09-19 00:52:07,640 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.1200 | Val mean-roc_auc_score: 0.6251
295
+ 2025-09-19 00:52:13,304 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.1187 | Val mean-roc_auc_score: 0.6257
296
+ 2025-09-19 00:52:18,998 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.1134 | Val mean-roc_auc_score: 0.6249
297
+ 2025-09-19 00:52:24,149 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.1156 | Val mean-roc_auc_score: 0.6303
298
+ 2025-09-19 00:52:29,409 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.1098 | Val mean-roc_auc_score: 0.6127
299
+ 2025-09-19 00:52:34,886 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.6107
300
+ 2025-09-19 00:52:40,088 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.1062 | Val mean-roc_auc_score: 0.6173
301
+ 2025-09-19 00:52:45,727 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.1085 | Val mean-roc_auc_score: 0.6168
302
+ 2025-09-19 00:52:50,949 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.1078 | Val mean-roc_auc_score: 0.6272
303
+ 2025-09-19 00:52:55,967 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.1022 | Val mean-roc_auc_score: 0.6227
304
+ 2025-09-19 00:53:01,265 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0991 | Val mean-roc_auc_score: 0.6180
305
+ 2025-09-19 00:53:06,773 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.1008 | Val mean-roc_auc_score: 0.6170
306
+ 2025-09-19 00:53:12,073 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0973 | Val mean-roc_auc_score: 0.6157
307
+ 2025-09-19 00:53:17,053 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0938 | Val mean-roc_auc_score: 0.6211
308
+ 2025-09-19 00:53:22,270 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0984 | Val mean-roc_auc_score: 0.6234
309
+ 2025-09-19 00:53:27,686 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0951 | Val mean-roc_auc_score: 0.6179
310
+ 2025-09-19 00:53:32,880 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0929 | Val mean-roc_auc_score: 0.6264
311
+ 2025-09-19 00:53:39,053 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0992 | Val mean-roc_auc_score: 0.6163
312
+ 2025-09-19 00:53:44,853 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0915 | Val mean-roc_auc_score: 0.6182
313
+ 2025-09-19 00:53:51,040 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0888 | Val mean-roc_auc_score: 0.6172
314
+ 2025-09-19 00:53:56,678 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0912 | Val mean-roc_auc_score: 0.6172
315
+ 2025-09-19 00:54:02,133 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6199
316
+ 2025-09-19 00:54:08,152 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0871 | Val mean-roc_auc_score: 0.6136
317
+ 2025-09-19 00:54:14,585 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0880 | Val mean-roc_auc_score: 0.6227
318
+ 2025-09-19 00:54:19,998 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0879 | Val mean-roc_auc_score: 0.6124
319
+ 2025-09-19 00:54:25,588 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6192
320
+ 2025-09-19 00:54:31,481 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.6187
321
+ 2025-09-19 00:54:37,736 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0835 | Val mean-roc_auc_score: 0.6141
322
+ 2025-09-19 00:54:43,616 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0875 | Val mean-roc_auc_score: 0.6213
323
+ 2025-09-19 00:54:49,665 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.6194
324
+ 2025-09-19 00:54:55,497 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0853 | Val mean-roc_auc_score: 0.6250
325
+ 2025-09-19 00:55:01,609 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.6173
326
+ 2025-09-19 00:55:07,761 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.6187
327
+ 2025-09-19 00:55:13,584 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0799 | Val mean-roc_auc_score: 0.6163
328
+ 2025-09-19 00:55:19,694 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6204
329
+ 2025-09-19 00:55:25,695 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.6180
330
+ 2025-09-19 00:55:31,829 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0768 | Val mean-roc_auc_score: 0.6136
331
+ 2025-09-19 00:55:38,092 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0820 | Val mean-roc_auc_score: 0.6125
332
+ 2025-09-19 00:55:44,038 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0763 | Val mean-roc_auc_score: 0.6188
333
+ 2025-09-19 00:55:49,937 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0777 | Val mean-roc_auc_score: 0.6166
334
+ 2025-09-19 00:55:56,003 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0800 | Val mean-roc_auc_score: 0.6214
335
+ 2025-09-19 00:56:01,944 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0813 | Val mean-roc_auc_score: 0.6163
336
+ 2025-09-19 00:56:08,343 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0808 | Val mean-roc_auc_score: 0.6149
337
+ 2025-09-19 00:56:13,881 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0771 | Val mean-roc_auc_score: 0.6096
338
+ 2025-09-19 00:56:19,714 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6182
339
+ 2025-09-19 00:56:25,093 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0741 | Val mean-roc_auc_score: 0.6180
340
+ 2025-09-19 00:56:30,625 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6113
341
+ 2025-09-19 00:56:36,484 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6128
342
+ 2025-09-19 00:56:42,580 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0852 | Val mean-roc_auc_score: 0.6154
343
+ 2025-09-19 00:56:48,491 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0750 | Val mean-roc_auc_score: 0.6140
344
+ 2025-09-19 00:56:54,203 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.6116
345
+ 2025-09-19 00:57:00,719 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0730 | Val mean-roc_auc_score: 0.6090
346
+ 2025-09-19 00:57:06,739 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.6132
347
+ 2025-09-19 00:57:11,810 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0710 | Val mean-roc_auc_score: 0.6140
348
+ 2025-09-19 00:57:17,461 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0698 | Val mean-roc_auc_score: 0.6146
349
+ 2025-09-19 00:57:23,465 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0714 | Val mean-roc_auc_score: 0.6189
350
+ 2025-09-19 00:57:29,402 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0728 | Val mean-roc_auc_score: 0.6152
351
+ 2025-09-19 00:57:35,484 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.6129
352
+ 2025-09-19 00:57:41,400 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0696 | Val mean-roc_auc_score: 0.6162
353
+ 2025-09-19 00:57:47,390 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.6063
354
+ 2025-09-19 00:57:53,362 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0719 | Val mean-roc_auc_score: 0.6124
355
+ 2025-09-19 00:57:59,207 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.6126
356
+ 2025-09-19 00:58:05,579 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0705 | Val mean-roc_auc_score: 0.6133
357
+ 2025-09-19 00:58:11,470 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.6138
358
+ 2025-09-19 00:58:17,520 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0692 | Val mean-roc_auc_score: 0.6117
359
+ 2025-09-19 00:58:23,472 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.6144
360
+ 2025-09-19 00:58:24,754 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.6342
361
+ 2025-09-19 00:58:25,255 - logs_modchembert_sider_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.6303, Std Dev: 0.0039
logs_modchembert_classification_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_tox21_epochs100_batch_size32_20250918_231229.log ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 23:12:29,456 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Running benchmark for dataset: tox21
2
+ 2025-09-18 23:12:29,456 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - dataset: tox21, tasks: ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'], epochs: 100, learning rate: 3e-05
3
+ 2025-09-18 23:12:29,470 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset tox21 at 2025-09-18_23-12-29
4
+ 2025-09-18 23:12:44,298 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1732 | Val mean-roc_auc_score: 0.7326
5
+ 2025-09-18 23:12:44,298 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
6
+ 2025-09-18 23:12:44,833 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7326
7
+ 2025-09-18 23:13:00,286 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1637 | Val mean-roc_auc_score: 0.7578
8
+ 2025-09-18 23:13:00,431 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
9
+ 2025-09-18 23:13:00,976 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7578
10
+ 2025-09-18 23:13:16,119 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1555 | Val mean-roc_auc_score: 0.7625
11
+ 2025-09-18 23:13:16,304 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
12
+ 2025-09-18 23:13:16,850 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7625
13
+ 2025-09-18 23:13:32,183 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1555 | Val mean-roc_auc_score: 0.7688
14
+ 2025-09-18 23:13:32,366 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 784
15
+ 2025-09-18 23:13:32,901 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val mean-roc_auc_score: 0.7688
16
+ 2025-09-18 23:13:48,215 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1523 | Val mean-roc_auc_score: 0.7698
17
+ 2025-09-18 23:13:48,401 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
18
+ 2025-09-18 23:13:48,947 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7698
19
+ 2025-09-18 23:14:05,648 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1480 | Val mean-roc_auc_score: 0.7822
20
+ 2025-09-18 23:14:06,228 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1176
21
+ 2025-09-18 23:14:06,782 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7822
22
+ 2025-09-18 23:14:22,198 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1476 | Val mean-roc_auc_score: 0.7739
23
+ 2025-09-18 23:14:37,894 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1369 | Val mean-roc_auc_score: 0.7706
24
+ 2025-09-18 23:14:53,037 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1357 | Val mean-roc_auc_score: 0.7669
25
+ 2025-09-18 23:15:07,951 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1281 | Val mean-roc_auc_score: 0.7663
26
+ 2025-09-18 23:15:24,688 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1283 | Val mean-roc_auc_score: 0.7723
27
+ 2025-09-18 23:15:40,640 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1202 | Val mean-roc_auc_score: 0.7545
28
+ 2025-09-18 23:15:56,159 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1165 | Val mean-roc_auc_score: 0.7679
29
+ 2025-09-18 23:16:11,742 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1172 | Val mean-roc_auc_score: 0.7681
30
+ 2025-09-18 23:16:27,332 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1141 | Val mean-roc_auc_score: 0.7635
31
+ 2025-09-18 23:16:45,482 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1055 | Val mean-roc_auc_score: 0.7613
32
+ 2025-09-18 23:17:03,651 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1133 | Val mean-roc_auc_score: 0.7638
33
+ 2025-09-18 23:17:19,817 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1055 | Val mean-roc_auc_score: 0.7545
34
+ 2025-09-18 23:17:35,856 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1003 | Val mean-roc_auc_score: 0.7459
35
+ 2025-09-18 23:17:51,699 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1023 | Val mean-roc_auc_score: 0.7588
36
+ 2025-09-18 23:18:09,218 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1060 | Val mean-roc_auc_score: 0.7532
37
+ 2025-09-18 23:18:25,317 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0951 | Val mean-roc_auc_score: 0.7544
38
+ 2025-09-18 23:18:41,670 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1030 | Val mean-roc_auc_score: 0.7567
39
+ 2025-09-18 23:18:58,372 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1211 | Val mean-roc_auc_score: 0.7616
40
+ 2025-09-18 23:19:14,714 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0912 | Val mean-roc_auc_score: 0.7560
41
+ 2025-09-18 23:19:31,688 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0885 | Val mean-roc_auc_score: 0.7502
42
+ 2025-09-18 23:19:48,269 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0839 | Val mean-roc_auc_score: 0.7473
43
+ 2025-09-18 23:20:04,649 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0891 | Val mean-roc_auc_score: 0.7432
44
+ 2025-09-18 23:20:20,786 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0867 | Val mean-roc_auc_score: 0.7470
45
+ 2025-09-18 23:20:37,398 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0828 | Val mean-roc_auc_score: 0.7548
46
+ 2025-09-18 23:20:55,379 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0785 | Val mean-roc_auc_score: 0.7460
47
+ 2025-09-18 23:21:11,574 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0812 | Val mean-roc_auc_score: 0.7517
48
+ 2025-09-18 23:21:27,827 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0749 | Val mean-roc_auc_score: 0.7411
49
+ 2025-09-18 23:21:44,594 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0801 | Val mean-roc_auc_score: 0.7446
50
+ 2025-09-18 23:22:00,692 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0755 | Val mean-roc_auc_score: 0.7378
51
+ 2025-09-18 23:22:18,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0765 | Val mean-roc_auc_score: 0.7473
52
+ 2025-09-18 23:22:37,078 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0811 | Val mean-roc_auc_score: 0.7279
53
+ 2025-09-18 23:22:56,571 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0752 | Val mean-roc_auc_score: 0.7349
54
+ 2025-09-18 23:23:15,983 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7346
55
+ 2025-09-18 23:23:36,307 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0695 | Val mean-roc_auc_score: 0.7435
56
+ 2025-09-18 23:23:56,888 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0751 | Val mean-roc_auc_score: 0.7403
57
+ 2025-09-18 23:24:16,740 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0791 | Val mean-roc_auc_score: 0.7344
58
+ 2025-09-18 23:24:36,553 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0815 | Val mean-roc_auc_score: 0.7437
59
+ 2025-09-18 23:24:56,539 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0732 | Val mean-roc_auc_score: 0.7260
60
+ 2025-09-18 23:25:16,174 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0734 | Val mean-roc_auc_score: 0.7279
61
+ 2025-09-18 23:25:36,989 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.7433
62
+ 2025-09-18 23:25:54,329 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0635 | Val mean-roc_auc_score: 0.7322
63
+ 2025-09-18 23:26:11,206 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0801 | Val mean-roc_auc_score: 0.7361
64
+ 2025-09-18 23:26:27,284 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0869 | Val mean-roc_auc_score: 0.7369
65
+ 2025-09-18 23:26:43,304 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0656 | Val mean-roc_auc_score: 0.7330
66
+ 2025-09-18 23:27:00,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0693 | Val mean-roc_auc_score: 0.7331
67
+ 2025-09-18 23:27:17,861 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0649 | Val mean-roc_auc_score: 0.7317
68
+ 2025-09-18 23:27:34,128 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0636 | Val mean-roc_auc_score: 0.7358
69
+ 2025-09-18 23:27:50,947 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0655 | Val mean-roc_auc_score: 0.7386
70
+ 2025-09-18 23:28:08,771 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0660 | Val mean-roc_auc_score: 0.7349
71
+ 2025-09-18 23:28:25,680 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0646 | Val mean-roc_auc_score: 0.7296
72
+ 2025-09-18 23:28:43,290 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0651 | Val mean-roc_auc_score: 0.7324
73
+ 2025-09-18 23:29:00,165 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0602 | Val mean-roc_auc_score: 0.7284
74
+ 2025-09-18 23:29:17,930 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7287
75
+ 2025-09-18 23:29:38,939 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0672 | Val mean-roc_auc_score: 0.7303
76
+ 2025-09-18 23:30:00,255 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0617 | Val mean-roc_auc_score: 0.7310
77
+ 2025-09-18 23:30:22,376 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0643 | Val mean-roc_auc_score: 0.7246
78
+ 2025-09-18 23:30:38,814 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0674 | Val mean-roc_auc_score: 0.7340
79
+ 2025-09-18 23:30:55,377 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0639 | Val mean-roc_auc_score: 0.7374
80
+ 2025-09-18 23:31:12,528 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7290
81
+ 2025-09-18 23:31:30,876 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0590 | Val mean-roc_auc_score: 0.7280
82
+ 2025-09-18 23:31:48,663 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0640 | Val mean-roc_auc_score: 0.7305
83
+ 2025-09-18 23:32:06,074 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0639 | Val mean-roc_auc_score: 0.7299
84
+ 2025-09-18 23:32:26,061 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7284
85
+ 2025-09-18 23:32:47,038 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7301
86
+ 2025-09-18 23:33:08,579 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7249
87
+ 2025-09-18 23:33:28,739 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0527 | Val mean-roc_auc_score: 0.7349
88
+ 2025-09-18 23:33:44,657 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0574 | Val mean-roc_auc_score: 0.7323
89
+ 2025-09-18 23:34:01,578 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7382
90
+ 2025-09-18 23:34:19,559 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0578 | Val mean-roc_auc_score: 0.7353
91
+ 2025-09-18 23:34:38,161 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7324
92
+ 2025-09-18 23:35:01,028 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7279
93
+ 2025-09-18 23:35:21,163 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0582 | Val mean-roc_auc_score: 0.7277
94
+ 2025-09-18 23:35:37,201 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7353
95
+ 2025-09-18 23:35:53,592 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7322
96
+ 2025-09-18 23:36:09,349 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7342
97
+ 2025-09-18 23:36:27,015 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7351
98
+ 2025-09-18 23:36:47,312 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0577 | Val mean-roc_auc_score: 0.7363
99
+ 2025-09-18 23:37:06,919 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0579 | Val mean-roc_auc_score: 0.7270
100
+ 2025-09-18 23:37:26,837 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7235
101
+ 2025-09-18 23:37:47,428 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0564 | Val mean-roc_auc_score: 0.7359
102
+ 2025-09-18 23:38:08,401 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0565 | Val mean-roc_auc_score: 0.7349
103
+ 2025-09-18 23:38:28,387 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7334
104
+ 2025-09-18 23:38:48,932 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0575 | Val mean-roc_auc_score: 0.7299
105
+ 2025-09-18 23:39:06,865 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0574 | Val mean-roc_auc_score: 0.7309
106
+ 2025-09-18 23:39:22,924 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0530 | Val mean-roc_auc_score: 0.7383
107
+ 2025-09-18 23:39:40,720 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0566 | Val mean-roc_auc_score: 0.7344
108
+ 2025-09-18 23:39:57,656 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0580 | Val mean-roc_auc_score: 0.7292
109
+ 2025-09-18 23:40:13,665 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7343
110
+ 2025-09-18 23:40:30,069 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0570 | Val mean-roc_auc_score: 0.7308
111
+ 2025-09-18 23:40:46,906 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0615 | Val mean-roc_auc_score: 0.7312
112
+ 2025-09-18 23:41:04,061 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7345
113
+ 2025-09-18 23:41:20,698 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7241
114
+ 2025-09-18 23:41:37,309 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0486 | Val mean-roc_auc_score: 0.7326
115
+ 2025-09-18 23:41:53,553 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0512 | Val mean-roc_auc_score: 0.7356
116
+ 2025-09-18 23:41:54,791 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7370
117
+ 2025-09-18 23:41:55,211 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset tox21 at 2025-09-18_23-41-55
118
+ 2025-09-18 23:42:10,868 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1758 | Val mean-roc_auc_score: 0.7482
119
+ 2025-09-18 23:42:10,868 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
120
+ 2025-09-18 23:42:11,395 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7482
121
+ 2025-09-18 23:42:27,749 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1624 | Val mean-roc_auc_score: 0.7519
122
+ 2025-09-18 23:42:27,938 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
123
+ 2025-09-18 23:42:28,473 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7519
124
+ 2025-09-18 23:42:44,833 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1548 | Val mean-roc_auc_score: 0.7713
125
+ 2025-09-18 23:42:45,016 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
126
+ 2025-09-18 23:42:45,551 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7713
127
+ 2025-09-18 23:43:02,196 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1518 | Val mean-roc_auc_score: 0.7669
128
+ 2025-09-18 23:43:18,375 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1500 | Val mean-roc_auc_score: 0.7671
129
+ 2025-09-18 23:43:35,820 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1530 | Val mean-roc_auc_score: 0.7739
130
+ 2025-09-18 23:43:36,343 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1176
131
+ 2025-09-18 23:43:36,875 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7739
132
+ 2025-09-18 23:43:53,484 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1398 | Val mean-roc_auc_score: 0.7621
133
+ 2025-09-18 23:44:10,101 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1379 | Val mean-roc_auc_score: 0.7721
134
+ 2025-09-18 23:44:25,726 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1377 | Val mean-roc_auc_score: 0.7639
135
+ 2025-09-18 23:44:42,399 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1276 | Val mean-roc_auc_score: 0.7718
136
+ 2025-09-18 23:44:59,878 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1311 | Val mean-roc_auc_score: 0.7670
137
+ 2025-09-18 23:45:16,886 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1160 | Val mean-roc_auc_score: 0.7613
138
+ 2025-09-18 23:45:33,052 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1133 | Val mean-roc_auc_score: 0.7578
139
+ 2025-09-18 23:45:49,014 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1179 | Val mean-roc_auc_score: 0.7573
140
+ 2025-09-18 23:46:05,373 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1070 | Val mean-roc_auc_score: 0.7550
141
+ 2025-09-18 23:46:22,543 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1033 | Val mean-roc_auc_score: 0.7479
142
+ 2025-09-18 23:46:39,192 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0967 | Val mean-roc_auc_score: 0.7523
143
+ 2025-09-18 23:46:55,780 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1088 | Val mean-roc_auc_score: 0.7568
144
+ 2025-09-18 23:47:12,233 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1061 | Val mean-roc_auc_score: 0.7532
145
+ 2025-09-18 23:47:28,007 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1031 | Val mean-roc_auc_score: 0.7445
146
+ 2025-09-18 23:47:45,999 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0859 | Val mean-roc_auc_score: 0.7421
147
+ 2025-09-18 23:48:03,130 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0898 | Val mean-roc_auc_score: 0.7432
148
+ 2025-09-18 23:48:19,248 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0894 | Val mean-roc_auc_score: 0.7375
149
+ 2025-09-18 23:48:35,668 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7421
150
+ 2025-09-18 23:48:52,004 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0869 | Val mean-roc_auc_score: 0.7450
151
+ 2025-09-18 23:49:09,053 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0885 | Val mean-roc_auc_score: 0.7415
152
+ 2025-09-18 23:49:25,211 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0890 | Val mean-roc_auc_score: 0.7400
153
+ 2025-09-18 23:49:41,732 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0863 | Val mean-roc_auc_score: 0.7517
154
+ 2025-09-18 23:49:58,557 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0811 | Val mean-roc_auc_score: 0.7312
155
+ 2025-09-18 23:50:14,645 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0773 | Val mean-roc_auc_score: 0.7462
156
+ 2025-09-18 23:50:31,290 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0802 | Val mean-roc_auc_score: 0.7491
157
+ 2025-09-18 23:50:48,170 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0760 | Val mean-roc_auc_score: 0.7447
158
+ 2025-09-18 23:51:04,605 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0841 | Val mean-roc_auc_score: 0.7368
159
+ 2025-09-18 23:51:20,366 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0806 | Val mean-roc_auc_score: 0.7424
160
+ 2025-09-18 23:51:36,801 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0792 | Val mean-roc_auc_score: 0.7361
161
+ 2025-09-18 23:51:53,744 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0725 | Val mean-roc_auc_score: 0.7374
162
+ 2025-09-18 23:52:09,867 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0745 | Val mean-roc_auc_score: 0.7396
163
+ 2025-09-18 23:52:26,479 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0840 | Val mean-roc_auc_score: 0.7395
164
+ 2025-09-18 23:52:41,896 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0788 | Val mean-roc_auc_score: 0.7379
165
+ 2025-09-18 23:52:57,970 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.7367
166
+ 2025-09-18 23:53:15,090 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0764 | Val mean-roc_auc_score: 0.7339
167
+ 2025-09-18 23:53:31,373 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0762 | Val mean-roc_auc_score: 0.7370
168
+ 2025-09-18 23:53:47,727 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0684 | Val mean-roc_auc_score: 0.7321
169
+ 2025-09-18 23:54:04,559 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0703 | Val mean-roc_auc_score: 0.7443
170
+ 2025-09-18 23:54:20,190 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0793 | Val mean-roc_auc_score: 0.7421
171
+ 2025-09-18 23:54:37,429 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0708 | Val mean-roc_auc_score: 0.7345
172
+ 2025-09-18 23:54:54,387 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0671 | Val mean-roc_auc_score: 0.7340
173
+ 2025-09-18 23:55:11,104 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0693 | Val mean-roc_auc_score: 0.7430
174
+ 2025-09-18 23:55:26,981 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0742 | Val mean-roc_auc_score: 0.7356
175
+ 2025-09-18 23:55:43,434 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0653 | Val mean-roc_auc_score: 0.7328
176
+ 2025-09-18 23:55:59,688 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0632 | Val mean-roc_auc_score: 0.7265
177
+ 2025-09-18 23:56:17,269 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0662 | Val mean-roc_auc_score: 0.7356
178
+ 2025-09-18 23:56:33,451 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0614 | Val mean-roc_auc_score: 0.7341
179
+ 2025-09-18 23:56:49,948 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0685 | Val mean-roc_auc_score: 0.7366
180
+ 2025-09-18 23:57:06,595 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0609 | Val mean-roc_auc_score: 0.7348
181
+ 2025-09-18 23:57:22,208 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0633 | Val mean-roc_auc_score: 0.7336
182
+ 2025-09-18 23:57:39,639 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0629 | Val mean-roc_auc_score: 0.7349
183
+ 2025-09-18 23:57:55,979 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0643 | Val mean-roc_auc_score: 0.7412
184
+ 2025-09-18 23:58:12,255 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0664 | Val mean-roc_auc_score: 0.7347
185
+ 2025-09-18 23:58:28,350 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0620 | Val mean-roc_auc_score: 0.7354
186
+ 2025-09-18 23:58:44,741 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0617 | Val mean-roc_auc_score: 0.7311
187
+ 2025-09-18 23:59:02,526 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7324
188
+ 2025-09-18 23:59:18,526 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0622 | Val mean-roc_auc_score: 0.7368
189
+ 2025-09-18 23:59:34,601 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7322
190
+ 2025-09-18 23:59:51,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0582 | Val mean-roc_auc_score: 0.7366
191
+ 2025-09-19 00:00:07,488 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0712 | Val mean-roc_auc_score: 0.7344
192
+ 2025-09-19 00:00:24,453 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0591 | Val mean-roc_auc_score: 0.7293
193
+ 2025-09-19 00:00:40,937 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0633 | Val mean-roc_auc_score: 0.7327
194
+ 2025-09-19 00:00:57,588 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0618 | Val mean-roc_auc_score: 0.7313
195
+ 2025-09-19 00:01:13,520 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0648 | Val mean-roc_auc_score: 0.7240
196
+ 2025-09-19 00:01:29,835 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0693 | Val mean-roc_auc_score: 0.7319
197
+ 2025-09-19 00:01:47,610 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0680 | Val mean-roc_auc_score: 0.7301
198
+ 2025-09-19 00:02:03,954 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0640 | Val mean-roc_auc_score: 0.7326
199
+ 2025-09-19 00:02:19,985 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7384
200
+ 2025-09-19 00:02:36,321 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0578 | Val mean-roc_auc_score: 0.7338
201
+ 2025-09-19 00:02:52,906 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7359
202
+ 2025-09-19 00:03:10,630 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7317
203
+ 2025-09-19 00:03:26,440 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7313
204
+ 2025-09-19 00:03:42,967 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7272
205
+ 2025-09-19 00:03:59,623 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0578 | Val mean-roc_auc_score: 0.7232
206
+ 2025-09-19 00:04:15,502 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0592 | Val mean-roc_auc_score: 0.7307
207
+ 2025-09-19 00:04:32,956 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0599 | Val mean-roc_auc_score: 0.7440
208
+ 2025-09-19 00:04:49,610 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0572 | Val mean-roc_auc_score: 0.7332
209
+ 2025-09-19 00:05:05,802 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7282
210
+ 2025-09-19 00:05:21,890 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7265
211
+ 2025-09-19 00:05:38,449 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0561 | Val mean-roc_auc_score: 0.7263
212
+ 2025-09-19 00:05:55,985 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0568 | Val mean-roc_auc_score: 0.7281
213
+ 2025-09-19 00:06:12,522 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0609 | Val mean-roc_auc_score: 0.7295
214
+ 2025-09-19 00:06:28,490 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0497 | Val mean-roc_auc_score: 0.7299
215
+ 2025-09-19 00:06:44,853 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7313
216
+ 2025-09-19 00:07:01,412 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7302
217
+ 2025-09-19 00:07:18,526 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0552 | Val mean-roc_auc_score: 0.7368
218
+ 2025-09-19 00:07:33,333 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0522 | Val mean-roc_auc_score: 0.7301
219
+ 2025-09-19 00:07:48,295 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0527 | Val mean-roc_auc_score: 0.7285
220
+ 2025-09-19 00:08:03,160 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0535 | Val mean-roc_auc_score: 0.7258
221
+ 2025-09-19 00:08:18,200 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0496 | Val mean-roc_auc_score: 0.7258
222
+ 2025-09-19 00:08:34,226 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0654 | Val mean-roc_auc_score: 0.7210
223
+ 2025-09-19 00:08:49,112 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0601 | Val mean-roc_auc_score: 0.7164
224
+ 2025-09-19 00:09:04,059 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0679 | Val mean-roc_auc_score: 0.7256
225
+ 2025-09-19 00:09:19,049 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0537 | Val mean-roc_auc_score: 0.7226
226
+ 2025-09-19 00:09:20,040 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7312
227
+ 2025-09-19 00:09:20,491 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset tox21 at 2025-09-19_00-09-20
228
+ 2025-09-19 00:09:34,716 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.1771 | Val mean-roc_auc_score: 0.7317
229
+ 2025-09-19 00:09:34,716 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 196
230
+ 2025-09-19 00:09:35,240 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val mean-roc_auc_score: 0.7317
231
+ 2025-09-19 00:09:49,988 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1610 | Val mean-roc_auc_score: 0.7476
232
+ 2025-09-19 00:09:50,161 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 392
233
+ 2025-09-19 00:09:50,687 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val mean-roc_auc_score: 0.7476
234
+ 2025-09-19 00:10:05,669 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1619 | Val mean-roc_auc_score: 0.7636
235
+ 2025-09-19 00:10:05,847 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 588
236
+ 2025-09-19 00:10:06,377 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val mean-roc_auc_score: 0.7636
237
+ 2025-09-19 00:10:21,299 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1473 | Val mean-roc_auc_score: 0.7588
238
+ 2025-09-19 00:10:36,309 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1484 | Val mean-roc_auc_score: 0.7722
239
+ 2025-09-19 00:10:36,452 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 980
240
+ 2025-09-19 00:10:36,985 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val mean-roc_auc_score: 0.7722
241
+ 2025-09-19 00:10:52,483 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1423 | Val mean-roc_auc_score: 0.7734
242
+ 2025-09-19 00:10:53,115 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1176
243
+ 2025-09-19 00:10:53,646 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val mean-roc_auc_score: 0.7734
244
+ 2025-09-19 00:11:08,904 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1406 | Val mean-roc_auc_score: 0.7721
245
+ 2025-09-19 00:11:24,245 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1415 | Val mean-roc_auc_score: 0.7735
246
+ 2025-09-19 00:11:24,390 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1568
247
+ 2025-09-19 00:11:24,922 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val mean-roc_auc_score: 0.7735
248
+ 2025-09-19 00:11:39,996 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1279 | Val mean-roc_auc_score: 0.7508
249
+ 2025-09-19 00:11:55,009 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1281 | Val mean-roc_auc_score: 0.7812
250
+ 2025-09-19 00:11:55,155 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Global step of best model: 1960
251
+ 2025-09-19 00:11:55,709 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val mean-roc_auc_score: 0.7812
252
+ 2025-09-19 00:12:11,536 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1267 | Val mean-roc_auc_score: 0.7676
253
+ 2025-09-19 00:12:26,574 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1232 | Val mean-roc_auc_score: 0.7588
254
+ 2025-09-19 00:12:41,319 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1126 | Val mean-roc_auc_score: 0.7615
255
+ 2025-09-19 00:12:56,250 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1115 | Val mean-roc_auc_score: 0.7580
256
+ 2025-09-19 00:13:11,438 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1117 | Val mean-roc_auc_score: 0.7543
257
+ 2025-09-19 00:13:27,347 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1059 | Val mean-roc_auc_score: 0.7584
258
+ 2025-09-19 00:13:42,632 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1133 | Val mean-roc_auc_score: 0.7479
259
+ 2025-09-19 00:13:57,515 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1071 | Val mean-roc_auc_score: 0.7503
260
+ 2025-09-19 00:14:12,484 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1120 | Val mean-roc_auc_score: 0.7552
261
+ 2025-09-19 00:14:27,512 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1000 | Val mean-roc_auc_score: 0.7518
262
+ 2025-09-19 00:14:42,869 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0986 | Val mean-roc_auc_score: 0.7496
263
+ 2025-09-19 00:14:58,346 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1016 | Val mean-roc_auc_score: 0.7510
264
+ 2025-09-19 00:15:13,372 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0850 | Val mean-roc_auc_score: 0.7471
265
+ 2025-09-19 00:15:28,448 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1113 | Val mean-roc_auc_score: 0.7514
266
+ 2025-09-19 00:15:43,480 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0844 | Val mean-roc_auc_score: 0.7479
267
+ 2025-09-19 00:15:59,437 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0859 | Val mean-roc_auc_score: 0.7449
268
+ 2025-09-19 00:16:14,832 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0832 | Val mean-roc_auc_score: 0.7427
269
+ 2025-09-19 00:16:29,750 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0852 | Val mean-roc_auc_score: 0.7362
270
+ 2025-09-19 00:16:44,653 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0822 | Val mean-roc_auc_score: 0.7390
271
+ 2025-09-19 00:16:59,724 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0848 | Val mean-roc_auc_score: 0.7337
272
+ 2025-09-19 00:17:15,542 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0802 | Val mean-roc_auc_score: 0.7405
273
+ 2025-09-19 00:17:30,721 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0829 | Val mean-roc_auc_score: 0.7364
274
+ 2025-09-19 00:17:45,500 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0767 | Val mean-roc_auc_score: 0.7282
275
+ 2025-09-19 00:18:00,411 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0747 | Val mean-roc_auc_score: 0.7371
276
+ 2025-09-19 00:18:15,368 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0776 | Val mean-roc_auc_score: 0.7363
277
+ 2025-09-19 00:18:30,996 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0770 | Val mean-roc_auc_score: 0.7387
278
+ 2025-09-19 00:18:46,117 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0781 | Val mean-roc_auc_score: 0.7393
279
+ 2025-09-19 00:19:01,002 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0788 | Val mean-roc_auc_score: 0.7354
280
+ 2025-09-19 00:19:16,056 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0746 | Val mean-roc_auc_score: 0.7419
281
+ 2025-09-19 00:19:31,093 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0734 | Val mean-roc_auc_score: 0.7300
282
+ 2025-09-19 00:19:46,516 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0807 | Val mean-roc_auc_score: 0.7432
283
+ 2025-09-19 00:20:01,831 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0723 | Val mean-roc_auc_score: 0.7290
284
+ 2025-09-19 00:20:16,738 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0737 | Val mean-roc_auc_score: 0.7315
285
+ 2025-09-19 00:20:31,721 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0736 | Val mean-roc_auc_score: 0.7399
286
+ 2025-09-19 00:20:46,710 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0711 | Val mean-roc_auc_score: 0.7273
287
+ 2025-09-19 00:21:02,551 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0679 | Val mean-roc_auc_score: 0.7340
288
+ 2025-09-19 00:21:17,915 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0687 | Val mean-roc_auc_score: 0.7300
289
+ 2025-09-19 00:21:32,905 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0830 | Val mean-roc_auc_score: 0.7410
290
+ 2025-09-19 00:21:48,097 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0869 | Val mean-roc_auc_score: 0.7345
291
+ 2025-09-19 00:22:03,137 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0644 | Val mean-roc_auc_score: 0.7399
292
+ 2025-09-19 00:22:18,176 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0641 | Val mean-roc_auc_score: 0.7411
293
+ 2025-09-19 00:22:34,274 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0666 | Val mean-roc_auc_score: 0.7449
294
+ 2025-09-19 00:22:46,333 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0671 | Val mean-roc_auc_score: 0.7314
295
+ 2025-09-19 00:22:57,977 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0610 | Val mean-roc_auc_score: 0.7374
296
+ 2025-09-19 00:23:09,624 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0652 | Val mean-roc_auc_score: 0.7286
297
+ 2025-09-19 00:23:21,228 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0600 | Val mean-roc_auc_score: 0.7341
298
+ 2025-09-19 00:23:34,282 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0634 | Val mean-roc_auc_score: 0.7320
299
+ 2025-09-19 00:23:45,977 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7351
300
+ 2025-09-19 00:23:57,579 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0645 | Val mean-roc_auc_score: 0.7326
301
+ 2025-09-19 00:24:09,258 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0617 | Val mean-roc_auc_score: 0.7329
302
+ 2025-09-19 00:24:20,919 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0647 | Val mean-roc_auc_score: 0.7426
303
+ 2025-09-19 00:24:33,892 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0640 | Val mean-roc_auc_score: 0.7407
304
+ 2025-09-19 00:24:45,558 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0615 | Val mean-roc_auc_score: 0.7369
305
+ 2025-09-19 00:24:57,239 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0668 | Val mean-roc_auc_score: 0.7269
306
+ 2025-09-19 00:25:08,961 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7265
307
+ 2025-09-19 00:25:20,604 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0621 | Val mean-roc_auc_score: 0.7290
308
+ 2025-09-19 00:25:33,593 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0601 | Val mean-roc_auc_score: 0.7296
309
+ 2025-09-19 00:25:45,229 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0605 | Val mean-roc_auc_score: 0.7334
310
+ 2025-09-19 00:25:56,927 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0586 | Val mean-roc_auc_score: 0.7217
311
+ 2025-09-19 00:26:08,562 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0625 | Val mean-roc_auc_score: 0.7271
312
+ 2025-09-19 00:26:20,186 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0620 | Val mean-roc_auc_score: 0.7236
313
+ 2025-09-19 00:26:33,155 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0573 | Val mean-roc_auc_score: 0.7273
314
+ 2025-09-19 00:26:44,768 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0713 | Val mean-roc_auc_score: 0.7255
315
+ 2025-09-19 00:26:56,373 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0610 | Val mean-roc_auc_score: 0.7248
316
+ 2025-09-19 00:27:08,001 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0550 | Val mean-roc_auc_score: 0.7345
317
+ 2025-09-19 00:27:19,630 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0592 | Val mean-roc_auc_score: 0.7350
318
+ 2025-09-19 00:27:32,610 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0557 | Val mean-roc_auc_score: 0.7337
319
+ 2025-09-19 00:27:44,287 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7307
320
+ 2025-09-19 00:27:55,920 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0584 | Val mean-roc_auc_score: 0.7397
321
+ 2025-09-19 00:28:07,575 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7287
322
+ 2025-09-19 00:28:19,266 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7333
323
+ 2025-09-19 00:28:32,221 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0508 | Val mean-roc_auc_score: 0.7168
324
+ 2025-09-19 00:28:43,852 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0533 | Val mean-roc_auc_score: 0.7285
325
+ 2025-09-19 00:28:55,419 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7346
326
+ 2025-09-19 00:29:07,116 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7245
327
+ 2025-09-19 00:29:18,796 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0583 | Val mean-roc_auc_score: 0.7261
328
+ 2025-09-19 00:29:31,778 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0541 | Val mean-roc_auc_score: 0.7344
329
+ 2025-09-19 00:29:43,469 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0563 | Val mean-roc_auc_score: 0.7272
330
+ 2025-09-19 00:29:55,042 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0543 | Val mean-roc_auc_score: 0.7272
331
+ 2025-09-19 00:30:06,712 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7267
332
+ 2025-09-19 00:30:18,376 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0560 | Val mean-roc_auc_score: 0.7285
333
+ 2025-09-19 00:30:31,370 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0500 | Val mean-roc_auc_score: 0.7309
334
+ 2025-09-19 00:30:43,034 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0539 | Val mean-roc_auc_score: 0.7291
335
+ 2025-09-19 00:30:54,659 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0576 | Val mean-roc_auc_score: 0.7291
336
+ 2025-09-19 00:31:06,388 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0551 | Val mean-roc_auc_score: 0.7376
337
+ 2025-09-19 00:31:18,037 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0542 | Val mean-roc_auc_score: 0.7220
338
+ 2025-09-19 00:31:31,012 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0547 | Val mean-roc_auc_score: 0.7279
339
+ 2025-09-19 00:31:42,766 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0510 | Val mean-roc_auc_score: 0.7285
340
+ 2025-09-19 00:31:54,405 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0610 | Val mean-roc_auc_score: 0.7197
341
+ 2025-09-19 00:32:06,074 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0509 | Val mean-roc_auc_score: 0.7286
342
+ 2025-09-19 00:32:06,860 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Test mean-roc_auc_score: 0.7397
343
+ 2025-09-19 00:32:07,325 - logs_modchembert_tox21_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg mean-roc_auc_score: 0.7360, Std Dev: 0.0036
logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_bace_regression_epochs100_batch_size32_20250918_223910.log ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 22:39:10,756 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Running benchmark for dataset: bace_regression
2
+ 2025-09-18 22:39:10,756 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - dataset: bace_regression, tasks: ['pIC50'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 22:39:10,769 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset bace_regression at 2025-09-18_22-39-10
4
+ 2025-09-18 22:39:15,819 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.7237 | Val rms_score: 0.6409
5
+ 2025-09-18 22:39:15,819 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
6
+ 2025-09-18 22:39:16,386 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.6409
7
+ 2025-09-18 22:39:23,340 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3388 | Val rms_score: 0.7521
8
+ 2025-09-18 22:39:29,125 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2946 | Val rms_score: 0.7578
9
+ 2025-09-18 22:39:36,017 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2566 | Val rms_score: 0.7549
10
+ 2025-09-18 22:39:41,956 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2336 | Val rms_score: 0.6570
11
+ 2025-09-18 22:39:47,868 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2188 | Val rms_score: 0.7808
12
+ 2025-09-18 22:39:54,625 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1883 | Val rms_score: 0.7265
13
+ 2025-09-18 22:40:01,075 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1729 | Val rms_score: 0.6473
14
+ 2025-09-18 22:40:06,592 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1571 | Val rms_score: 0.6790
15
+ 2025-09-18 22:40:12,816 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1562 | Val rms_score: 0.7068
16
+ 2025-09-18 22:40:19,571 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1580 | Val rms_score: 0.6563
17
+ 2025-09-18 22:40:26,951 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1406 | Val rms_score: 0.7123
18
+ 2025-09-18 22:40:31,818 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1242 | Val rms_score: 0.7454
19
+ 2025-09-18 22:40:36,348 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1230 | Val rms_score: 0.6944
20
+ 2025-09-18 22:40:41,047 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1168 | Val rms_score: 0.7047
21
+ 2025-09-18 22:40:45,459 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1045 | Val rms_score: 0.7156
22
+ 2025-09-18 22:40:50,470 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1069 | Val rms_score: 0.6706
23
+ 2025-09-18 22:40:55,170 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1094 | Val rms_score: 0.6934
24
+ 2025-09-18 22:40:59,830 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1065 | Val rms_score: 0.6752
25
+ 2025-09-18 22:41:04,520 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0995 | Val rms_score: 0.7634
26
+ 2025-09-18 22:41:09,146 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0970 | Val rms_score: 0.6750
27
+ 2025-09-18 22:41:13,899 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0920 | Val rms_score: 0.6962
28
+ 2025-09-18 22:41:18,410 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0905 | Val rms_score: 0.7851
29
+ 2025-09-18 22:41:22,734 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0911 | Val rms_score: 0.7384
30
+ 2025-09-18 22:41:27,480 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0913 | Val rms_score: 0.7272
31
+ 2025-09-18 22:41:31,933 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0794 | Val rms_score: 0.6704
32
+ 2025-09-18 22:41:38,241 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0811 | Val rms_score: 0.8030
33
+ 2025-09-18 22:41:42,738 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.1007 | Val rms_score: 0.7625
34
+ 2025-09-18 22:41:46,978 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0918 | Val rms_score: 0.6944
35
+ 2025-09-18 22:41:51,556 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0798 | Val rms_score: 0.7743
36
+ 2025-09-18 22:41:56,007 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0777 | Val rms_score: 0.7149
37
+ 2025-09-18 22:42:00,785 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0635 | Val rms_score: 0.7014
38
+ 2025-09-18 22:42:05,237 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0654 | Val rms_score: 0.7007
39
+ 2025-09-18 22:42:09,568 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0646 | Val rms_score: 0.7283
40
+ 2025-09-18 22:42:14,111 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0672 | Val rms_score: 0.7257
41
+ 2025-09-18 22:42:18,630 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0674 | Val rms_score: 0.7592
42
+ 2025-09-18 22:42:23,379 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0671 | Val rms_score: 0.7201
43
+ 2025-09-18 22:42:27,861 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0617 | Val rms_score: 0.7072
44
+ 2025-09-18 22:42:32,142 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0654 | Val rms_score: 0.7005
45
+ 2025-09-18 22:42:36,619 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0754 | Val rms_score: 0.7132
46
+ 2025-09-18 22:42:41,178 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0633 | Val rms_score: 0.7350
47
+ 2025-09-18 22:42:45,390 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0625 | Val rms_score: 0.7026
48
+ 2025-09-18 22:42:49,966 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0620 | Val rms_score: 0.7127
49
+ 2025-09-18 22:42:54,450 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0576 | Val rms_score: 0.7182
50
+ 2025-09-18 22:42:58,998 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0652 | Val rms_score: 0.7041
51
+ 2025-09-18 22:43:03,489 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0633 | Val rms_score: 0.7484
52
+ 2025-09-18 22:43:08,239 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0584 | Val rms_score: 0.6964
53
+ 2025-09-18 22:43:12,752 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0589 | Val rms_score: 0.7380
54
+ 2025-09-18 22:43:17,164 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0572 | Val rms_score: 0.7434
55
+ 2025-09-18 22:43:21,696 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0604 | Val rms_score: 0.7314
56
+ 2025-09-18 22:43:26,226 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0563 | Val rms_score: 0.7361
57
+ 2025-09-18 22:43:31,004 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0530 | Val rms_score: 0.7613
58
+ 2025-09-18 22:43:36,677 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0592 | Val rms_score: 0.7331
59
+ 2025-09-18 22:43:41,136 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0592 | Val rms_score: 0.7531
60
+ 2025-09-18 22:43:45,570 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0547 | Val rms_score: 0.7343
61
+ 2025-09-18 22:43:50,045 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0533 | Val rms_score: 0.7079
62
+ 2025-09-18 22:43:54,759 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0535 | Val rms_score: 0.7258
63
+ 2025-09-18 22:43:59,192 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0552 | Val rms_score: 0.7251
64
+ 2025-09-18 22:44:03,463 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0654 | Val rms_score: 0.7080
65
+ 2025-09-18 22:44:07,976 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0514 | Val rms_score: 0.7674
66
+ 2025-09-18 22:44:12,503 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0525 | Val rms_score: 0.7324
67
+ 2025-09-18 22:44:17,287 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0487 | Val rms_score: 0.7373
68
+ 2025-09-18 22:44:21,272 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0500 | Val rms_score: 0.7182
69
+ 2025-09-18 22:44:26,072 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0557 | Val rms_score: 0.7402
70
+ 2025-09-18 22:44:31,234 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0504 | Val rms_score: 0.7439
71
+ 2025-09-18 22:44:35,743 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0591 | Val rms_score: 0.7008
72
+ 2025-09-18 22:44:39,937 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0502 | Val rms_score: 0.7079
73
+ 2025-09-18 22:44:44,562 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0458 | Val rms_score: 0.7335
74
+ 2025-09-18 22:44:49,632 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0469 | Val rms_score: 0.7134
75
+ 2025-09-18 22:44:53,772 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0458 | Val rms_score: 0.7449
76
+ 2025-09-18 22:44:58,125 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0514 | Val rms_score: 0.7041
77
+ 2025-09-18 22:45:03,069 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0525 | Val rms_score: 0.7023
78
+ 2025-09-18 22:45:08,066 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0471 | Val rms_score: 0.7291
79
+ 2025-09-18 22:45:13,194 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0462 | Val rms_score: 0.6944
80
+ 2025-09-18 22:45:17,949 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0448 | Val rms_score: 0.7303
81
+ 2025-09-18 22:45:21,866 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0456 | Val rms_score: 0.7476
82
+ 2025-09-18 22:45:26,716 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0472 | Val rms_score: 0.7445
83
+ 2025-09-18 22:45:31,745 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0456 | Val rms_score: 0.7154
84
+ 2025-09-18 22:45:37,715 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0398 | Val rms_score: 0.7458
85
+ 2025-09-18 22:45:42,413 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0450 | Val rms_score: 0.7594
86
+ 2025-09-18 22:45:46,783 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0442 | Val rms_score: 0.7561
87
+ 2025-09-18 22:45:51,413 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0435 | Val rms_score: 0.7216
88
+ 2025-09-18 22:45:55,920 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0415 | Val rms_score: 0.7143
89
+ 2025-09-18 22:46:00,990 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0448 | Val rms_score: 0.7407
90
+ 2025-09-18 22:46:05,644 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0448 | Val rms_score: 0.7446
91
+ 2025-09-18 22:46:10,008 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0514 | Val rms_score: 0.7565
92
+ 2025-09-18 22:46:14,655 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0599 | Val rms_score: 0.7877
93
+ 2025-09-18 22:46:19,644 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0444 | Val rms_score: 0.7151
94
+ 2025-09-18 22:46:24,096 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0389 | Val rms_score: 0.7496
95
+ 2025-09-18 22:46:28,296 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0453 | Val rms_score: 0.7669
96
+ 2025-09-18 22:46:33,072 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0428 | Val rms_score: 0.7733
97
+ 2025-09-18 22:46:38,323 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0421 | Val rms_score: 0.7503
98
+ 2025-09-18 22:46:43,625 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0411 | Val rms_score: 0.7542
99
+ 2025-09-18 22:46:48,636 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0426 | Val rms_score: 0.7159
100
+ 2025-09-18 22:46:52,651 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0430 | Val rms_score: 0.7508
101
+ 2025-09-18 22:46:57,367 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0438 | Val rms_score: 0.7426
102
+ 2025-09-18 22:47:02,653 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0461 | Val rms_score: 0.7160
103
+ 2025-09-18 22:47:07,682 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0423 | Val rms_score: 0.7306
104
+ 2025-09-18 22:47:12,268 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0434 | Val rms_score: 0.7444
105
+ 2025-09-18 22:47:16,663 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0405 | Val rms_score: 0.7832
106
+ 2025-09-18 22:47:17,213 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 0.9858
107
+ 2025-09-18 22:47:17,558 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset bace_regression at 2025-09-18_22-47-17
108
+ 2025-09-18 22:47:21,449 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.6250 | Val rms_score: 0.7853
109
+ 2025-09-18 22:47:21,449 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
110
+ 2025-09-18 22:47:21,993 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.7853
111
+ 2025-09-18 22:47:26,295 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3273 | Val rms_score: 0.6423
112
+ 2025-09-18 22:47:26,474 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
113
+ 2025-09-18 22:47:27,020 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.6423
114
+ 2025-09-18 22:47:31,547 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2400 | Val rms_score: 0.6093
115
+ 2025-09-18 22:47:31,725 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 114
116
+ 2025-09-18 22:47:32,274 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.6093
117
+ 2025-09-18 22:47:36,768 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2253 | Val rms_score: 0.9181
118
+ 2025-09-18 22:47:41,530 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2138 | Val rms_score: 0.6858
119
+ 2025-09-18 22:47:46,620 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1853 | Val rms_score: 0.7366
120
+ 2025-09-18 22:47:51,894 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1776 | Val rms_score: 0.6640
121
+ 2025-09-18 22:47:56,369 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1807 | Val rms_score: 0.8109
122
+ 2025-09-18 22:48:00,949 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1530 | Val rms_score: 0.8436
123
+ 2025-09-18 22:48:05,713 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1299 | Val rms_score: 0.7094
124
+ 2025-09-18 22:48:10,234 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1328 | Val rms_score: 0.7253
125
+ 2025-09-18 22:48:15,390 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1201 | Val rms_score: 0.6851
126
+ 2025-09-18 22:48:19,808 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1201 | Val rms_score: 0.7882
127
+ 2025-09-18 22:48:23,988 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1338 | Val rms_score: 0.6953
128
+ 2025-09-18 22:48:28,739 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1110 | Val rms_score: 0.6943
129
+ 2025-09-18 22:48:33,802 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1426 | Val rms_score: 0.7623
130
+ 2025-09-18 22:48:39,240 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1032 | Val rms_score: 0.7410
131
+ 2025-09-18 22:48:44,056 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1020 | Val rms_score: 0.7396
132
+ 2025-09-18 22:48:48,386 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1016 | Val rms_score: 0.6765
133
+ 2025-09-18 22:48:52,700 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0835 | Val rms_score: 0.7154
134
+ 2025-09-18 22:48:57,520 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0831 | Val rms_score: 0.6407
135
+ 2025-09-18 22:49:02,808 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0877 | Val rms_score: 0.6327
136
+ 2025-09-18 22:49:07,917 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0765 | Val rms_score: 0.7532
137
+ 2025-09-18 22:49:12,737 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0840 | Val rms_score: 0.7847
138
+ 2025-09-18 22:49:17,098 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0765 | Val rms_score: 0.6964
139
+ 2025-09-18 22:49:21,524 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0695 | Val rms_score: 0.7403
140
+ 2025-09-18 22:49:27,444 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0751 | Val rms_score: 0.7003
141
+ 2025-09-18 22:49:32,331 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0752 | Val rms_score: 0.6954
142
+ 2025-09-18 22:49:37,496 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0732 | Val rms_score: 0.7393
143
+ 2025-09-18 22:49:42,812 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0703 | Val rms_score: 0.7621
144
+ 2025-09-18 22:49:47,705 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0707 | Val rms_score: 0.7313
145
+ 2025-09-18 22:49:52,324 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0713 | Val rms_score: 0.7520
146
+ 2025-09-18 22:49:57,079 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0646 | Val rms_score: 0.7191
147
+ 2025-09-18 22:50:01,626 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0728 | Val rms_score: 0.7130
148
+ 2025-09-18 22:50:06,314 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0648 | Val rms_score: 0.6994
149
+ 2025-09-18 22:50:10,677 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0609 | Val rms_score: 0.7558
150
+ 2025-09-18 22:50:15,369 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0664 | Val rms_score: 0.6998
151
+ 2025-09-18 22:50:19,852 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0551 | Val rms_score: 0.7428
152
+ 2025-09-18 22:50:24,951 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0580 | Val rms_score: 0.7628
153
+ 2025-09-18 22:50:30,041 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0625 | Val rms_score: 0.7271
154
+ 2025-09-18 22:50:34,813 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0641 | Val rms_score: 0.7574
155
+ 2025-09-18 22:50:39,277 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0584 | Val rms_score: 0.7142
156
+ 2025-09-18 22:50:43,764 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0584 | Val rms_score: 0.6831
157
+ 2025-09-18 22:50:48,896 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0646 | Val rms_score: 0.7584
158
+ 2025-09-18 22:50:53,397 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0691 | Val rms_score: 0.6787
159
+ 2025-09-18 22:50:57,385 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0539 | Val rms_score: 0.6964
160
+ 2025-09-18 22:51:02,338 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0485 | Val rms_score: 0.7922
161
+ 2025-09-18 22:51:07,331 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0488 | Val rms_score: 0.7558
162
+ 2025-09-18 22:51:12,436 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0483 | Val rms_score: 0.7610
163
+ 2025-09-18 22:51:16,724 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0471 | Val rms_score: 0.7393
164
+ 2025-09-18 22:51:20,970 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0471 | Val rms_score: 0.7283
165
+ 2025-09-18 22:51:25,843 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0539 | Val rms_score: 0.7395
166
+ 2025-09-18 22:51:31,411 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0661 | Val rms_score: 0.7986
167
+ 2025-09-18 22:51:36,292 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0670 | Val rms_score: 0.7396
168
+ 2025-09-18 22:51:41,412 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0609 | Val rms_score: 0.6850
169
+ 2025-09-18 22:51:46,346 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0508 | Val rms_score: 0.7245
170
+ 2025-09-18 22:51:50,962 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0436 | Val rms_score: 0.7502
171
+ 2025-09-18 22:51:55,082 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0613 | Val rms_score: 0.7223
172
+ 2025-09-18 22:52:00,132 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0432 | Val rms_score: 0.7128
173
+ 2025-09-18 22:52:05,116 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0428 | Val rms_score: 0.7634
174
+ 2025-09-18 22:52:09,658 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0423 | Val rms_score: 0.7261
175
+ 2025-09-18 22:52:14,246 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0411 | Val rms_score: 0.7656
176
+ 2025-09-18 22:52:18,550 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0407 | Val rms_score: 0.7848
177
+ 2025-09-18 22:52:23,294 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0452 | Val rms_score: 0.7282
178
+ 2025-09-18 22:52:28,478 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0448 | Val rms_score: 0.7226
179
+ 2025-09-18 22:52:33,497 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0432 | Val rms_score: 0.7359
180
+ 2025-09-18 22:52:38,447 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0471 | Val rms_score: 0.7773
181
+ 2025-09-18 22:52:42,821 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0417 | Val rms_score: 0.7585
182
+ 2025-09-18 22:52:47,182 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0469 | Val rms_score: 0.7168
183
+ 2025-09-18 22:52:52,344 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0442 | Val rms_score: 0.7915
184
+ 2025-09-18 22:52:57,260 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0401 | Val rms_score: 0.7469
185
+ 2025-09-18 22:53:01,982 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0375 | Val rms_score: 0.7210
186
+ 2025-09-18 22:53:06,241 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0407 | Val rms_score: 0.6767
187
+ 2025-09-18 22:53:10,793 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0518 | Val rms_score: 0.7158
188
+ 2025-09-18 22:53:15,568 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0471 | Val rms_score: 0.7600
189
+ 2025-09-18 22:53:20,671 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0413 | Val rms_score: 0.7613
190
+ 2025-09-18 22:53:25,783 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0439 | Val rms_score: 0.7905
191
+ 2025-09-18 22:53:30,062 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0366 | Val rms_score: 0.8062
192
+ 2025-09-18 22:53:35,375 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0552 | Val rms_score: 0.7616
193
+ 2025-09-18 22:53:39,809 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0378 | Val rms_score: 0.7355
194
+ 2025-09-18 22:53:44,350 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0393 | Val rms_score: 0.7621
195
+ 2025-09-18 22:53:49,360 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0457 | Val rms_score: 0.7313
196
+ 2025-09-18 22:53:53,907 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0399 | Val rms_score: 0.7459
197
+ 2025-09-18 22:53:57,868 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0409 | Val rms_score: 0.8095
198
+ 2025-09-18 22:54:02,653 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0427 | Val rms_score: 0.7643
199
+ 2025-09-18 22:54:07,878 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0372 | Val rms_score: 0.7979
200
+ 2025-09-18 22:54:12,984 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0365 | Val rms_score: 0.8017
201
+ 2025-09-18 22:54:17,159 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0345 | Val rms_score: 0.7506
202
+ 2025-09-18 22:54:21,488 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0395 | Val rms_score: 0.7373
203
+ 2025-09-18 22:54:26,663 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0424 | Val rms_score: 0.7339
204
+ 2025-09-18 22:54:31,176 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0382 | Val rms_score: 0.8000
205
+ 2025-09-18 22:54:35,787 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0360 | Val rms_score: 0.7468
206
+ 2025-09-18 22:54:40,007 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0347 | Val rms_score: 0.7641
207
+ 2025-09-18 22:54:44,740 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0384 | Val rms_score: 0.7763
208
+ 2025-09-18 22:54:49,951 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0367 | Val rms_score: 0.7629
209
+ 2025-09-18 22:54:54,860 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0327 | Val rms_score: 0.7534
210
+ 2025-09-18 22:54:59,646 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0339 | Val rms_score: 0.7823
211
+ 2025-09-18 22:55:03,959 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0355 | Val rms_score: 0.7914
212
+ 2025-09-18 22:55:08,553 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0421 | Val rms_score: 0.7760
213
+ 2025-09-18 22:55:13,293 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0339 | Val rms_score: 0.7703
214
+ 2025-09-18 22:55:13,857 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 0.9658
215
+ 2025-09-18 22:55:14,217 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset bace_regression at 2025-09-18_22-55-14
216
+ 2025-09-18 22:55:18,878 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.6645 | Val rms_score: 0.8086
217
+ 2025-09-18 22:55:18,878 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 38
218
+ 2025-09-18 22:55:19,411 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.8086
219
+ 2025-09-18 22:55:24,613 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3339 | Val rms_score: 0.7210
220
+ 2025-09-18 22:55:24,786 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 76
221
+ 2025-09-18 22:55:25,348 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7210
222
+ 2025-09-18 22:55:30,442 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2902 | Val rms_score: 0.7211
223
+ 2025-09-18 22:55:35,461 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2681 | Val rms_score: 0.7821
224
+ 2025-09-18 22:55:40,005 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.2336 | Val rms_score: 0.7710
225
+ 2025-09-18 22:55:44,147 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.2210 | Val rms_score: 0.7085
226
+ 2025-09-18 22:55:44,561 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 228
227
+ 2025-09-18 22:55:45,089 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.7085
228
+ 2025-09-18 22:55:49,536 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1908 | Val rms_score: 0.7682
229
+ 2025-09-18 22:55:54,078 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.2109 | Val rms_score: 0.8152
230
+ 2025-09-18 22:55:58,897 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.1735 | Val rms_score: 0.7162
231
+ 2025-09-18 22:56:03,419 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.1472 | Val rms_score: 0.7539
232
+ 2025-09-18 22:56:07,851 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1398 | Val rms_score: 0.7109
233
+ 2025-09-18 22:56:12,291 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1299 | Val rms_score: 0.7313
234
+ 2025-09-18 22:56:16,896 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1291 | Val rms_score: 0.7622
235
+ 2025-09-18 22:56:21,759 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1162 | Val rms_score: 0.8129
236
+ 2025-09-18 22:56:26,867 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1168 | Val rms_score: 0.7525
237
+ 2025-09-18 22:56:31,763 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1484 | Val rms_score: 0.8000
238
+ 2025-09-18 22:56:36,491 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1028 | Val rms_score: 0.7344
239
+ 2025-09-18 22:56:40,647 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1069 | Val rms_score: 0.7745
240
+ 2025-09-18 22:56:45,312 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0973 | Val rms_score: 0.6807
241
+ 2025-09-18 22:56:45,471 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 722
242
+ 2025-09-18 22:56:46,009 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 19 with val rms_score: 0.6807
243
+ 2025-09-18 22:56:50,709 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0991 | Val rms_score: 0.6743
244
+ 2025-09-18 22:56:50,889 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 760
245
+ 2025-09-18 22:56:51,420 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val rms_score: 0.6743
246
+ 2025-09-18 22:56:56,050 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1234 | Val rms_score: 0.6598
247
+ 2025-09-18 22:56:56,512 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Global step of best model: 798
248
+ 2025-09-18 22:56:57,043 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Best model saved at epoch 21 with val rms_score: 0.6598
249
+ 2025-09-18 22:57:01,423 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1063 | Val rms_score: 0.7453
250
+ 2025-09-18 22:57:06,090 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0979 | Val rms_score: 0.7052
251
+ 2025-09-18 22:57:11,226 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0911 | Val rms_score: 0.7377
252
+ 2025-09-18 22:57:16,097 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0822 | Val rms_score: 0.7413
253
+ 2025-09-18 22:57:20,482 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0794 | Val rms_score: 0.7499
254
+ 2025-09-18 22:57:26,492 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0805 | Val rms_score: 0.7057
255
+ 2025-09-18 22:57:30,709 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0773 | Val rms_score: 0.7362
256
+ 2025-09-18 22:57:34,990 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.1377 | Val rms_score: 0.7275
257
+ 2025-09-18 22:57:39,784 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0744 | Val rms_score: 0.7311
258
+ 2025-09-18 22:57:44,922 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0699 | Val rms_score: 0.7117
259
+ 2025-09-18 22:57:49,808 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0757 | Val rms_score: 0.7428
260
+ 2025-09-18 22:57:54,154 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0670 | Val rms_score: 0.7077
261
+ 2025-09-18 22:57:58,550 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0654 | Val rms_score: 0.7439
262
+ 2025-09-18 22:58:03,400 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0635 | Val rms_score: 0.7215
263
+ 2025-09-18 22:58:08,566 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0683 | Val rms_score: 0.7599
264
+ 2025-09-18 22:58:13,938 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0635 | Val rms_score: 0.7413
265
+ 2025-09-18 22:58:18,624 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0658 | Val rms_score: 0.7095
266
+ 2025-09-18 22:58:22,994 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0654 | Val rms_score: 0.7240
267
+ 2025-09-18 22:58:27,350 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0688 | Val rms_score: 0.6902
268
+ 2025-09-18 22:58:32,060 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0703 | Val rms_score: 0.7296
269
+ 2025-09-18 22:58:37,468 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0666 | Val rms_score: 0.6601
270
+ 2025-09-18 22:58:42,420 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0630 | Val rms_score: 0.7071
271
+ 2025-09-18 22:58:47,032 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0609 | Val rms_score: 0.6921
272
+ 2025-09-18 22:58:51,525 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0602 | Val rms_score: 0.7121
273
+ 2025-09-18 22:58:56,055 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0629 | Val rms_score: 0.7241
274
+ 2025-09-18 22:59:01,034 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0617 | Val rms_score: 0.7783
275
+ 2025-09-18 22:59:06,207 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0645 | Val rms_score: 0.7617
276
+ 2025-09-18 22:59:11,169 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0609 | Val rms_score: 0.7669
277
+ 2025-09-18 22:59:15,602 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0613 | Val rms_score: 0.7518
278
+ 2025-09-18 22:59:19,957 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0555 | Val rms_score: 0.7605
279
+ 2025-09-18 22:59:24,675 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0530 | Val rms_score: 0.7230
280
+ 2025-09-18 22:59:30,277 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0541 | Val rms_score: 0.7220
281
+ 2025-09-18 22:59:35,095 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0547 | Val rms_score: 0.7048
282
+ 2025-09-18 22:59:39,684 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0516 | Val rms_score: 0.6942
283
+ 2025-09-18 22:59:44,048 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0558 | Val rms_score: 0.7222
284
+ 2025-09-18 22:59:48,788 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0576 | Val rms_score: 0.7349
285
+ 2025-09-18 22:59:53,237 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0479 | Val rms_score: 0.6799
286
+ 2025-09-18 22:59:58,334 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0539 | Val rms_score: 0.7301
287
+ 2025-09-18 23:00:02,646 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0514 | Val rms_score: 0.7139
288
+ 2025-09-18 23:00:06,862 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0490 | Val rms_score: 0.7576
289
+ 2025-09-18 23:00:11,828 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0479 | Val rms_score: 0.7861
290
+ 2025-09-18 23:00:16,739 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0487 | Val rms_score: 0.7284
291
+ 2025-09-18 23:00:21,224 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0488 | Val rms_score: 0.7513
292
+ 2025-09-18 23:00:25,296 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0535 | Val rms_score: 0.7385
293
+ 2025-09-18 23:00:30,123 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0537 | Val rms_score: 0.7476
294
+ 2025-09-18 23:00:35,565 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0461 | Val rms_score: 0.7346
295
+ 2025-09-18 23:00:40,476 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0471 | Val rms_score: 0.7594
296
+ 2025-09-18 23:00:44,321 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0511 | Val rms_score: 0.7127
297
+ 2025-09-18 23:00:48,894 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0487 | Val rms_score: 0.7254
298
+ 2025-09-18 23:00:53,964 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0485 | Val rms_score: 0.6932
299
+ 2025-09-18 23:00:59,136 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0471 | Val rms_score: 0.7105
300
+ 2025-09-18 23:01:03,249 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0446 | Val rms_score: 0.7025
301
+ 2025-09-18 23:01:07,628 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0482 | Val rms_score: 0.7125
302
+ 2025-09-18 23:01:12,400 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0510 | Val rms_score: 0.7051
303
+ 2025-09-18 23:01:17,592 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0446 | Val rms_score: 0.7253
304
+ 2025-09-18 23:01:23,006 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0439 | Val rms_score: 0.7351
305
+ 2025-09-18 23:01:27,731 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0430 | Val rms_score: 0.7643
306
+ 2025-09-18 23:01:32,818 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0674 | Val rms_score: 0.7222
307
+ 2025-09-18 23:01:36,771 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0506 | Val rms_score: 0.7822
308
+ 2025-09-18 23:01:41,875 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0399 | Val rms_score: 0.7520
309
+ 2025-09-18 23:01:47,126 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0398 | Val rms_score: 0.7653
310
+ 2025-09-18 23:01:51,683 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0417 | Val rms_score: 0.7802
311
+ 2025-09-18 23:01:56,027 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0417 | Val rms_score: 0.7446
312
+ 2025-09-18 23:02:00,481 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0464 | Val rms_score: 0.7332
313
+ 2025-09-18 23:02:05,344 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0428 | Val rms_score: 0.7390
314
+ 2025-09-18 23:02:10,742 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0524 | Val rms_score: 0.7326
315
+ 2025-09-18 23:02:15,701 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0421 | Val rms_score: 0.7391
316
+ 2025-09-18 23:02:20,106 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0458 | Val rms_score: 0.7637
317
+ 2025-09-18 23:02:24,483 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0461 | Val rms_score: 0.7128
318
+ 2025-09-18 23:02:29,038 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0415 | Val rms_score: 0.7008
319
+ 2025-09-18 23:02:34,240 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0364 | Val rms_score: 0.7522
320
+ 2025-09-18 23:02:39,391 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0377 | Val rms_score: 0.7550
321
+ 2025-09-18 23:02:44,258 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0370 | Val rms_score: 0.7139
322
+ 2025-09-18 23:02:48,543 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0365 | Val rms_score: 0.7236
323
+ 2025-09-18 23:02:52,891 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0395 | Val rms_score: 0.7068
324
+ 2025-09-18 23:02:57,756 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0417 | Val rms_score: 0.7403
325
+ 2025-09-18 23:03:02,889 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0337 | Val rms_score: 0.7080
326
+ 2025-09-18 23:03:07,930 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0370 | Val rms_score: 0.7025
327
+ 2025-09-18 23:03:12,651 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0434 | Val rms_score: 0.7197
328
+ 2025-09-18 23:03:13,180 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Test rms_score: 1.0277
329
+ 2025-09-18 23:03:13,546 - logs_modchembert_bace_regression_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.9931, Std Dev: 0.0258
logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_clearance_epochs100_batch_size32_20250919_000714.log ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-19 00:07:14,829 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Running benchmark for dataset: clearance
2
+ 2025-09-19 00:07:14,830 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - dataset: clearance, tasks: ['target'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-19 00:07:14,842 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset clearance at 2025-09-19_00-07-14
4
+ 2025-09-19 00:07:17,559 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 2.6310 | Val rms_score: 58.6210
5
+ 2025-09-19 00:07:17,559 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
6
+ 2025-09-19 00:07:18,076 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 58.6210
7
+ 2025-09-19 00:07:20,952 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.0298 | Val rms_score: 56.0436
8
+ 2025-09-19 00:07:21,134 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
9
+ 2025-09-19 00:07:21,667 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 56.0436
10
+ 2025-09-19 00:07:24,679 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.8333 | Val rms_score: 54.3273
11
+ 2025-09-19 00:07:24,853 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
12
+ 2025-09-19 00:07:25,377 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 54.3273
13
+ 2025-09-19 00:07:28,388 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.7440 | Val rms_score: 56.0074
14
+ 2025-09-19 00:07:31,456 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.6500 | Val rms_score: 55.3287
15
+ 2025-09-19 00:07:34,122 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.5625 | Val rms_score: 56.3562
16
+ 2025-09-19 00:07:37,495 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4911 | Val rms_score: 58.2043
17
+ 2025-09-19 00:07:40,531 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.4464 | Val rms_score: 57.0827
18
+ 2025-09-19 00:07:43,556 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.3423 | Val rms_score: 58.1382
19
+ 2025-09-19 00:07:46,637 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.3109 | Val rms_score: 57.2970
20
+ 2025-09-19 00:07:49,279 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2500 | Val rms_score: 57.2432
21
+ 2025-09-19 00:07:52,615 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.2292 | Val rms_score: 57.1089
22
+ 2025-09-19 00:07:55,690 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2054 | Val rms_score: 57.3094
23
+ 2025-09-19 00:07:58,776 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.2009 | Val rms_score: 56.9412
24
+ 2025-09-19 00:08:01,795 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1615 | Val rms_score: 56.8359
25
+ 2025-09-19 00:08:04,467 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1429 | Val rms_score: 56.6545
26
+ 2025-09-19 00:08:07,832 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1317 | Val rms_score: 56.2957
27
+ 2025-09-19 00:08:10,834 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1205 | Val rms_score: 55.8916
28
+ 2025-09-19 00:08:13,899 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1235 | Val rms_score: 55.2827
29
+ 2025-09-19 00:08:16,885 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1070 | Val rms_score: 56.1769
30
+ 2025-09-19 00:08:19,528 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1042 | Val rms_score: 56.5870
31
+ 2025-09-19 00:08:22,950 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1071 | Val rms_score: 56.8591
32
+ 2025-09-19 00:08:26,009 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.1146 | Val rms_score: 57.9472
33
+ 2025-09-19 00:08:29,067 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0967 | Val rms_score: 55.9506
34
+ 2025-09-19 00:08:32,034 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0882 | Val rms_score: 56.3288
35
+ 2025-09-19 00:08:34,650 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0893 | Val rms_score: 55.2413
36
+ 2025-09-19 00:08:38,081 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0878 | Val rms_score: 56.3887
37
+ 2025-09-19 00:08:41,079 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0833 | Val rms_score: 55.5600
38
+ 2025-09-19 00:08:44,156 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0790 | Val rms_score: 55.0717
39
+ 2025-09-19 00:08:47,220 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0830 | Val rms_score: 55.0917
40
+ 2025-09-19 00:08:49,871 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0789 | Val rms_score: 55.5178
41
+ 2025-09-19 00:08:53,308 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0785 | Val rms_score: 56.2200
42
+ 2025-09-19 00:08:56,381 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0751 | Val rms_score: 56.6292
43
+ 2025-09-19 00:08:59,446 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0753 | Val rms_score: 56.0991
44
+ 2025-09-19 00:09:02,452 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0755 | Val rms_score: 56.6028
45
+ 2025-09-19 00:09:05,115 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0707 | Val rms_score: 56.7846
46
+ 2025-09-19 00:09:08,549 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0707 | Val rms_score: 55.9633
47
+ 2025-09-19 00:09:11,611 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0640 | Val rms_score: 56.5299
48
+ 2025-09-19 00:09:14,711 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0678 | Val rms_score: 57.7201
49
+ 2025-09-19 00:09:17,744 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0685 | Val rms_score: 56.6135
50
+ 2025-09-19 00:09:20,391 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0662 | Val rms_score: 56.7955
51
+ 2025-09-19 00:09:23,865 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0662 | Val rms_score: 58.1198
52
+ 2025-09-19 00:09:26,950 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0498 | Val rms_score: 57.7498
53
+ 2025-09-19 00:09:30,085 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0640 | Val rms_score: 56.4225
54
+ 2025-09-19 00:09:33,185 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0610 | Val rms_score: 55.3442
55
+ 2025-09-19 00:09:35,881 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0577 | Val rms_score: 55.5514
56
+ 2025-09-19 00:09:39,325 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0539 | Val rms_score: 56.6321
57
+ 2025-09-19 00:09:43,273 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0410 | Val rms_score: 56.8196
58
+ 2025-09-19 00:09:46,258 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0536 | Val rms_score: 56.9805
59
+ 2025-09-19 00:09:49,299 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0558 | Val rms_score: 56.8610
60
+ 2025-09-19 00:09:51,856 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0513 | Val rms_score: 56.5846
61
+ 2025-09-19 00:09:55,315 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0558 | Val rms_score: 55.9076
62
+ 2025-09-19 00:09:58,315 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0475 | Val rms_score: 56.2028
63
+ 2025-09-19 00:10:01,395 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0521 | Val rms_score: 56.8161
64
+ 2025-09-19 00:10:04,466 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0595 | Val rms_score: 55.2794
65
+ 2025-09-19 00:10:07,075 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0606 | Val rms_score: 55.7361
66
+ 2025-09-19 00:10:10,500 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0636 | Val rms_score: 55.8035
67
+ 2025-09-19 00:10:13,511 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0599 | Val rms_score: 55.9633
68
+ 2025-09-19 00:10:16,602 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0703 | Val rms_score: 54.6558
69
+ 2025-09-19 00:10:19,598 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0640 | Val rms_score: 55.8493
70
+ 2025-09-19 00:10:22,220 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0562 | Val rms_score: 55.0640
71
+ 2025-09-19 00:10:25,591 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0532 | Val rms_score: 55.8207
72
+ 2025-09-19 00:10:28,718 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0480 | Val rms_score: 55.7940
73
+ 2025-09-19 00:10:31,757 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0577 | Val rms_score: 56.3324
74
+ 2025-09-19 00:10:34,811 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0554 | Val rms_score: 56.6949
75
+ 2025-09-19 00:10:37,469 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0510 | Val rms_score: 56.8767
76
+ 2025-09-19 00:10:40,515 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0469 | Val rms_score: 56.2380
77
+ 2025-09-19 00:10:43,559 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0532 | Val rms_score: 54.7337
78
+ 2025-09-19 00:10:46,650 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0510 | Val rms_score: 54.4703
79
+ 2025-09-19 00:10:49,711 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0432 | Val rms_score: 56.5667
80
+ 2025-09-19 00:10:52,517 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0443 | Val rms_score: 55.5260
81
+ 2025-09-19 00:10:55,515 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0433 | Val rms_score: 55.9265
82
+ 2025-09-19 00:10:58,552 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0536 | Val rms_score: 55.6607
83
+ 2025-09-19 00:11:01,619 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0413 | Val rms_score: 55.9157
84
+ 2025-09-19 00:11:04,691 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0392 | Val rms_score: 55.7992
85
+ 2025-09-19 00:11:07,754 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0385 | Val rms_score: 56.1687
86
+ 2025-09-19 00:11:10,949 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0430 | Val rms_score: 55.1791
87
+ 2025-09-19 00:11:14,025 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0452 | Val rms_score: 55.4324
88
+ 2025-09-19 00:11:17,051 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0417 | Val rms_score: 56.0430
89
+ 2025-09-19 00:11:20,122 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0439 | Val rms_score: 56.4957
90
+ 2025-09-19 00:11:23,191 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0356 | Val rms_score: 56.0014
91
+ 2025-09-19 00:11:26,139 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0432 | Val rms_score: 54.7762
92
+ 2025-09-19 00:11:29,217 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0394 | Val rms_score: 55.5676
93
+ 2025-09-19 00:11:32,302 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0392 | Val rms_score: 55.7978
94
+ 2025-09-19 00:11:35,374 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0391 | Val rms_score: 56.0407
95
+ 2025-09-19 00:11:38,391 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0314 | Val rms_score: 55.4649
96
+ 2025-09-19 00:11:41,446 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0433 | Val rms_score: 56.2025
97
+ 2025-09-19 00:11:44,457 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0402 | Val rms_score: 55.7496
98
+ 2025-09-19 00:11:47,522 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0406 | Val rms_score: 55.6187
99
+ 2025-09-19 00:11:50,595 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0396 | Val rms_score: 54.6167
100
+ 2025-09-19 00:11:53,636 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0401 | Val rms_score: 55.1975
101
+ 2025-09-19 00:11:56,591 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0452 | Val rms_score: 55.5307
102
+ 2025-09-19 00:11:59,607 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0420 | Val rms_score: 54.8134
103
+ 2025-09-19 00:12:02,662 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0353 | Val rms_score: 54.8311
104
+ 2025-09-19 00:12:05,705 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0391 | Val rms_score: 54.4069
105
+ 2025-09-19 00:12:09,687 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0364 | Val rms_score: 54.8203
106
+ 2025-09-19 00:12:12,725 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0465 | Val rms_score: 55.1857
107
+ 2025-09-19 00:12:15,799 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0407 | Val rms_score: 55.3114
108
+ 2025-09-19 00:12:18,873 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0420 | Val rms_score: 54.8672
109
+ 2025-09-19 00:12:21,888 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0344 | Val rms_score: 54.1863
110
+ 2025-09-19 00:12:22,027 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 2100
111
+ 2025-09-19 00:12:22,562 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 100 with val rms_score: 54.1863
112
+ 2025-09-19 00:12:22,935 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 44.9870
113
+ 2025-09-19 00:12:23,336 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset clearance at 2025-09-19_00-12-23
114
+ 2025-09-19 00:12:26,029 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 2.4405 | Val rms_score: 55.1055
115
+ 2025-09-19 00:12:26,029 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
116
+ 2025-09-19 00:12:26,554 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 55.1055
117
+ 2025-09-19 00:12:29,484 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 1.0119 | Val rms_score: 53.8780
118
+ 2025-09-19 00:12:29,656 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
119
+ 2025-09-19 00:12:30,172 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 53.8780
120
+ 2025-09-19 00:12:33,224 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.8571 | Val rms_score: 53.0063
121
+ 2025-09-19 00:12:33,400 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 63
122
+ 2025-09-19 00:12:33,915 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 53.0063
123
+ 2025-09-19 00:12:36,954 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.7619 | Val rms_score: 54.9300
124
+ 2025-09-19 00:12:39,992 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.7250 | Val rms_score: 54.2130
125
+ 2025-09-19 00:12:42,611 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.5179 | Val rms_score: 56.5466
126
+ 2025-09-19 00:12:45,988 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4345 | Val rms_score: 55.9367
127
+ 2025-09-19 00:12:49,046 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3467 | Val rms_score: 55.8245
128
+ 2025-09-19 00:12:52,089 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.2842 | Val rms_score: 56.5133
129
+ 2025-09-19 00:12:55,130 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.2594 | Val rms_score: 56.6355
130
+ 2025-09-19 00:12:57,780 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.2113 | Val rms_score: 56.1184
131
+ 2025-09-19 00:13:01,177 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1860 | Val rms_score: 57.1938
132
+ 2025-09-19 00:13:04,192 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1585 | Val rms_score: 55.2399
133
+ 2025-09-19 00:13:07,258 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1496 | Val rms_score: 56.8368
134
+ 2025-09-19 00:13:10,286 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1375 | Val rms_score: 55.4018
135
+ 2025-09-19 00:13:12,949 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1317 | Val rms_score: 55.2723
136
+ 2025-09-19 00:13:16,360 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1235 | Val rms_score: 56.0034
137
+ 2025-09-19 00:13:19,361 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1079 | Val rms_score: 55.8381
138
+ 2025-09-19 00:13:22,465 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1064 | Val rms_score: 55.5440
139
+ 2025-09-19 00:13:25,509 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0918 | Val rms_score: 54.9401
140
+ 2025-09-19 00:13:28,124 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0923 | Val rms_score: 54.3789
141
+ 2025-09-19 00:13:31,525 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0807 | Val rms_score: 55.0439
142
+ 2025-09-19 00:13:34,586 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0885 | Val rms_score: 54.6429
143
+ 2025-09-19 00:13:37,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1138 | Val rms_score: 54.7306
144
+ 2025-09-19 00:13:40,707 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0763 | Val rms_score: 55.0819
145
+ 2025-09-19 00:13:43,292 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0711 | Val rms_score: 55.4741
146
+ 2025-09-19 00:13:46,722 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0692 | Val rms_score: 55.3287
147
+ 2025-09-19 00:13:49,731 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0755 | Val rms_score: 55.4888
148
+ 2025-09-19 00:13:52,816 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0751 | Val rms_score: 55.0988
149
+ 2025-09-19 00:13:55,902 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0759 | Val rms_score: 56.0141
150
+ 2025-09-19 00:13:58,505 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0796 | Val rms_score: 55.7271
151
+ 2025-09-19 00:14:01,886 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0714 | Val rms_score: 55.8617
152
+ 2025-09-19 00:14:04,928 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0800 | Val rms_score: 54.9897
153
+ 2025-09-19 00:14:07,989 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0614 | Val rms_score: 55.0580
154
+ 2025-09-19 00:14:11,028 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0770 | Val rms_score: 55.6453
155
+ 2025-09-19 00:14:13,796 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0681 | Val rms_score: 55.4357
156
+ 2025-09-19 00:14:17,186 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0629 | Val rms_score: 54.6916
157
+ 2025-09-19 00:14:20,254 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0573 | Val rms_score: 55.6737
158
+ 2025-09-19 00:14:23,273 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0510 | Val rms_score: 55.1714
159
+ 2025-09-19 00:14:26,271 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0525 | Val rms_score: 55.5315
160
+ 2025-09-19 00:14:28,900 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0499 | Val rms_score: 54.4333
161
+ 2025-09-19 00:14:32,344 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0577 | Val rms_score: 54.8874
162
+ 2025-09-19 00:14:34,951 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0560 | Val rms_score: 54.8566
163
+ 2025-09-19 00:14:37,980 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0610 | Val rms_score: 55.3753
164
+ 2025-09-19 00:14:41,015 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0554 | Val rms_score: 54.8580
165
+ 2025-09-19 00:14:43,637 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0471 | Val rms_score: 55.0552
166
+ 2025-09-19 00:14:47,059 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0487 | Val rms_score: 55.1418
167
+ 2025-09-19 00:14:51,080 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0464 | Val rms_score: 55.5392
168
+ 2025-09-19 00:14:54,108 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0452 | Val rms_score: 55.8563
169
+ 2025-09-19 00:14:57,128 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0461 | Val rms_score: 54.5337
170
+ 2025-09-19 00:14:59,734 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0463 | Val rms_score: 55.1270
171
+ 2025-09-19 00:15:03,096 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0502 | Val rms_score: 54.7006
172
+ 2025-09-19 00:15:06,179 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0523 | Val rms_score: 55.8805
173
+ 2025-09-19 00:15:09,283 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0558 | Val rms_score: 54.9707
174
+ 2025-09-19 00:15:12,293 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0495 | Val rms_score: 55.2300
175
+ 2025-09-19 00:15:15,108 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0474 | Val rms_score: 54.3945
176
+ 2025-09-19 00:15:18,518 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0396 | Val rms_score: 55.0308
177
+ 2025-09-19 00:15:21,583 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0430 | Val rms_score: 54.1757
178
+ 2025-09-19 00:15:24,635 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0491 | Val rms_score: 54.2708
179
+ 2025-09-19 00:15:27,676 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0521 | Val rms_score: 53.7823
180
+ 2025-09-19 00:15:30,493 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0450 | Val rms_score: 54.6447
181
+ 2025-09-19 00:15:33,838 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0366 | Val rms_score: 55.0576
182
+ 2025-09-19 00:15:36,976 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0459 | Val rms_score: 55.2682
183
+ 2025-09-19 00:15:40,045 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0404 | Val rms_score: 55.3308
184
+ 2025-09-19 00:15:43,020 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0476 | Val rms_score: 54.7646
185
+ 2025-09-19 00:15:45,819 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0506 | Val rms_score: 55.2558
186
+ 2025-09-19 00:15:49,181 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0474 | Val rms_score: 54.2032
187
+ 2025-09-19 00:15:51,921 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0491 | Val rms_score: 54.1039
188
+ 2025-09-19 00:15:54,974 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0394 | Val rms_score: 54.1537
189
+ 2025-09-19 00:15:58,015 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0406 | Val rms_score: 54.8281
190
+ 2025-09-19 00:16:00,648 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0391 | Val rms_score: 54.9662
191
+ 2025-09-19 00:16:04,043 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0498 | Val rms_score: 54.7641
192
+ 2025-09-19 00:16:07,052 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0417 | Val rms_score: 54.8222
193
+ 2025-09-19 00:16:10,080 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0409 | Val rms_score: 54.9757
194
+ 2025-09-19 00:16:13,109 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0424 | Val rms_score: 54.8932
195
+ 2025-09-19 00:16:15,720 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0450 | Val rms_score: 54.4306
196
+ 2025-09-19 00:16:19,113 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0476 | Val rms_score: 55.7953
197
+ 2025-09-19 00:16:22,160 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0374 | Val rms_score: 53.7156
198
+ 2025-09-19 00:16:25,172 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0346 | Val rms_score: 54.0006
199
+ 2025-09-19 00:16:28,181 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0340 | Val rms_score: 53.8489
200
+ 2025-09-19 00:16:30,832 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0195 | Val rms_score: 53.8390
201
+ 2025-09-19 00:16:34,333 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0335 | Val rms_score: 53.9748
202
+ 2025-09-19 00:16:37,358 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0346 | Val rms_score: 54.4456
203
+ 2025-09-19 00:16:40,453 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0357 | Val rms_score: 54.0546
204
+ 2025-09-19 00:16:43,453 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0368 | Val rms_score: 54.0007
205
+ 2025-09-19 00:16:46,135 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0342 | Val rms_score: 54.7123
206
+ 2025-09-19 00:16:49,563 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0333 | Val rms_score: 54.3258
207
+ 2025-09-19 00:16:52,588 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0337 | Val rms_score: 54.0351
208
+ 2025-09-19 00:16:55,652 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0363 | Val rms_score: 53.6733
209
+ 2025-09-19 00:16:58,741 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0339 | Val rms_score: 54.2924
210
+ 2025-09-19 00:17:01,481 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0298 | Val rms_score: 54.8266
211
+ 2025-09-19 00:17:04,926 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0331 | Val rms_score: 53.8871
212
+ 2025-09-19 00:17:07,954 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0305 | Val rms_score: 53.7868
213
+ 2025-09-19 00:17:10,622 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0346 | Val rms_score: 54.0985
214
+ 2025-09-19 00:17:13,724 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0415 | Val rms_score: 54.5692
215
+ 2025-09-19 00:17:17,184 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0410 | Val rms_score: 54.8288
216
+ 2025-09-19 00:17:20,613 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0381 | Val rms_score: 54.4439
217
+ 2025-09-19 00:17:23,652 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0350 | Val rms_score: 53.8577
218
+ 2025-09-19 00:17:26,700 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0372 | Val rms_score: 54.0408
219
+ 2025-09-19 00:17:29,753 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0374 | Val rms_score: 54.3851
220
+ 2025-09-19 00:17:30,034 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 46.5008
221
+ 2025-09-19 00:17:30,446 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset clearance at 2025-09-19_00-17-30
222
+ 2025-09-19 00:17:33,114 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.8690 | Val rms_score: 54.7713
223
+ 2025-09-19 00:17:33,114 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 21
224
+ 2025-09-19 00:17:33,623 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 54.7713
225
+ 2025-09-19 00:17:36,655 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.9762 | Val rms_score: 53.3409
226
+ 2025-09-19 00:17:36,823 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Global step of best model: 42
227
+ 2025-09-19 00:17:37,355 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 53.3409
228
+ 2025-09-19 00:17:40,411 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.7857 | Val rms_score: 55.6647
229
+ 2025-09-19 00:17:43,464 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.6815 | Val rms_score: 57.6977
230
+ 2025-09-19 00:17:46,085 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.7063 | Val rms_score: 58.6960
231
+ 2025-09-19 00:17:49,117 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.4583 | Val rms_score: 56.7406
232
+ 2025-09-19 00:17:52,541 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.4018 | Val rms_score: 58.6042
233
+ 2025-09-19 00:17:55,622 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.3363 | Val rms_score: 59.0818
234
+ 2025-09-19 00:17:58,644 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.2589 | Val rms_score: 58.4783
235
+ 2025-09-19 00:18:01,258 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.2453 | Val rms_score: 58.6233
236
+ 2025-09-19 00:18:04,289 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1823 | Val rms_score: 58.4987
237
+ 2025-09-19 00:18:07,686 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.1853 | Val rms_score: 57.8181
238
+ 2025-09-19 00:18:10,751 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.1756 | Val rms_score: 57.3217
239
+ 2025-09-19 00:18:13,734 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1414 | Val rms_score: 58.4987
240
+ 2025-09-19 00:18:16,397 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.1375 | Val rms_score: 57.0821
241
+ 2025-09-19 00:18:19,465 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.1265 | Val rms_score: 58.1033
242
+ 2025-09-19 00:18:22,843 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.1183 | Val rms_score: 59.9925
243
+ 2025-09-19 00:18:25,836 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.1168 | Val rms_score: 58.3463
244
+ 2025-09-19 00:18:28,881 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.1057 | Val rms_score: 57.5474
245
+ 2025-09-19 00:18:31,463 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.1102 | Val rms_score: 56.1810
246
+ 2025-09-19 00:18:34,403 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.1109 | Val rms_score: 58.6024
247
+ 2025-09-19 00:18:37,865 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.1027 | Val rms_score: 58.2854
248
+ 2025-09-19 00:18:40,936 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0871 | Val rms_score: 57.0054
249
+ 2025-09-19 00:18:43,975 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.1260 | Val rms_score: 56.5352
250
+ 2025-09-19 00:18:46,568 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0826 | Val rms_score: 57.9313
251
+ 2025-09-19 00:18:49,621 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0781 | Val rms_score: 57.2070
252
+ 2025-09-19 00:18:53,104 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0707 | Val rms_score: 56.5726
253
+ 2025-09-19 00:18:56,152 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0759 | Val rms_score: 57.9899
254
+ 2025-09-19 00:18:59,226 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0781 | Val rms_score: 57.6645
255
+ 2025-09-19 00:19:01,809 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0755 | Val rms_score: 57.7858
256
+ 2025-09-19 00:19:04,893 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0792 | Val rms_score: 57.4659
257
+ 2025-09-19 00:19:08,299 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0766 | Val rms_score: 56.8036
258
+ 2025-09-19 00:19:11,418 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0807 | Val rms_score: 56.6008
259
+ 2025-09-19 00:19:14,429 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0792 | Val rms_score: 56.4370
260
+ 2025-09-19 00:19:17,041 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0714 | Val rms_score: 57.1657
261
+ 2025-09-19 00:19:20,088 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0707 | Val rms_score: 57.6095
262
+ 2025-09-19 00:19:23,512 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0763 | Val rms_score: 58.5957
263
+ 2025-09-19 00:19:26,569 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0789 | Val rms_score: 57.8625
264
+ 2025-09-19 00:19:29,609 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0703 | Val rms_score: 58.4218
265
+ 2025-09-19 00:19:32,262 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0588 | Val rms_score: 56.5446
266
+ 2025-09-19 00:19:35,244 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0640 | Val rms_score: 57.0066
267
+ 2025-09-19 00:19:38,693 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0651 | Val rms_score: 56.3678
268
+ 2025-09-19 00:19:41,752 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0671 | Val rms_score: 56.6458
269
+ 2025-09-19 00:19:44,406 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0558 | Val rms_score: 56.8125
270
+ 2025-09-19 00:19:47,054 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0569 | Val rms_score: 58.4339
271
+ 2025-09-19 00:19:49,981 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0536 | Val rms_score: 57.4476
272
+ 2025-09-19 00:19:53,448 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0465 | Val rms_score: 56.5773
273
+ 2025-09-19 00:19:57,404 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0527 | Val rms_score: 55.9138
274
+ 2025-09-19 00:20:00,391 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0547 | Val rms_score: 58.1696
275
+ 2025-09-19 00:20:03,068 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0487 | Val rms_score: 56.5133
276
+ 2025-09-19 00:20:06,108 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0495 | Val rms_score: 57.0860
277
+ 2025-09-19 00:20:09,525 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0487 | Val rms_score: 56.8379
278
+ 2025-09-19 00:20:12,569 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0508 | Val rms_score: 55.8451
279
+ 2025-09-19 00:20:15,616 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0474 | Val rms_score: 56.4737
280
+ 2025-09-19 00:20:18,275 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0461 | Val rms_score: 55.1506
281
+ 2025-09-19 00:20:21,315 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0461 | Val rms_score: 57.7400
282
+ 2025-09-19 00:20:24,722 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0461 | Val rms_score: 55.9402
283
+ 2025-09-19 00:20:27,746 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0464 | Val rms_score: 55.6605
284
+ 2025-09-19 00:20:30,792 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0469 | Val rms_score: 54.8209
285
+ 2025-09-19 00:20:33,620 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0506 | Val rms_score: 57.2097
286
+ 2025-09-19 00:20:36,703 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0435 | Val rms_score: 57.9541
287
+ 2025-09-19 00:20:40,112 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0391 | Val rms_score: 57.0171
288
+ 2025-09-19 00:20:43,226 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0450 | Val rms_score: 56.2230
289
+ 2025-09-19 00:20:46,220 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0461 | Val rms_score: 56.6077
290
+ 2025-09-19 00:20:49,062 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0383 | Val rms_score: 55.9995
291
+ 2025-09-19 00:20:52,101 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0430 | Val rms_score: 55.4984
292
+ 2025-09-19 00:20:55,533 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0432 | Val rms_score: 55.3520
293
+ 2025-09-19 00:20:58,619 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0368 | Val rms_score: 56.2986
294
+ 2025-09-19 00:21:01,189 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0415 | Val rms_score: 55.7600
295
+ 2025-09-19 00:21:03,791 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0502 | Val rms_score: 56.1889
296
+ 2025-09-19 00:21:06,849 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0491 | Val rms_score: 55.1075
297
+ 2025-09-19 00:21:10,247 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0560 | Val rms_score: 56.9935
298
+ 2025-09-19 00:21:13,304 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0443 | Val rms_score: 56.6346
299
+ 2025-09-19 00:21:16,358 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0392 | Val rms_score: 55.8721
300
+ 2025-09-19 00:21:18,997 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0465 | Val rms_score: 57.0274
301
+ 2025-09-19 00:21:22,011 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0433 | Val rms_score: 55.8134
302
+ 2025-09-19 00:21:25,412 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0437 | Val rms_score: 55.8375
303
+ 2025-09-19 00:21:28,546 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0484 | Val rms_score: 56.8958
304
+ 2025-09-19 00:21:31,550 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0491 | Val rms_score: 54.7416
305
+ 2025-09-19 00:21:34,169 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0441 | Val rms_score: 56.1436
306
+ 2025-09-19 00:21:37,188 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0352 | Val rms_score: 54.5926
307
+ 2025-09-19 00:21:40,577 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0409 | Val rms_score: 55.4352
308
+ 2025-09-19 00:21:43,607 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0357 | Val rms_score: 55.8262
309
+ 2025-09-19 00:21:46,718 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0400 | Val rms_score: 56.0345
310
+ 2025-09-19 00:21:49,409 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0350 | Val rms_score: 55.4117
311
+ 2025-09-19 00:21:52,451 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0322 | Val rms_score: 55.5280
312
+ 2025-09-19 00:21:55,841 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0337 | Val rms_score: 54.7562
313
+ 2025-09-19 00:21:58,898 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0432 | Val rms_score: 55.1613
314
+ 2025-09-19 00:22:01,879 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0441 | Val rms_score: 55.6702
315
+ 2025-09-19 00:22:04,548 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0389 | Val rms_score: 55.1573
316
+ 2025-09-19 00:22:07,613 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0410 | Val rms_score: 55.1591
317
+ 2025-09-19 00:22:11,087 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0422 | Val rms_score: 55.6311
318
+ 2025-09-19 00:22:14,113 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0335 | Val rms_score: 55.1437
319
+ 2025-09-19 00:22:17,102 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0327 | Val rms_score: 55.9503
320
+ 2025-09-19 00:22:19,715 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0303 | Val rms_score: 55.0201
321
+ 2025-09-19 00:22:23,531 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0309 | Val rms_score: 55.5183
322
+ 2025-09-19 00:22:26,951 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0286 | Val rms_score: 55.5789
323
+ 2025-09-19 00:22:29,990 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0326 | Val rms_score: 55.5041
324
+ 2025-09-19 00:22:33,057 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0327 | Val rms_score: 55.6429
325
+ 2025-09-19 00:22:35,680 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0311 | Val rms_score: 55.2897
326
+ 2025-09-19 00:22:36,089 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Test rms_score: 44.9974
327
+ 2025-09-19 00:22:36,493 - logs_modchembert_clearance_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 45.4951, Std Dev: 0.7112
logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_delaney_epochs100_batch_size32_20250918_222034.log ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 22:20:34,354 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Running benchmark for dataset: delaney
2
+ 2025-09-18 22:20:34,354 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - dataset: delaney, tasks: ['measured_log_solubility_in_mols_per_litre'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 22:20:34,368 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset delaney at 2025-09-18_22-20-34
4
+ 2025-09-18 22:20:41,424 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.4720 | Val rms_score: 1.3735
5
+ 2025-09-18 22:20:41,424 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
6
+ 2025-09-18 22:20:41,954 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.3735
7
+ 2025-09-18 22:20:45,229 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1390 | Val rms_score: 1.1244
8
+ 2025-09-18 22:20:45,404 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
9
+ 2025-09-18 22:20:45,924 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.1244
10
+ 2025-09-18 22:20:48,935 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0873 | Val rms_score: 1.0412
11
+ 2025-09-18 22:20:49,112 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
12
+ 2025-09-18 22:20:49,629 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.0412
13
+ 2025-09-18 22:20:52,374 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0601 | Val rms_score: 0.9534
14
+ 2025-09-18 22:20:52,571 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 116
15
+ 2025-09-18 22:20:53,085 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.9534
16
+ 2025-09-18 22:20:56,127 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0536 | Val rms_score: 0.9448
17
+ 2025-09-18 22:20:56,308 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
18
+ 2025-09-18 22:20:56,824 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.9448
19
+ 2025-09-18 22:20:59,737 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0531 | Val rms_score: 0.9595
20
+ 2025-09-18 22:21:03,125 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0651 | Val rms_score: 0.9593
21
+ 2025-09-18 22:21:06,517 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0450 | Val rms_score: 0.9164
22
+ 2025-09-18 22:21:06,702 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 232
23
+ 2025-09-18 22:21:07,212 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.9164
24
+ 2025-09-18 22:21:10,313 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0412 | Val rms_score: 0.8910
25
+ 2025-09-18 22:21:10,499 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
26
+ 2025-09-18 22:21:11,022 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.8910
27
+ 2025-09-18 22:21:14,112 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0358 | Val rms_score: 0.9646
28
+ 2025-09-18 22:21:17,131 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0327 | Val rms_score: 0.9234
29
+ 2025-09-18 22:21:20,452 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0333 | Val rms_score: 0.8844
30
+ 2025-09-18 22:21:20,629 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 348
31
+ 2025-09-18 22:21:21,142 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 0.8844
32
+ 2025-09-18 22:21:24,360 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0292 | Val rms_score: 0.8936
33
+ 2025-09-18 22:21:27,713 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0273 | Val rms_score: 0.8795
34
+ 2025-09-18 22:21:27,894 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 406
35
+ 2025-09-18 22:21:28,413 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.8795
36
+ 2025-09-18 22:21:31,810 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0314 | Val rms_score: 0.9080
37
+ 2025-09-18 22:21:35,126 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0269 | Val rms_score: 0.9465
38
+ 2025-09-18 22:21:38,430 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0265 | Val rms_score: 0.9122
39
+ 2025-09-18 22:21:41,414 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0279 | Val rms_score: 0.9182
40
+ 2025-09-18 22:21:44,439 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0269 | Val rms_score: 0.9031
41
+ 2025-09-18 22:21:47,584 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0256 | Val rms_score: 0.9497
42
+ 2025-09-18 22:21:50,540 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0230 | Val rms_score: 0.8990
43
+ 2025-09-18 22:21:53,818 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0210 | Val rms_score: 0.8800
44
+ 2025-09-18 22:21:56,932 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0213 | Val rms_score: 0.9359
45
+ 2025-09-18 22:22:00,266 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0236 | Val rms_score: 0.9576
46
+ 2025-09-18 22:22:03,672 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0302 | Val rms_score: 0.9987
47
+ 2025-09-18 22:22:06,659 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0466 | Val rms_score: 0.9320
48
+ 2025-09-18 22:22:09,884 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0314 | Val rms_score: 0.9130
49
+ 2025-09-18 22:22:13,024 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0207 | Val rms_score: 0.9205
50
+ 2025-09-18 22:22:16,399 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0211 | Val rms_score: 0.9217
51
+ 2025-09-18 22:22:19,885 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0180 | Val rms_score: 0.9229
52
+ 2025-09-18 22:22:23,015 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0193 | Val rms_score: 0.9118
53
+ 2025-09-18 22:22:26,175 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0198 | Val rms_score: 0.9409
54
+ 2025-09-18 22:22:29,370 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0201 | Val rms_score: 0.9351
55
+ 2025-09-18 22:22:32,634 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0249 | Val rms_score: 0.9227
56
+ 2025-09-18 22:22:36,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0194 | Val rms_score: 0.9424
57
+ 2025-09-18 22:22:39,540 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0190 | Val rms_score: 0.9220
58
+ 2025-09-18 22:22:42,849 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0221 | Val rms_score: 0.9549
59
+ 2025-09-18 22:22:46,006 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0239 | Val rms_score: 0.9380
60
+ 2025-09-18 22:22:49,468 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0260 | Val rms_score: 0.8943
61
+ 2025-09-18 22:22:52,737 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0190 | Val rms_score: 0.9117
62
+ 2025-09-18 22:22:55,756 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0194 | Val rms_score: 0.9096
63
+ 2025-09-18 22:22:59,029 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0176 | Val rms_score: 0.9123
64
+ 2025-09-18 22:23:02,213 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0172 | Val rms_score: 0.9125
65
+ 2025-09-18 22:23:05,587 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0155 | Val rms_score: 0.9082
66
+ 2025-09-18 22:23:08,814 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0238 | Val rms_score: 0.9257
67
+ 2025-09-18 22:23:11,526 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0206 | Val rms_score: 0.9841
68
+ 2025-09-18 22:23:14,899 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0185 | Val rms_score: 0.9502
69
+ 2025-09-18 22:23:18,105 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0157 | Val rms_score: 0.9287
70
+ 2025-09-18 22:23:21,037 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0166 | Val rms_score: 0.9490
71
+ 2025-09-18 22:23:24,109 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0162 | Val rms_score: 0.9179
72
+ 2025-09-18 22:23:27,385 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0162 | Val rms_score: 0.9022
73
+ 2025-09-18 22:23:31,057 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0244 | Val rms_score: 0.8869
74
+ 2025-09-18 22:23:34,294 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0174 | Val rms_score: 0.9221
75
+ 2025-09-18 22:23:37,187 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0162 | Val rms_score: 0.9157
76
+ 2025-09-18 22:23:40,309 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0185 | Val rms_score: 0.9133
77
+ 2025-09-18 22:23:43,609 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0210 | Val rms_score: 0.9272
78
+ 2025-09-18 22:23:47,317 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0201 | Val rms_score: 0.9437
79
+ 2025-09-18 22:23:50,468 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0271 | Val rms_score: 0.9462
80
+ 2025-09-18 22:23:53,425 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0174 | Val rms_score: 0.9463
81
+ 2025-09-18 22:23:56,591 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0164 | Val rms_score: 0.9462
82
+ 2025-09-18 22:23:59,984 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0182 | Val rms_score: 0.9486
83
+ 2025-09-18 22:24:03,730 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0163 | Val rms_score: 0.9354
84
+ 2025-09-18 22:24:06,975 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0145 | Val rms_score: 0.9339
85
+ 2025-09-18 22:24:09,816 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0164 | Val rms_score: 0.9343
86
+ 2025-09-18 22:24:12,964 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0199 | Val rms_score: 0.9675
87
+ 2025-09-18 22:24:16,071 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0186 | Val rms_score: 0.9331
88
+ 2025-09-18 22:24:19,424 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0168 | Val rms_score: 0.9348
89
+ 2025-09-18 22:24:22,458 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0165 | Val rms_score: 0.9131
90
+ 2025-09-18 22:24:26,553 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0115 | Val rms_score: 0.9525
91
+ 2025-09-18 22:24:29,660 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0138 | Val rms_score: 0.9364
92
+ 2025-09-18 22:24:32,991 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0136 | Val rms_score: 0.9640
93
+ 2025-09-18 22:24:36,679 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0143 | Val rms_score: 0.9583
94
+ 2025-09-18 22:24:39,899 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0149 | Val rms_score: 0.9548
95
+ 2025-09-18 22:24:42,780 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0172 | Val rms_score: 0.9464
96
+ 2025-09-18 22:24:45,920 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0168 | Val rms_score: 0.9290
97
+ 2025-09-18 22:24:49,291 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0119 | Val rms_score: 0.9800
98
+ 2025-09-18 22:24:53,030 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0170 | Val rms_score: 0.9748
99
+ 2025-09-18 22:24:56,180 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0174 | Val rms_score: 0.9646
100
+ 2025-09-18 22:24:59,170 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0147 | Val rms_score: 0.9566
101
+ 2025-09-18 22:25:02,266 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0147 | Val rms_score: 0.9332
102
+ 2025-09-18 22:25:05,596 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0158 | Val rms_score: 0.9491
103
+ 2025-09-18 22:25:09,279 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0156 | Val rms_score: 0.9300
104
+ 2025-09-18 22:25:12,438 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0134 | Val rms_score: 0.9310
105
+ 2025-09-18 22:25:15,463 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0150 | Val rms_score: 0.9600
106
+ 2025-09-18 22:25:18,584 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0131 | Val rms_score: 0.9575
107
+ 2025-09-18 22:25:21,983 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0143 | Val rms_score: 0.9508
108
+ 2025-09-18 22:25:25,745 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0156 | Val rms_score: 0.9460
109
+ 2025-09-18 22:25:28,960 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0154 | Val rms_score: 0.9580
110
+ 2025-09-18 22:25:31,772 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0168 | Val rms_score: 0.9253
111
+ 2025-09-18 22:25:34,990 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0168 | Val rms_score: 0.9627
112
+ 2025-09-18 22:25:38,451 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0164 | Val rms_score: 0.9670
113
+ 2025-09-18 22:25:42,120 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0139 | Val rms_score: 0.9548
114
+ 2025-09-18 22:25:45,259 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0125 | Val rms_score: 0.9189
115
+ 2025-09-18 22:25:48,240 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0125 | Val rms_score: 0.9196
116
+ 2025-09-18 22:25:51,326 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0135 | Val rms_score: 0.9267
117
+ 2025-09-18 22:25:54,732 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0133 | Val rms_score: 0.9458
118
+ 2025-09-18 22:25:58,455 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0146 | Val rms_score: 0.9133
119
+ 2025-09-18 22:26:01,538 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0186 | Val rms_score: 0.9128
120
+ 2025-09-18 22:26:04,450 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0144 | Val rms_score: 0.9696
121
+ 2025-09-18 22:26:07,866 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0130 | Val rms_score: 0.9519
122
+ 2025-09-18 22:26:08,209 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.9156
123
+ 2025-09-18 22:26:08,487 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset delaney at 2025-09-18_22-26-08
124
+ 2025-09-18 22:26:11,510 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3707 | Val rms_score: 1.2768
125
+ 2025-09-18 22:26:11,510 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
126
+ 2025-09-18 22:26:12,031 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.2768
127
+ 2025-09-18 22:26:15,208 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.0981 | Val rms_score: 1.0301
128
+ 2025-09-18 22:26:15,385 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
129
+ 2025-09-18 22:26:15,911 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0301
130
+ 2025-09-18 22:26:18,997 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0695 | Val rms_score: 0.9816
131
+ 2025-09-18 22:26:19,175 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
132
+ 2025-09-18 22:26:19,702 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.9816
133
+ 2025-09-18 22:26:22,727 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0591 | Val rms_score: 0.9941
134
+ 2025-09-18 22:26:25,752 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0560 | Val rms_score: 1.0051
135
+ 2025-09-18 22:26:29,039 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0501 | Val rms_score: 0.8793
136
+ 2025-09-18 22:26:29,478 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
137
+ 2025-09-18 22:26:29,998 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.8793
138
+ 2025-09-18 22:26:33,226 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0371 | Val rms_score: 0.9300
139
+ 2025-09-18 22:26:36,699 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0383 | Val rms_score: 0.9277
140
+ 2025-09-18 22:26:39,944 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0348 | Val rms_score: 0.9343
141
+ 2025-09-18 22:26:42,908 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0317 | Val rms_score: 0.9003
142
+ 2025-09-18 22:26:45,975 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0294 | Val rms_score: 0.8847
143
+ 2025-09-18 22:26:49,412 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0303 | Val rms_score: 0.9565
144
+ 2025-09-18 22:26:52,811 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0447 | Val rms_score: 1.0166
145
+ 2025-09-18 22:26:56,027 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0335 | Val rms_score: 0.9534
146
+ 2025-09-18 22:26:58,926 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0279 | Val rms_score: 0.9697
147
+ 2025-09-18 22:27:02,200 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0253 | Val rms_score: 0.9025
148
+ 2025-09-18 22:27:05,846 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0238 | Val rms_score: 0.9150
149
+ 2025-09-18 22:27:09,462 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0323 | Val rms_score: 0.9272
150
+ 2025-09-18 22:27:12,689 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0280 | Val rms_score: 0.8922
151
+ 2025-09-18 22:27:15,643 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0233 | Val rms_score: 0.8992
152
+ 2025-09-18 22:27:19,151 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0226 | Val rms_score: 0.8988
153
+ 2025-09-18 22:27:23,223 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0195 | Val rms_score: 0.9349
154
+ 2025-09-18 22:27:27,400 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0242 | Val rms_score: 0.8797
155
+ 2025-09-18 22:27:31,225 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0202 | Val rms_score: 0.8783
156
+ 2025-09-18 22:27:31,372 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 696
157
+ 2025-09-18 22:27:31,919 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 24 with val rms_score: 0.8783
158
+ 2025-09-18 22:27:35,626 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0228 | Val rms_score: 0.9445
159
+ 2025-09-18 22:27:39,771 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0356 | Val rms_score: 1.0527
160
+ 2025-09-18 22:27:43,838 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0327 | Val rms_score: 0.9692
161
+ 2025-09-18 22:27:47,523 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0239 | Val rms_score: 0.9002
162
+ 2025-09-18 22:27:50,733 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0199 | Val rms_score: 0.9162
163
+ 2025-09-18 22:27:54,534 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0207 | Val rms_score: 0.9088
164
+ 2025-09-18 22:27:59,472 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0214 | Val rms_score: 0.9182
165
+ 2025-09-18 22:28:04,045 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0206 | Val rms_score: 0.8775
166
+ 2025-09-18 22:28:04,210 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 928
167
+ 2025-09-18 22:28:04,866 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 32 with val rms_score: 0.8775
168
+ 2025-09-18 22:28:08,809 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0183 | Val rms_score: 0.9585
169
+ 2025-09-18 22:28:13,132 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0203 | Val rms_score: 0.9072
170
+ 2025-09-18 22:28:18,199 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0199 | Val rms_score: 0.9265
171
+ 2025-09-18 22:28:22,084 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0203 | Val rms_score: 0.8978
172
+ 2025-09-18 22:28:26,605 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0271 | Val rms_score: 0.9060
173
+ 2025-09-18 22:28:31,386 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0195 | Val rms_score: 0.9546
174
+ 2025-09-18 22:28:35,722 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0206 | Val rms_score: 0.9537
175
+ 2025-09-18 22:28:39,394 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0170 | Val rms_score: 0.9191
176
+ 2025-09-18 22:28:44,112 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0169 | Val rms_score: 0.9292
177
+ 2025-09-18 22:28:48,497 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0219 | Val rms_score: 0.9580
178
+ 2025-09-18 22:28:52,418 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0190 | Val rms_score: 0.9292
179
+ 2025-09-18 22:28:56,938 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0172 | Val rms_score: 0.9218
180
+ 2025-09-18 22:29:01,106 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0169 | Val rms_score: 0.8915
181
+ 2025-09-18 22:29:05,398 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0171 | Val rms_score: 0.9224
182
+ 2025-09-18 22:29:09,594 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0156 | Val rms_score: 0.9341
183
+ 2025-09-18 22:29:14,153 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0154 | Val rms_score: 0.9556
184
+ 2025-09-18 22:29:18,809 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0154 | Val rms_score: 0.9167
185
+ 2025-09-18 22:29:22,256 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0205 | Val rms_score: 0.9146
186
+ 2025-09-18 22:29:25,419 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0232 | Val rms_score: 0.8988
187
+ 2025-09-18 22:29:28,971 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0151 | Val rms_score: 0.9193
188
+ 2025-09-18 22:29:32,393 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0144 | Val rms_score: 0.8856
189
+ 2025-09-18 22:29:35,991 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0156 | Val rms_score: 0.9057
190
+ 2025-09-18 22:29:39,305 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0190 | Val rms_score: 0.9118
191
+ 2025-09-18 22:29:42,473 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0171 | Val rms_score: 0.9074
192
+ 2025-09-18 22:29:46,126 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0211 | Val rms_score: 0.9199
193
+ 2025-09-18 22:29:49,675 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0160 | Val rms_score: 0.9416
194
+ 2025-09-18 22:29:53,153 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0165 | Val rms_score: 0.9263
195
+ 2025-09-18 22:29:56,249 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0152 | Val rms_score: 0.9238
196
+ 2025-09-18 22:29:59,375 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0149 | Val rms_score: 0.9548
197
+ 2025-09-18 22:30:03,033 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0225 | Val rms_score: 0.9590
198
+ 2025-09-18 22:30:06,631 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0161 | Val rms_score: 0.9314
199
+ 2025-09-18 22:30:10,181 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0195 | Val rms_score: 0.9484
200
+ 2025-09-18 22:30:13,406 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0150 | Val rms_score: 0.8903
201
+ 2025-09-18 22:30:16,701 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0167 | Val rms_score: 0.9156
202
+ 2025-09-18 22:30:20,580 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0175 | Val rms_score: 0.9508
203
+ 2025-09-18 22:30:24,232 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0190 | Val rms_score: 0.9840
204
+ 2025-09-18 22:30:28,903 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0295 | Val rms_score: 0.9254
205
+ 2025-09-18 22:30:32,584 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0197 | Val rms_score: 0.9308
206
+ 2025-09-18 22:30:36,204 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0153 | Val rms_score: 0.9174
207
+ 2025-09-18 22:30:39,887 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0147 | Val rms_score: 0.9560
208
+ 2025-09-18 22:30:43,449 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0138 | Val rms_score: 0.9682
209
+ 2025-09-18 22:30:47,134 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0130 | Val rms_score: 0.9261
210
+ 2025-09-18 22:30:50,905 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0123 | Val rms_score: 0.9162
211
+ 2025-09-18 22:30:54,715 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0110 | Val rms_score: 0.9288
212
+ 2025-09-18 22:30:58,957 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0134 | Val rms_score: 0.9047
213
+ 2025-09-18 22:31:02,514 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0156 | Val rms_score: 0.9029
214
+ 2025-09-18 22:31:06,339 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0123 | Val rms_score: 0.9135
215
+ 2025-09-18 22:31:10,727 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0156 | Val rms_score: 0.9418
216
+ 2025-09-18 22:31:14,737 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0143 | Val rms_score: 0.9294
217
+ 2025-09-18 22:31:19,920 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0139 | Val rms_score: 0.9456
218
+ 2025-09-18 22:31:23,346 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0136 | Val rms_score: 0.9203
219
+ 2025-09-18 22:31:26,505 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0125 | Val rms_score: 0.8853
220
+ 2025-09-18 22:31:29,919 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0141 | Val rms_score: 0.9319
221
+ 2025-09-18 22:31:33,575 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0165 | Val rms_score: 0.9554
222
+ 2025-09-18 22:31:37,549 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0144 | Val rms_score: 0.9225
223
+ 2025-09-18 22:31:40,941 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0137 | Val rms_score: 0.9491
224
+ 2025-09-18 22:31:44,145 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0120 | Val rms_score: 0.9448
225
+ 2025-09-18 22:31:47,537 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0107 | Val rms_score: 0.9601
226
+ 2025-09-18 22:31:50,924 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0136 | Val rms_score: 0.9610
227
+ 2025-09-18 22:31:54,321 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0140 | Val rms_score: 0.9699
228
+ 2025-09-18 22:31:57,201 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0141 | Val rms_score: 0.9350
229
+ 2025-09-18 22:32:00,376 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0135 | Val rms_score: 0.9586
230
+ 2025-09-18 22:32:03,748 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0106 | Val rms_score: 0.9405
231
+ 2025-09-18 22:32:07,175 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0120 | Val rms_score: 0.9202
232
+ 2025-09-18 22:32:10,621 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0171 | Val rms_score: 0.9397
233
+ 2025-09-18 22:32:13,631 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0121 | Val rms_score: 0.9763
234
+ 2025-09-18 22:32:16,836 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0136 | Val rms_score: 0.9697
235
+ 2025-09-18 22:32:20,248 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0137 | Val rms_score: 0.9635
236
+ 2025-09-18 22:32:20,669 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.9278
237
+ 2025-09-18 22:32:20,940 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset delaney at 2025-09-18_22-32-20
238
+ 2025-09-18 22:32:24,018 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5366 | Val rms_score: 1.3129
239
+ 2025-09-18 22:32:24,018 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 29
240
+ 2025-09-18 22:32:24,543 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.3129
241
+ 2025-09-18 22:32:28,022 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1110 | Val rms_score: 1.1682
242
+ 2025-09-18 22:32:28,196 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 58
243
+ 2025-09-18 22:32:28,712 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.1682
244
+ 2025-09-18 22:32:31,971 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0814 | Val rms_score: 1.1399
245
+ 2025-09-18 22:32:32,154 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 87
246
+ 2025-09-18 22:32:32,678 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 1.1399
247
+ 2025-09-18 22:32:35,966 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0688 | Val rms_score: 1.1499
248
+ 2025-09-18 22:32:38,933 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0625 | Val rms_score: 1.0803
249
+ 2025-09-18 22:32:39,104 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 145
250
+ 2025-09-18 22:32:39,619 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 1.0803
251
+ 2025-09-18 22:32:42,486 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0504 | Val rms_score: 1.0485
252
+ 2025-09-18 22:32:42,917 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 174
253
+ 2025-09-18 22:32:43,439 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 1.0485
254
+ 2025-09-18 22:32:46,440 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0609 | Val rms_score: 1.0303
255
+ 2025-09-18 22:32:46,616 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 203
256
+ 2025-09-18 22:32:47,135 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 1.0303
257
+ 2025-09-18 22:32:49,877 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0520 | Val rms_score: 1.0566
258
+ 2025-09-18 22:32:53,179 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0401 | Val rms_score: 1.0269
259
+ 2025-09-18 22:32:53,360 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 261
260
+ 2025-09-18 22:32:53,885 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 1.0269
261
+ 2025-09-18 22:32:57,217 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0372 | Val rms_score: 1.0018
262
+ 2025-09-18 22:32:57,400 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 290
263
+ 2025-09-18 22:32:57,942 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 10 with val rms_score: 1.0018
264
+ 2025-09-18 22:33:01,320 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0399 | Val rms_score: 1.0372
265
+ 2025-09-18 22:33:05,149 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0385 | Val rms_score: 1.0114
266
+ 2025-09-18 22:33:08,688 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0345 | Val rms_score: 1.0118
267
+ 2025-09-18 22:33:12,013 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0345 | Val rms_score: 1.0001
268
+ 2025-09-18 22:33:12,205 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 406
269
+ 2025-09-18 22:33:12,800 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 1.0001
270
+ 2025-09-18 22:33:16,334 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0296 | Val rms_score: 0.9757
271
+ 2025-09-18 22:33:16,513 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 435
272
+ 2025-09-18 22:33:17,069 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 15 with val rms_score: 0.9757
273
+ 2025-09-18 22:33:20,797 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0249 | Val rms_score: 0.9911
274
+ 2025-09-18 22:33:24,629 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0277 | Val rms_score: 0.9972
275
+ 2025-09-18 22:33:28,897 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0259 | Val rms_score: 1.0143
276
+ 2025-09-18 22:33:33,131 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0260 | Val rms_score: 1.0371
277
+ 2025-09-18 22:33:36,956 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0312 | Val rms_score: 0.9976
278
+ 2025-09-18 22:33:40,689 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0244 | Val rms_score: 0.9953
279
+ 2025-09-18 22:33:45,308 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0268 | Val rms_score: 1.0068
280
+ 2025-09-18 22:33:50,259 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0237 | Val rms_score: 0.9974
281
+ 2025-09-18 22:33:53,940 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0234 | Val rms_score: 1.0214
282
+ 2025-09-18 22:33:58,161 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0214 | Val rms_score: 0.9841
283
+ 2025-09-18 22:34:02,519 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0214 | Val rms_score: 0.9892
284
+ 2025-09-18 22:34:07,426 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0236 | Val rms_score: 0.9695
285
+ 2025-09-18 22:34:07,568 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 783
286
+ 2025-09-18 22:34:08,126 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 27 with val rms_score: 0.9695
287
+ 2025-09-18 22:34:11,844 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0216 | Val rms_score: 0.9761
288
+ 2025-09-18 22:34:16,075 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0195 | Val rms_score: 0.9745
289
+ 2025-09-18 22:34:20,139 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0199 | Val rms_score: 0.9804
290
+ 2025-09-18 22:34:24,827 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0220 | Val rms_score: 0.9983
291
+ 2025-09-18 22:34:28,731 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0342 | Val rms_score: 1.0206
292
+ 2025-09-18 22:34:33,037 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0277 | Val rms_score: 0.9910
293
+ 2025-09-18 22:34:37,270 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0252 | Val rms_score: 1.0324
294
+ 2025-09-18 22:34:41,780 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0241 | Val rms_score: 0.9435
295
+ 2025-09-18 22:34:41,937 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1015
296
+ 2025-09-18 22:34:42,479 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 35 with val rms_score: 0.9435
297
+ 2025-09-18 22:34:46,053 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0206 | Val rms_score: 0.9557
298
+ 2025-09-18 22:34:50,067 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0187 | Val rms_score: 0.9598
299
+ 2025-09-18 22:34:53,660 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0167 | Val rms_score: 0.9781
300
+ 2025-09-18 22:34:57,089 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0169 | Val rms_score: 0.9891
301
+ 2025-09-18 22:35:00,270 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0172 | Val rms_score: 0.9798
302
+ 2025-09-18 22:35:03,667 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0191 | Val rms_score: 0.9779
303
+ 2025-09-18 22:35:07,581 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0219 | Val rms_score: 0.9916
304
+ 2025-09-18 22:35:11,277 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0185 | Val rms_score: 0.9850
305
+ 2025-09-18 22:35:14,806 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0191 | Val rms_score: 1.0186
306
+ 2025-09-18 22:35:18,101 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0200 | Val rms_score: 0.9843
307
+ 2025-09-18 22:35:21,549 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0178 | Val rms_score: 1.0108
308
+ 2025-09-18 22:35:25,292 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0187 | Val rms_score: 1.0042
309
+ 2025-09-18 22:35:29,204 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0168 | Val rms_score: 0.9756
310
+ 2025-09-18 22:35:33,186 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0176 | Val rms_score: 0.9820
311
+ 2025-09-18 22:35:36,603 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0210 | Val rms_score: 1.0328
312
+ 2025-09-18 22:35:40,079 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0190 | Val rms_score: 0.9940
313
+ 2025-09-18 22:35:43,874 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0203 | Val rms_score: 0.9430
314
+ 2025-09-18 22:35:44,025 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1508
315
+ 2025-09-18 22:35:44,614 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 52 with val rms_score: 0.9430
316
+ 2025-09-18 22:35:48,243 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0241 | Val rms_score: 0.9625
317
+ 2025-09-18 22:35:51,562 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0198 | Val rms_score: 0.9948
318
+ 2025-09-18 22:35:54,689 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0209 | Val rms_score: 0.9937
319
+ 2025-09-18 22:35:58,488 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0216 | Val rms_score: 0.9831
320
+ 2025-09-18 22:36:02,708 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0187 | Val rms_score: 0.9917
321
+ 2025-09-18 22:36:07,318 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0245 | Val rms_score: 0.9895
322
+ 2025-09-18 22:36:12,173 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0161 | Val rms_score: 0.9745
323
+ 2025-09-18 22:36:15,741 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0147 | Val rms_score: 0.9711
324
+ 2025-09-18 22:36:19,252 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0145 | Val rms_score: 0.9512
325
+ 2025-09-18 22:36:24,178 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0147 | Val rms_score: 0.9563
326
+ 2025-09-18 22:36:29,073 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0149 | Val rms_score: 0.9741
327
+ 2025-09-18 22:36:33,002 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0194 | Val rms_score: 0.9806
328
+ 2025-09-18 22:36:37,099 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0183 | Val rms_score: 0.9387
329
+ 2025-09-18 22:36:37,255 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 1885
330
+ 2025-09-18 22:36:37,862 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 65 with val rms_score: 0.9387
331
+ 2025-09-18 22:36:42,614 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0160 | Val rms_score: 0.9573
332
+ 2025-09-18 22:36:47,897 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0153 | Val rms_score: 0.9961
333
+ 2025-09-18 22:36:52,167 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0151 | Val rms_score: 1.0052
334
+ 2025-09-18 22:36:57,324 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0165 | Val rms_score: 0.9485
335
+ 2025-09-18 22:37:01,788 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0147 | Val rms_score: 0.9558
336
+ 2025-09-18 22:37:06,076 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0152 | Val rms_score: 0.9599
337
+ 2025-09-18 22:37:11,068 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0133 | Val rms_score: 0.9537
338
+ 2025-09-18 22:37:14,965 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0149 | Val rms_score: 0.9711
339
+ 2025-09-18 22:37:19,610 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0131 | Val rms_score: 0.9673
340
+ 2025-09-18 22:37:23,678 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0149 | Val rms_score: 0.9421
341
+ 2025-09-18 22:37:28,181 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0155 | Val rms_score: 0.9406
342
+ 2025-09-18 22:37:32,715 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0164 | Val rms_score: 0.9501
343
+ 2025-09-18 22:37:37,254 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0160 | Val rms_score: 0.9329
344
+ 2025-09-18 22:37:37,428 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 2262
345
+ 2025-09-18 22:37:38,038 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 78 with val rms_score: 0.9329
346
+ 2025-09-18 22:37:42,949 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0214 | Val rms_score: 0.9384
347
+ 2025-09-18 22:37:47,914 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0182 | Val rms_score: 0.9538
348
+ 2025-09-18 22:37:52,106 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0156 | Val rms_score: 0.9417
349
+ 2025-09-18 22:37:56,094 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0127 | Val rms_score: 0.9562
350
+ 2025-09-18 22:38:01,054 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0142 | Val rms_score: 0.9566
351
+ 2025-09-18 22:38:05,347 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0119 | Val rms_score: 0.9246
352
+ 2025-09-18 22:38:05,500 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 2436
353
+ 2025-09-18 22:38:06,049 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 84 with val rms_score: 0.9246
354
+ 2025-09-18 22:38:09,963 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0121 | Val rms_score: 0.9420
355
+ 2025-09-18 22:38:14,193 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0144 | Val rms_score: 0.9485
356
+ 2025-09-18 22:38:18,654 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0133 | Val rms_score: 0.9284
357
+ 2025-09-18 22:38:23,213 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0129 | Val rms_score: 0.9693
358
+ 2025-09-18 22:38:27,372 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0154 | Val rms_score: 0.9462
359
+ 2025-09-18 22:38:32,238 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0125 | Val rms_score: 0.9752
360
+ 2025-09-18 22:38:35,989 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0126 | Val rms_score: 0.9783
361
+ 2025-09-18 22:38:40,685 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0130 | Val rms_score: 0.9512
362
+ 2025-09-18 22:38:44,797 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0133 | Val rms_score: 0.9480
363
+ 2025-09-18 22:38:48,223 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0149 | Val rms_score: 0.9766
364
+ 2025-09-18 22:38:51,559 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0149 | Val rms_score: 0.9524
365
+ 2025-09-18 22:38:55,013 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0125 | Val rms_score: 0.9508
366
+ 2025-09-18 22:38:58,591 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0142 | Val rms_score: 0.9340
367
+ 2025-09-18 22:39:02,413 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0128 | Val rms_score: 0.9244
368
+ 2025-09-18 22:39:02,569 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Global step of best model: 2842
369
+ 2025-09-18 22:39:03,147 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Best model saved at epoch 98 with val rms_score: 0.9244
370
+ 2025-09-18 22:39:06,668 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0129 | Val rms_score: 0.9286
371
+ 2025-09-18 22:39:09,978 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0137 | Val rms_score: 0.9545
372
+ 2025-09-18 22:39:10,428 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Test rms_score: 0.9523
373
+ 2025-09-18 22:39:10,755 - logs_modchembert_delaney_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.9319, Std Dev: 0.0153
logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_freesolv_epochs100_batch_size32_20250918_230313.log ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 23:03:13,547 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Running benchmark for dataset: freesolv
2
+ 2025-09-18 23:03:13,547 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - dataset: freesolv, tasks: ['y'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 23:03:13,559 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset freesolv at 2025-09-18_23-03-13
4
+ 2025-09-18 23:03:15,533 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.8382 | Val rms_score: 1.2039
5
+ 2025-09-18 23:03:15,533 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
6
+ 2025-09-18 23:03:16,055 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.2039
7
+ 2025-09-18 23:03:18,592 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2013 | Val rms_score: 1.1002
8
+ 2025-09-18 23:03:18,769 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
9
+ 2025-09-18 23:03:19,296 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.1002
10
+ 2025-09-18 23:03:21,828 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1167 | Val rms_score: 0.9702
11
+ 2025-09-18 23:03:22,016 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
12
+ 2025-09-18 23:03:22,535 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.9702
13
+ 2025-09-18 23:03:24,689 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1057 | Val rms_score: 0.8872
14
+ 2025-09-18 23:03:24,875 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
15
+ 2025-09-18 23:03:25,402 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.8872
16
+ 2025-09-18 23:03:27,915 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0873 | Val rms_score: 0.8713
17
+ 2025-09-18 23:03:28,090 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
18
+ 2025-09-18 23:03:28,652 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.8713
19
+ 2025-09-18 23:03:30,819 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0547 | Val rms_score: 0.8732
20
+ 2025-09-18 23:03:33,639 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0524 | Val rms_score: 0.9184
21
+ 2025-09-18 23:03:35,895 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0434 | Val rms_score: 0.8718
22
+ 2025-09-18 23:03:38,364 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0370 | Val rms_score: 0.8951
23
+ 2025-09-18 23:03:40,589 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0326 | Val rms_score: 0.8877
24
+ 2025-09-18 23:03:43,144 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0676 | Val rms_score: 0.7704
25
+ 2025-09-18 23:03:43,604 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 187
26
+ 2025-09-18 23:03:44,131 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 11 with val rms_score: 0.7704
27
+ 2025-09-18 23:03:46,516 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.4141 | Val rms_score: 1.0735
28
+ 2025-09-18 23:03:48,933 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.2022 | Val rms_score: 0.9842
29
+ 2025-09-18 23:03:51,139 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.1089 | Val rms_score: 0.8970
30
+ 2025-09-18 23:03:53,627 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0528 | Val rms_score: 0.9075
31
+ 2025-09-18 23:03:55,751 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0407 | Val rms_score: 0.9391
32
+ 2025-09-18 23:03:58,564 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0333 | Val rms_score: 0.9349
33
+ 2025-09-18 23:04:00,793 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0247 | Val rms_score: 0.9168
34
+ 2025-09-18 23:04:03,255 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0296 | Val rms_score: 0.9278
35
+ 2025-09-18 23:04:05,318 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0310 | Val rms_score: 0.9230
36
+ 2025-09-18 23:04:07,825 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0331 | Val rms_score: 0.9420
37
+ 2025-09-18 23:04:10,355 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0384 | Val rms_score: 0.9309
38
+ 2025-09-18 23:04:12,920 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0293 | Val rms_score: 0.9198
39
+ 2025-09-18 23:04:15,062 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0378 | Val rms_score: 0.9389
40
+ 2025-09-18 23:04:17,574 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0263 | Val rms_score: 0.9285
41
+ 2025-09-18 23:04:19,700 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0221 | Val rms_score: 0.9131
42
+ 2025-09-18 23:04:22,494 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0196 | Val rms_score: 0.9200
43
+ 2025-09-18 23:04:24,750 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0202 | Val rms_score: 0.9082
44
+ 2025-09-18 23:04:27,289 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0177 | Val rms_score: 0.9020
45
+ 2025-09-18 23:04:29,483 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0197 | Val rms_score: 0.9191
46
+ 2025-09-18 23:04:32,299 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0207 | Val rms_score: 0.9364
47
+ 2025-09-18 23:04:35,150 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0277 | Val rms_score: 0.9358
48
+ 2025-09-18 23:04:37,966 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0245 | Val rms_score: 0.9261
49
+ 2025-09-18 23:04:40,420 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0206 | Val rms_score: 0.9140
50
+ 2025-09-18 23:04:42,769 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0167 | Val rms_score: 0.9054
51
+ 2025-09-18 23:04:45,603 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0171 | Val rms_score: 0.9196
52
+ 2025-09-18 23:04:48,475 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0250 | Val rms_score: 0.9223
53
+ 2025-09-18 23:04:51,367 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0221 | Val rms_score: 0.9050
54
+ 2025-09-18 23:04:53,907 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0205 | Val rms_score: 0.9153
55
+ 2025-09-18 23:04:56,536 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0261 | Val rms_score: 0.9223
56
+ 2025-09-18 23:04:58,894 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0194 | Val rms_score: 0.9005
57
+ 2025-09-18 23:05:01,948 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0215 | Val rms_score: 0.8960
58
+ 2025-09-18 23:05:04,298 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0234 | Val rms_score: 0.9567
59
+ 2025-09-18 23:05:07,178 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0485 | Val rms_score: 0.9198
60
+ 2025-09-18 23:05:09,371 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0240 | Val rms_score: 0.9196
61
+ 2025-09-18 23:05:12,158 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0175 | Val rms_score: 0.9137
62
+ 2025-09-18 23:05:15,027 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0149 | Val rms_score: 0.9073
63
+ 2025-09-18 23:05:17,625 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0192 | Val rms_score: 0.9009
64
+ 2025-09-18 23:05:20,440 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0202 | Val rms_score: 0.9136
65
+ 2025-09-18 23:05:23,039 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0176 | Val rms_score: 0.9044
66
+ 2025-09-18 23:05:25,696 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0154 | Val rms_score: 0.8890
67
+ 2025-09-18 23:05:28,396 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0533 | Val rms_score: 0.9130
68
+ 2025-09-18 23:05:31,171 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0693 | Val rms_score: 0.9465
69
+ 2025-09-18 23:05:33,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0657 | Val rms_score: 0.9291
70
+ 2025-09-18 23:05:36,406 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0329 | Val rms_score: 0.9117
71
+ 2025-09-18 23:05:38,775 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0252 | Val rms_score: 0.9208
72
+ 2025-09-18 23:05:41,946 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0263 | Val rms_score: 0.9475
73
+ 2025-09-18 23:05:44,298 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0535 | Val rms_score: 0.9214
74
+ 2025-09-18 23:05:48,012 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0161 | Val rms_score: 0.9249
75
+ 2025-09-18 23:05:50,426 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0153 | Val rms_score: 0.9365
76
+ 2025-09-18 23:05:53,082 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0137 | Val rms_score: 0.9329
77
+ 2025-09-18 23:05:55,834 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0139 | Val rms_score: 0.9229
78
+ 2025-09-18 23:05:58,753 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0131 | Val rms_score: 0.9197
79
+ 2025-09-18 23:06:01,137 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0124 | Val rms_score: 0.9355
80
+ 2025-09-18 23:06:04,026 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0076 | Val rms_score: 0.9333
81
+ 2025-09-18 23:06:06,335 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0111 | Val rms_score: 0.9261
82
+ 2025-09-18 23:06:09,501 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0110 | Val rms_score: 0.9286
83
+ 2025-09-18 23:06:11,741 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0106 | Val rms_score: 0.9171
84
+ 2025-09-18 23:06:14,680 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0106 | Val rms_score: 0.9140
85
+ 2025-09-18 23:06:16,970 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0107 | Val rms_score: 0.9191
86
+ 2025-09-18 23:06:19,627 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0124 | Val rms_score: 0.9199
87
+ 2025-09-18 23:06:22,234 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0112 | Val rms_score: 0.9234
88
+ 2025-09-18 23:06:25,139 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0177 | Val rms_score: 0.9103
89
+ 2025-09-18 23:06:27,449 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0191 | Val rms_score: 0.8952
90
+ 2025-09-18 23:06:30,262 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0262 | Val rms_score: 0.9999
91
+ 2025-09-18 23:06:32,620 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0924 | Val rms_score: 0.9998
92
+ 2025-09-18 23:06:35,786 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0321 | Val rms_score: 1.0302
93
+ 2025-09-18 23:06:38,253 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0213 | Val rms_score: 1.0187
94
+ 2025-09-18 23:06:41,178 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0184 | Val rms_score: 1.0128
95
+ 2025-09-18 23:06:43,668 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0173 | Val rms_score: 1.0085
96
+ 2025-09-18 23:06:46,541 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0169 | Val rms_score: 1.0385
97
+ 2025-09-18 23:06:49,144 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0310 | Val rms_score: 1.0220
98
+ 2025-09-18 23:06:52,024 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0168 | Val rms_score: 1.0092
99
+ 2025-09-18 23:06:54,365 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0151 | Val rms_score: 0.9968
100
+ 2025-09-18 23:06:57,177 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0134 | Val rms_score: 0.9810
101
+ 2025-09-18 23:06:59,674 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0121 | Val rms_score: 0.9856
102
+ 2025-09-18 23:07:02,881 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0115 | Val rms_score: 0.9904
103
+ 2025-09-18 23:07:05,169 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0123 | Val rms_score: 0.9937
104
+ 2025-09-18 23:07:08,071 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0162 | Val rms_score: 0.9771
105
+ 2025-09-18 23:07:10,512 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0143 | Val rms_score: 0.9713
106
+ 2025-09-18 23:07:13,089 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0114 | Val rms_score: 0.9766
107
+ 2025-09-18 23:07:15,792 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0097 | Val rms_score: 0.9728
108
+ 2025-09-18 23:07:18,318 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0091 | Val rms_score: 0.9814
109
+ 2025-09-18 23:07:20,583 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0098 | Val rms_score: 0.9793
110
+ 2025-09-18 23:07:23,115 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0101 | Val rms_score: 0.9780
111
+ 2025-09-18 23:07:25,287 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0115 | Val rms_score: 0.9833
112
+ 2025-09-18 23:07:28,097 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0164 | Val rms_score: 0.9734
113
+ 2025-09-18 23:07:30,173 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0130 | Val rms_score: 0.9684
114
+ 2025-09-18 23:07:32,712 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0102 | Val rms_score: 0.9666
115
+ 2025-09-18 23:07:34,797 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0101 | Val rms_score: 0.9649
116
+ 2025-09-18 23:07:35,236 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5853
117
+ 2025-09-18 23:07:35,570 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset freesolv at 2025-09-18_23-07-35
118
+ 2025-09-18 23:07:37,712 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.5037 | Val rms_score: 1.0934
119
+ 2025-09-18 23:07:37,712 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
120
+ 2025-09-18 23:07:38,260 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.0934
121
+ 2025-09-18 23:07:40,644 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.1415 | Val rms_score: 1.0098
122
+ 2025-09-18 23:07:40,819 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
123
+ 2025-09-18 23:07:41,348 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 1.0098
124
+ 2025-09-18 23:07:43,670 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.0786 | Val rms_score: 0.9485
125
+ 2025-09-18 23:07:43,852 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
126
+ 2025-09-18 23:07:44,371 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.9485
127
+ 2025-09-18 23:07:46,852 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.0630 | Val rms_score: 0.9196
128
+ 2025-09-18 23:07:47,034 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 68
129
+ 2025-09-18 23:07:47,549 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.9196
130
+ 2025-09-18 23:07:49,807 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0625 | Val rms_score: 0.9225
131
+ 2025-09-18 23:07:52,300 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0508 | Val rms_score: 0.9813
132
+ 2025-09-18 23:07:55,004 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0547 | Val rms_score: 0.9416
133
+ 2025-09-18 23:07:57,500 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0441 | Val rms_score: 0.9299
134
+ 2025-09-18 23:08:00,230 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0352 | Val rms_score: 0.9328
135
+ 2025-09-18 23:08:03,564 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0322 | Val rms_score: 0.9530
136
+ 2025-09-18 23:08:05,734 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0286 | Val rms_score: 0.9447
137
+ 2025-09-18 23:08:08,563 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0315 | Val rms_score: 0.9918
138
+ 2025-09-18 23:08:10,936 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0375 | Val rms_score: 0.9473
139
+ 2025-09-18 23:08:14,317 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0270 | Val rms_score: 0.9380
140
+ 2025-09-18 23:08:16,963 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0324 | Val rms_score: 0.9460
141
+ 2025-09-18 23:08:19,674 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0254 | Val rms_score: 0.9924
142
+ 2025-09-18 23:08:22,935 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0222 | Val rms_score: 0.9679
143
+ 2025-09-18 23:08:26,281 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0190 | Val rms_score: 0.9812
144
+ 2025-09-18 23:08:28,707 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0234 | Val rms_score: 0.9781
145
+ 2025-09-18 23:08:32,091 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0409 | Val rms_score: 0.9818
146
+ 2025-09-18 23:08:35,267 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0319 | Val rms_score: 0.9322
147
+ 2025-09-18 23:08:38,326 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0276 | Val rms_score: 0.9872
148
+ 2025-09-18 23:08:40,672 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0249 | Val rms_score: 0.9507
149
+ 2025-09-18 23:08:43,184 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0172 | Val rms_score: 0.9418
150
+ 2025-09-18 23:08:45,322 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0236 | Val rms_score: 0.8503
151
+ 2025-09-18 23:08:45,464 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 425
152
+ 2025-09-18 23:08:46,009 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 25 with val rms_score: 0.8503
153
+ 2025-09-18 23:08:48,462 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.1002 | Val rms_score: 0.7786
154
+ 2025-09-18 23:08:48,938 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 442
155
+ 2025-09-18 23:08:49,483 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 26 with val rms_score: 0.7786
156
+ 2025-09-18 23:08:51,940 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.2059 | Val rms_score: 0.8507
157
+ 2025-09-18 23:08:54,142 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0809 | Val rms_score: 0.9458
158
+ 2025-09-18 23:08:56,311 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0634 | Val rms_score: 0.9031
159
+ 2025-09-18 23:08:58,783 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0311 | Val rms_score: 0.8658
160
+ 2025-09-18 23:09:00,920 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0263 | Val rms_score: 0.8731
161
+ 2025-09-18 23:09:03,673 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0225 | Val rms_score: 0.8543
162
+ 2025-09-18 23:09:05,983 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0190 | Val rms_score: 0.8551
163
+ 2025-09-18 23:09:08,484 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0185 | Val rms_score: 0.8418
164
+ 2025-09-18 23:09:10,703 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0210 | Val rms_score: 0.8574
165
+ 2025-09-18 23:09:13,216 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0430 | Val rms_score: 0.7706
166
+ 2025-09-18 23:09:13,676 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 612
167
+ 2025-09-18 23:09:14,224 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 36 with val rms_score: 0.7706
168
+ 2025-09-18 23:09:16,583 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.2188 | Val rms_score: 1.0516
169
+ 2025-09-18 23:09:19,020 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0928 | Val rms_score: 1.0265
170
+ 2025-09-18 23:09:21,117 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0974 | Val rms_score: 1.0061
171
+ 2025-09-18 23:09:23,588 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0391 | Val rms_score: 0.9399
172
+ 2025-09-18 23:09:25,640 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0236 | Val rms_score: 0.9441
173
+ 2025-09-18 23:09:28,438 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0170 | Val rms_score: 0.9314
174
+ 2025-09-18 23:09:30,524 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0182 | Val rms_score: 0.9318
175
+ 2025-09-18 23:09:33,011 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0193 | Val rms_score: 0.9362
176
+ 2025-09-18 23:09:35,216 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0160 | Val rms_score: 0.9302
177
+ 2025-09-18 23:09:37,414 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0137 | Val rms_score: 0.9210
178
+ 2025-09-18 23:09:40,163 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0195 | Val rms_score: 0.9053
179
+ 2025-09-18 23:09:42,540 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0162 | Val rms_score: 0.9094
180
+ 2025-09-18 23:09:44,961 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0137 | Val rms_score: 0.9124
181
+ 2025-09-18 23:09:47,279 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0178 | Val rms_score: 0.9313
182
+ 2025-09-18 23:09:49,704 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0735 | Val rms_score: 0.9164
183
+ 2025-09-18 23:09:52,561 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0455 | Val rms_score: 0.8735
184
+ 2025-09-18 23:09:54,802 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0439 | Val rms_score: 0.9690
185
+ 2025-09-18 23:09:57,308 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0457 | Val rms_score: 0.9354
186
+ 2025-09-18 23:09:59,464 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0232 | Val rms_score: 0.9209
187
+ 2025-09-18 23:10:01,963 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0173 | Val rms_score: 0.8958
188
+ 2025-09-18 23:10:04,383 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0165 | Val rms_score: 0.9009
189
+ 2025-09-18 23:10:06,846 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0226 | Val rms_score: 0.9378
190
+ 2025-09-18 23:10:09,902 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0396 | Val rms_score: 0.9261
191
+ 2025-09-18 23:10:12,432 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0221 | Val rms_score: 0.9156
192
+ 2025-09-18 23:10:14,913 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0154 | Val rms_score: 0.9160
193
+ 2025-09-18 23:10:17,515 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0149 | Val rms_score: 0.9111
194
+ 2025-09-18 23:10:19,724 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0122 | Val rms_score: 0.9001
195
+ 2025-09-18 23:10:22,232 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0117 | Val rms_score: 0.9190
196
+ 2025-09-18 23:10:24,359 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0085 | Val rms_score: 0.9070
197
+ 2025-09-18 23:10:26,860 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0176 | Val rms_score: 0.8893
198
+ 2025-09-18 23:10:29,299 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0441 | Val rms_score: 0.9029
199
+ 2025-09-18 23:10:31,801 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0188 | Val rms_score: 0.8955
200
+ 2025-09-18 23:10:34,048 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0117 | Val rms_score: 0.9009
201
+ 2025-09-18 23:10:36,599 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0107 | Val rms_score: 0.9103
202
+ 2025-09-18 23:10:38,728 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0105 | Val rms_score: 0.9155
203
+ 2025-09-18 23:10:41,551 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0132 | Val rms_score: 0.9199
204
+ 2025-09-18 23:10:43,665 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0083 | Val rms_score: 0.9068
205
+ 2025-09-18 23:10:46,177 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0091 | Val rms_score: 0.9165
206
+ 2025-09-18 23:10:48,267 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0089 | Val rms_score: 0.8972
207
+ 2025-09-18 23:10:50,836 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0089 | Val rms_score: 0.9056
208
+ 2025-09-18 23:10:53,340 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0090 | Val rms_score: 0.9062
209
+ 2025-09-18 23:10:55,908 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0177 | Val rms_score: 0.9093
210
+ 2025-09-18 23:10:58,076 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0094 | Val rms_score: 0.9074
211
+ 2025-09-18 23:11:00,584 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0074 | Val rms_score: 0.9035
212
+ 2025-09-18 23:11:02,832 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0073 | Val rms_score: 0.9007
213
+ 2025-09-18 23:11:05,647 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0067 | Val rms_score: 0.8995
214
+ 2025-09-18 23:11:07,967 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0067 | Val rms_score: 0.9005
215
+ 2025-09-18 23:11:10,566 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0070 | Val rms_score: 0.8997
216
+ 2025-09-18 23:11:12,969 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0098 | Val rms_score: 0.9083
217
+ 2025-09-18 23:11:15,547 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0142 | Val rms_score: 0.9356
218
+ 2025-09-18 23:11:18,478 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0551 | Val rms_score: 0.9189
219
+ 2025-09-18 23:11:21,414 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0248 | Val rms_score: 0.9482
220
+ 2025-09-18 23:11:23,844 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0186 | Val rms_score: 0.9259
221
+ 2025-09-18 23:11:26,804 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0165 | Val rms_score: 0.9417
222
+ 2025-09-18 23:11:29,140 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0112 | Val rms_score: 0.9325
223
+ 2025-09-18 23:11:32,349 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0133 | Val rms_score: 0.9154
224
+ 2025-09-18 23:11:34,722 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0165 | Val rms_score: 0.8860
225
+ 2025-09-18 23:11:37,624 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0192 | Val rms_score: 0.8927
226
+ 2025-09-18 23:11:39,852 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0115 | Val rms_score: 0.8805
227
+ 2025-09-18 23:11:42,717 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0094 | Val rms_score: 0.8825
228
+ 2025-09-18 23:11:45,368 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0079 | Val rms_score: 0.8832
229
+ 2025-09-18 23:11:48,288 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0087 | Val rms_score: 0.8877
230
+ 2025-09-18 23:11:50,783 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0087 | Val rms_score: 0.8812
231
+ 2025-09-18 23:11:53,674 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0080 | Val rms_score: 0.8879
232
+ 2025-09-18 23:11:54,255 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.5350
233
+ 2025-09-18 23:11:54,589 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset freesolv at 2025-09-18_23-11-54
234
+ 2025-09-18 23:11:56,600 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 1.2059 | Val rms_score: 1.2840
235
+ 2025-09-18 23:11:56,600 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 17
236
+ 2025-09-18 23:11:57,118 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 1.2840
237
+ 2025-09-18 23:11:59,787 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2335 | Val rms_score: 0.9836
238
+ 2025-09-18 23:11:59,955 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 34
239
+ 2025-09-18 23:12:00,493 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.9836
240
+ 2025-09-18 23:12:03,389 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.1415 | Val rms_score: 0.8801
241
+ 2025-09-18 23:12:03,567 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 51
242
+ 2025-09-18 23:12:04,098 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.8801
243
+ 2025-09-18 23:12:06,525 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1025 | Val rms_score: 0.8906
244
+ 2025-09-18 23:12:08,869 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.0786 | Val rms_score: 0.8590
245
+ 2025-09-18 23:12:09,051 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 85
246
+ 2025-09-18 23:12:09,591 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.8590
247
+ 2025-09-18 23:12:12,356 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.0879 | Val rms_score: 0.8742
248
+ 2025-09-18 23:12:15,629 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.0882 | Val rms_score: 0.9338
249
+ 2025-09-18 23:12:18,315 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1645 | Val rms_score: 0.9360
250
+ 2025-09-18 23:12:21,163 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0873 | Val rms_score: 0.8882
251
+ 2025-09-18 23:12:23,862 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0869 | Val rms_score: 0.9158
252
+ 2025-09-18 23:12:26,702 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.1172 | Val rms_score: 0.9235
253
+ 2025-09-18 23:12:29,447 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0334 | Val rms_score: 0.9077
254
+ 2025-09-18 23:12:31,985 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0634 | Val rms_score: 0.9117
255
+ 2025-09-18 23:12:34,777 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0846 | Val rms_score: 0.8992
256
+ 2025-09-18 23:12:37,528 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0489 | Val rms_score: 0.9014
257
+ 2025-09-18 23:12:40,276 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0432 | Val rms_score: 0.8908
258
+ 2025-09-18 23:12:43,024 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0372 | Val rms_score: 0.9163
259
+ 2025-09-18 23:12:45,431 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0420 | Val rms_score: 0.8919
260
+ 2025-09-18 23:12:48,172 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0379 | Val rms_score: 0.8641
261
+ 2025-09-18 23:12:50,910 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0335 | Val rms_score: 0.8812
262
+ 2025-09-18 23:12:53,705 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0294 | Val rms_score: 0.8760
263
+ 2025-09-18 23:12:56,775 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0284 | Val rms_score: 0.8918
264
+ 2025-09-18 23:12:59,623 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0301 | Val rms_score: 0.8901
265
+ 2025-09-18 23:13:02,012 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0334 | Val rms_score: 0.8824
266
+ 2025-09-18 23:13:04,816 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0237 | Val rms_score: 0.8771
267
+ 2025-09-18 23:13:07,626 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0217 | Val rms_score: 0.8746
268
+ 2025-09-18 23:13:10,697 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0216 | Val rms_score: 0.8764
269
+ 2025-09-18 23:13:13,448 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0280 | Val rms_score: 0.8715
270
+ 2025-09-18 23:13:16,066 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0231 | Val rms_score: 0.8875
271
+ 2025-09-18 23:13:18,328 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0201 | Val rms_score: 0.8769
272
+ 2025-09-18 23:13:21,099 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0222 | Val rms_score: 0.8744
273
+ 2025-09-18 23:13:24,127 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0193 | Val rms_score: 0.8745
274
+ 2025-09-18 23:13:26,856 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0195 | Val rms_score: 0.8700
275
+ 2025-09-18 23:13:29,656 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0179 | Val rms_score: 0.8739
276
+ 2025-09-18 23:13:32,214 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0179 | Val rms_score: 0.8653
277
+ 2025-09-18 23:13:34,544 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0202 | Val rms_score: 0.8994
278
+ 2025-09-18 23:13:37,662 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0177 | Val rms_score: 0.8958
279
+ 2025-09-18 23:13:40,534 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0232 | Val rms_score: 0.8900
280
+ 2025-09-18 23:13:43,274 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0177 | Val rms_score: 0.8661
281
+ 2025-09-18 23:13:46,103 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0193 | Val rms_score: 0.8758
282
+ 2025-09-18 23:13:48,512 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0173 | Val rms_score: 0.8649
283
+ 2025-09-18 23:13:51,316 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0179 | Val rms_score: 0.8797
284
+ 2025-09-18 23:13:53,861 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0199 | Val rms_score: 0.8522
285
+ 2025-09-18 23:13:54,018 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 731
286
+ 2025-09-18 23:13:54,560 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 43 with val rms_score: 0.8522
287
+ 2025-09-18 23:13:57,324 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0177 | Val rms_score: 0.8807
288
+ 2025-09-18 23:14:00,121 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0195 | Val rms_score: 0.8775
289
+ 2025-09-18 23:14:02,825 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0170 | Val rms_score: 0.8872
290
+ 2025-09-18 23:14:05,714 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0164 | Val rms_score: 0.8918
291
+ 2025-09-18 23:14:07,849 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0167 | Val rms_score: 0.8612
292
+ 2025-09-18 23:14:10,661 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0187 | Val rms_score: 0.8798
293
+ 2025-09-18 23:14:13,523 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0175 | Val rms_score: 0.8974
294
+ 2025-09-18 23:14:16,302 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0276 | Val rms_score: 0.8755
295
+ 2025-09-18 23:14:19,411 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0165 | Val rms_score: 0.8892
296
+ 2025-09-18 23:14:22,059 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0505 | Val rms_score: 0.8858
297
+ 2025-09-18 23:14:24,669 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0352 | Val rms_score: 0.8951
298
+ 2025-09-18 23:14:27,492 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0290 | Val rms_score: 0.8601
299
+ 2025-09-18 23:14:30,249 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0195 | Val rms_score: 0.8718
300
+ 2025-09-18 23:14:33,319 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0185 | Val rms_score: 0.8651
301
+ 2025-09-18 23:14:36,133 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0144 | Val rms_score: 0.8539
302
+ 2025-09-18 23:14:39,316 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0171 | Val rms_score: 0.8583
303
+ 2025-09-18 23:14:42,111 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0138 | Val rms_score: 0.8628
304
+ 2025-09-18 23:14:44,906 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0126 | Val rms_score: 0.8516
305
+ 2025-09-18 23:14:45,374 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1037
306
+ 2025-09-18 23:14:45,923 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 61 with val rms_score: 0.8516
307
+ 2025-09-18 23:14:48,781 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0120 | Val rms_score: 0.8638
308
+ 2025-09-18 23:14:51,402 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0144 | Val rms_score: 0.8887
309
+ 2025-09-18 23:14:53,782 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0128 | Val rms_score: 0.8737
310
+ 2025-09-18 23:14:56,563 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0167 | Val rms_score: 0.8671
311
+ 2025-09-18 23:14:59,351 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0133 | Val rms_score: 0.8642
312
+ 2025-09-18 23:15:02,517 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0139 | Val rms_score: 0.8488
313
+ 2025-09-18 23:15:02,663 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1139
314
+ 2025-09-18 23:15:03,202 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 67 with val rms_score: 0.8488
315
+ 2025-09-18 23:15:05,927 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0164 | Val rms_score: 0.8970
316
+ 2025-09-18 23:15:08,323 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0319 | Val rms_score: 0.8532
317
+ 2025-09-18 23:15:11,051 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0299 | Val rms_score: 0.8236
318
+ 2025-09-18 23:15:11,235 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1190
319
+ 2025-09-18 23:15:11,777 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 70 with val rms_score: 0.8236
320
+ 2025-09-18 23:15:14,551 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0186 | Val rms_score: 0.8569
321
+ 2025-09-18 23:15:17,665 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0133 | Val rms_score: 0.8600
322
+ 2025-09-18 23:15:20,514 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0135 | Val rms_score: 0.8628
323
+ 2025-09-18 23:15:23,339 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0123 | Val rms_score: 0.8527
324
+ 2025-09-18 23:15:25,709 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0111 | Val rms_score: 0.8707
325
+ 2025-09-18 23:15:28,467 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0129 | Val rms_score: 0.8673
326
+ 2025-09-18 23:15:31,457 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0144 | Val rms_score: 0.8625
327
+ 2025-09-18 23:15:34,216 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0132 | Val rms_score: 0.8601
328
+ 2025-09-18 23:15:37,087 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0128 | Val rms_score: 0.8463
329
+ 2025-09-18 23:15:39,907 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0137 | Val rms_score: 0.8673
330
+ 2025-09-18 23:15:42,526 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0138 | Val rms_score: 0.8574
331
+ 2025-09-18 23:15:45,719 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0124 | Val rms_score: 0.8774
332
+ 2025-09-18 23:15:48,593 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0109 | Val rms_score: 0.8575
333
+ 2025-09-18 23:15:51,399 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0112 | Val rms_score: 0.8548
334
+ 2025-09-18 23:15:54,283 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0146 | Val rms_score: 0.8342
335
+ 2025-09-18 23:15:56,695 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0236 | Val rms_score: 0.8627
336
+ 2025-09-18 23:15:59,817 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0214 | Val rms_score: 0.8980
337
+ 2025-09-18 23:16:02,646 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0190 | Val rms_score: 0.8753
338
+ 2025-09-18 23:16:05,407 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0194 | Val rms_score: 0.8545
339
+ 2025-09-18 23:16:08,159 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0182 | Val rms_score: 0.8512
340
+ 2025-09-18 23:16:10,985 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0535 | Val rms_score: 0.9853
341
+ 2025-09-18 23:16:13,845 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.1025 | Val rms_score: 0.8136
342
+ 2025-09-18 23:16:13,999 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Global step of best model: 1564
343
+ 2025-09-18 23:16:14,564 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Best model saved at epoch 92 with val rms_score: 0.8136
344
+ 2025-09-18 23:16:17,295 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0694 | Val rms_score: 0.8794
345
+ 2025-09-18 23:16:20,106 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0506 | Val rms_score: 0.9417
346
+ 2025-09-18 23:16:22,921 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0275 | Val rms_score: 0.9003
347
+ 2025-09-18 23:16:25,689 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0202 | Val rms_score: 0.8791
348
+ 2025-09-18 23:16:28,422 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0165 | Val rms_score: 0.8771
349
+ 2025-09-18 23:16:31,228 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0141 | Val rms_score: 0.8765
350
+ 2025-09-18 23:16:33,581 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0115 | Val rms_score: 0.8799
351
+ 2025-09-18 23:16:36,265 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0119 | Val rms_score: 0.8805
352
+ 2025-09-18 23:16:36,838 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Test rms_score: 0.6945
353
+ 2025-09-18 23:16:37,190 - logs_modchembert_freesolv_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.6049, Std Dev: 0.0666
logs_modchembert_regression_ModChemBERT-MLM-DAPT/modchembert_deepchem_splits_run_lipo_epochs100_batch_size32_20250918_231637.log ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-09-18 23:16:37,192 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Running benchmark for dataset: lipo
2
+ 2025-09-18 23:16:37,192 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - dataset: lipo, tasks: ['exp'], epochs: 100, learning rate: 3e-05, transform: True
3
+ 2025-09-18 23:16:37,205 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 1 for dataset lipo at 2025-09-18_23-16-37
4
+ 2025-09-18 23:16:48,207 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3344 | Val rms_score: 0.8535
5
+ 2025-09-18 23:16:48,207 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
6
+ 2025-09-18 23:16:48,719 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.8535
7
+ 2025-09-18 23:17:00,354 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2875 | Val rms_score: 0.7675
8
+ 2025-09-18 23:17:00,541 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
9
+ 2025-09-18 23:17:01,082 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7675
10
+ 2025-09-18 23:17:10,240 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2354 | Val rms_score: 0.7310
11
+ 2025-09-18 23:17:10,384 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
12
+ 2025-09-18 23:17:10,935 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.7310
13
+ 2025-09-18 23:17:20,376 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1734 | Val rms_score: 0.7055
14
+ 2025-09-18 23:17:20,555 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
15
+ 2025-09-18 23:17:21,105 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.7055
16
+ 2025-09-18 23:17:30,928 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1725 | Val rms_score: 0.7104
17
+ 2025-09-18 23:17:40,166 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1281 | Val rms_score: 0.6993
18
+ 2025-09-18 23:17:40,646 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 630
19
+ 2025-09-18 23:17:41,191 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.6993
20
+ 2025-09-18 23:17:50,982 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1129 | Val rms_score: 0.6825
21
+ 2025-09-18 23:17:51,172 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 735
22
+ 2025-09-18 23:17:51,718 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 7 with val rms_score: 0.6825
23
+ 2025-09-18 23:18:01,031 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.1031 | Val rms_score: 0.6979
24
+ 2025-09-18 23:18:10,424 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0917 | Val rms_score: 0.6927
25
+ 2025-09-18 23:18:21,150 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0831 | Val rms_score: 0.7026
26
+ 2025-09-18 23:18:30,386 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0813 | Val rms_score: 0.6891
27
+ 2025-09-18 23:18:40,380 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0755 | Val rms_score: 0.7002
28
+ 2025-09-18 23:18:49,807 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0755 | Val rms_score: 0.7185
29
+ 2025-09-18 23:18:59,252 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0728 | Val rms_score: 0.6993
30
+ 2025-09-18 23:19:09,200 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0679 | Val rms_score: 0.6954
31
+ 2025-09-18 23:19:18,398 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0680 | Val rms_score: 0.6926
32
+ 2025-09-18 23:19:28,101 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0566 | Val rms_score: 0.6948
33
+ 2025-09-18 23:19:37,135 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0587 | Val rms_score: 0.6738
34
+ 2025-09-18 23:19:37,279 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1890
35
+ 2025-09-18 23:19:37,832 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 18 with val rms_score: 0.6738
36
+ 2025-09-18 23:19:47,626 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0572 | Val rms_score: 0.6909
37
+ 2025-09-18 23:19:58,350 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0537 | Val rms_score: 0.6764
38
+ 2025-09-18 23:20:07,546 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0664 | Val rms_score: 0.6877
39
+ 2025-09-18 23:20:17,769 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0820 | Val rms_score: 0.6812
40
+ 2025-09-18 23:20:27,110 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0477 | Val rms_score: 0.6793
41
+ 2025-09-18 23:20:36,947 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0498 | Val rms_score: 0.6705
42
+ 2025-09-18 23:20:37,094 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2520
43
+ 2025-09-18 23:20:37,654 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 24 with val rms_score: 0.6705
44
+ 2025-09-18 23:20:47,539 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0559 | Val rms_score: 0.6863
45
+ 2025-09-18 23:20:56,759 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0542 | Val rms_score: 0.6546
46
+ 2025-09-18 23:20:57,235 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2730
47
+ 2025-09-18 23:20:57,784 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 26 with val rms_score: 0.6546
48
+ 2025-09-18 23:21:07,585 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0525 | Val rms_score: 0.6735
49
+ 2025-09-18 23:21:16,840 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0520 | Val rms_score: 0.6713
50
+ 2025-09-18 23:21:27,640 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0493 | Val rms_score: 0.6789
51
+ 2025-09-18 23:21:37,371 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0484 | Val rms_score: 0.6724
52
+ 2025-09-18 23:21:46,567 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0500 | Val rms_score: 0.6651
53
+ 2025-09-18 23:21:56,847 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0495 | Val rms_score: 0.6783
54
+ 2025-09-18 23:22:06,070 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0512 | Val rms_score: 0.6911
55
+ 2025-09-18 23:22:15,755 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0462 | Val rms_score: 0.6827
56
+ 2025-09-18 23:22:24,978 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0460 | Val rms_score: 0.6523
57
+ 2025-09-18 23:22:25,138 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 3675
58
+ 2025-09-18 23:22:25,682 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 35 with val rms_score: 0.6523
59
+ 2025-09-18 23:22:37,427 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0459 | Val rms_score: 0.6587
60
+ 2025-09-18 23:22:49,689 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0452 | Val rms_score: 0.6846
61
+ 2025-09-18 23:23:00,570 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0486 | Val rms_score: 0.6733
62
+ 2025-09-18 23:23:13,780 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0437 | Val rms_score: 0.6629
63
+ 2025-09-18 23:23:24,852 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0447 | Val rms_score: 0.6681
64
+ 2025-09-18 23:23:36,664 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0432 | Val rms_score: 0.6915
65
+ 2025-09-18 23:23:48,850 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0531 | Val rms_score: 0.6660
66
+ 2025-09-18 23:23:59,586 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0464 | Val rms_score: 0.6811
67
+ 2025-09-18 23:24:11,470 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0451 | Val rms_score: 0.6610
68
+ 2025-09-18 23:24:22,957 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0441 | Val rms_score: 0.6676
69
+ 2025-09-18 23:24:34,745 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0458 | Val rms_score: 0.6728
70
+ 2025-09-18 23:24:46,534 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0446 | Val rms_score: 0.6542
71
+ 2025-09-18 23:24:58,425 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0436 | Val rms_score: 0.6636
72
+ 2025-09-18 23:25:10,771 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0458 | Val rms_score: 0.6601
73
+ 2025-09-18 23:25:21,813 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0453 | Val rms_score: 0.6613
74
+ 2025-09-18 23:25:33,838 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0437 | Val rms_score: 0.6724
75
+ 2025-09-18 23:25:44,133 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0419 | Val rms_score: 0.6661
76
+ 2025-09-18 23:25:53,950 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0404 | Val rms_score: 0.6614
77
+ 2025-09-18 23:26:03,627 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0437 | Val rms_score: 0.6691
78
+ 2025-09-18 23:26:12,818 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0396 | Val rms_score: 0.6538
79
+ 2025-09-18 23:26:22,683 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0408 | Val rms_score: 0.6626
80
+ 2025-09-18 23:26:32,167 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0410 | Val rms_score: 0.6679
81
+ 2025-09-18 23:26:42,849 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0431 | Val rms_score: 0.6726
82
+ 2025-09-18 23:26:52,441 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0461 | Val rms_score: 0.6748
83
+ 2025-09-18 23:27:01,629 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0469 | Val rms_score: 0.6750
84
+ 2025-09-18 23:27:11,271 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0445 | Val rms_score: 0.6584
85
+ 2025-09-18 23:27:20,853 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0471 | Val rms_score: 0.6727
86
+ 2025-09-18 23:27:30,641 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0398 | Val rms_score: 0.6616
87
+ 2025-09-18 23:27:39,920 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0406 | Val rms_score: 0.6511
88
+ 2025-09-18 23:27:40,066 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 6720
89
+ 2025-09-18 23:27:40,608 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 64 with val rms_score: 0.6511
90
+ 2025-09-18 23:27:50,761 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0370 | Val rms_score: 0.6690
91
+ 2025-09-18 23:28:01,481 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0456 | Val rms_score: 0.6841
92
+ 2025-09-18 23:28:13,567 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0395 | Val rms_score: 0.6654
93
+ 2025-09-18 23:28:23,726 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0455 | Val rms_score: 0.6732
94
+ 2025-09-18 23:28:32,242 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0405 | Val rms_score: 0.6400
95
+ 2025-09-18 23:28:32,386 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 7245
96
+ 2025-09-18 23:28:32,935 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 69 with val rms_score: 0.6400
97
+ 2025-09-18 23:28:42,734 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0381 | Val rms_score: 0.6805
98
+ 2025-09-18 23:28:52,478 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0403 | Val rms_score: 0.6586
99
+ 2025-09-18 23:29:02,155 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0375 | Val rms_score: 0.6570
100
+ 2025-09-18 23:29:12,356 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0399 | Val rms_score: 0.6521
101
+ 2025-09-18 23:29:24,326 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0395 | Val rms_score: 0.6756
102
+ 2025-09-18 23:29:37,279 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0379 | Val rms_score: 0.6728
103
+ 2025-09-18 23:29:49,806 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0396 | Val rms_score: 0.6619
104
+ 2025-09-18 23:30:03,212 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0406 | Val rms_score: 0.6498
105
+ 2025-09-18 23:30:15,914 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0424 | Val rms_score: 0.6448
106
+ 2025-09-18 23:30:27,014 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0418 | Val rms_score: 0.6617
107
+ 2025-09-18 23:30:36,848 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0419 | Val rms_score: 0.6726
108
+ 2025-09-18 23:30:46,027 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0434 | Val rms_score: 0.6582
109
+ 2025-09-18 23:30:55,971 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0430 | Val rms_score: 0.6570
110
+ 2025-09-18 23:31:06,029 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0417 | Val rms_score: 0.6605
111
+ 2025-09-18 23:31:16,165 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0437 | Val rms_score: 0.6576
112
+ 2025-09-18 23:31:27,453 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0416 | Val rms_score: 0.6692
113
+ 2025-09-18 23:31:37,934 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0352 | Val rms_score: 0.6775
114
+ 2025-09-18 23:31:48,094 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0408 | Val rms_score: 0.6518
115
+ 2025-09-18 23:31:57,797 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0387 | Val rms_score: 0.6731
116
+ 2025-09-18 23:32:07,970 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0396 | Val rms_score: 0.6610
117
+ 2025-09-18 23:32:20,197 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0387 | Val rms_score: 0.6690
118
+ 2025-09-18 23:32:32,225 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0359 | Val rms_score: 0.6657
119
+ 2025-09-18 23:32:45,304 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0385 | Val rms_score: 0.6583
120
+ 2025-09-18 23:32:57,354 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0394 | Val rms_score: 0.6607
121
+ 2025-09-18 23:33:09,418 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0362 | Val rms_score: 0.6526
122
+ 2025-09-18 23:33:21,379 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0398 | Val rms_score: 0.6790
123
+ 2025-09-18 23:33:32,117 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0371 | Val rms_score: 0.6528
124
+ 2025-09-18 23:33:42,283 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0368 | Val rms_score: 0.6627
125
+ 2025-09-18 23:33:51,487 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0373 | Val rms_score: 0.6710
126
+ 2025-09-18 23:34:01,380 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0365 | Val rms_score: 0.6768
127
+ 2025-09-18 23:34:11,240 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0381 | Val rms_score: 0.6675
128
+ 2025-09-18 23:34:12,009 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6823
129
+ 2025-09-18 23:34:12,360 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 2 for dataset lipo at 2025-09-18_23-34-12
130
+ 2025-09-18 23:34:23,917 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3969 | Val rms_score: 0.8040
131
+ 2025-09-18 23:34:23,917 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
132
+ 2025-09-18 23:34:24,448 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.8040
133
+ 2025-09-18 23:34:34,802 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.2469 | Val rms_score: 0.7314
134
+ 2025-09-18 23:34:34,973 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
135
+ 2025-09-18 23:34:35,523 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7314
136
+ 2025-09-18 23:34:47,647 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2281 | Val rms_score: 0.7416
137
+ 2025-09-18 23:35:00,586 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.1594 | Val rms_score: 0.7208
138
+ 2025-09-18 23:35:00,746 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 420
139
+ 2025-09-18 23:35:01,286 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 4 with val rms_score: 0.7208
140
+ 2025-09-18 23:35:14,433 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1400 | Val rms_score: 0.6961
141
+ 2025-09-18 23:35:14,613 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 525
142
+ 2025-09-18 23:35:15,156 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.6961
143
+ 2025-09-18 23:35:25,199 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1260 | Val rms_score: 0.6720
144
+ 2025-09-18 23:35:25,703 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 630
145
+ 2025-09-18 23:35:26,265 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.6720
146
+ 2025-09-18 23:35:36,024 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1250 | Val rms_score: 0.6904
147
+ 2025-09-18 23:35:45,411 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0949 | Val rms_score: 0.6687
148
+ 2025-09-18 23:35:45,590 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 840
149
+ 2025-09-18 23:35:46,143 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.6687
150
+ 2025-09-18 23:35:55,331 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0858 | Val rms_score: 0.6768
151
+ 2025-09-18 23:36:06,060 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0856 | Val rms_score: 0.6902
152
+ 2025-09-18 23:36:15,216 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0750 | Val rms_score: 0.7186
153
+ 2025-09-18 23:36:25,120 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0760 | Val rms_score: 0.6667
154
+ 2025-09-18 23:36:25,268 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1260
155
+ 2025-09-18 23:36:25,820 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 12 with val rms_score: 0.6667
156
+ 2025-09-18 23:36:37,200 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0663 | Val rms_score: 0.6719
157
+ 2025-09-18 23:36:48,771 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0652 | Val rms_score: 0.6842
158
+ 2025-09-18 23:37:01,271 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0642 | Val rms_score: 0.6689
159
+ 2025-09-18 23:37:12,991 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0668 | Val rms_score: 0.6816
160
+ 2025-09-18 23:37:25,071 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0607 | Val rms_score: 0.6725
161
+ 2025-09-18 23:37:36,565 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0580 | Val rms_score: 0.6725
162
+ 2025-09-18 23:37:48,523 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0572 | Val rms_score: 0.6692
163
+ 2025-09-18 23:38:01,617 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0556 | Val rms_score: 0.6625
164
+ 2025-09-18 23:38:01,762 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 2100
165
+ 2025-09-18 23:38:02,349 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 20 with val rms_score: 0.6625
166
+ 2025-09-18 23:38:13,778 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0773 | Val rms_score: 0.6867
167
+ 2025-09-18 23:38:26,422 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0625 | Val rms_score: 0.6701
168
+ 2025-09-18 23:38:38,124 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0500 | Val rms_score: 0.6767
169
+ 2025-09-18 23:38:49,848 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0539 | Val rms_score: 0.6694
170
+ 2025-09-18 23:39:00,961 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0469 | Val rms_score: 0.6644
171
+ 2025-09-18 23:39:10,248 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0526 | Val rms_score: 0.6689
172
+ 2025-09-18 23:39:20,500 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0538 | Val rms_score: 0.6662
173
+ 2025-09-18 23:39:29,615 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0504 | Val rms_score: 0.6676
174
+ 2025-09-18 23:39:40,079 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0479 | Val rms_score: 0.6663
175
+ 2025-09-18 23:39:49,777 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0450 | Val rms_score: 0.6780
176
+ 2025-09-18 23:39:59,132 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0509 | Val rms_score: 0.6768
177
+ 2025-09-18 23:40:09,367 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0448 | Val rms_score: 0.6732
178
+ 2025-09-18 23:40:18,624 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0478 | Val rms_score: 0.6620
179
+ 2025-09-18 23:40:18,775 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 3465
180
+ 2025-09-18 23:40:19,328 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 33 with val rms_score: 0.6620
181
+ 2025-09-18 23:40:29,249 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0487 | Val rms_score: 0.6670
182
+ 2025-09-18 23:40:38,920 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0471 | Val rms_score: 0.6838
183
+ 2025-09-18 23:40:48,263 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0443 | Val rms_score: 0.6513
184
+ 2025-09-18 23:40:48,771 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 3780
185
+ 2025-09-18 23:40:49,330 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 36 with val rms_score: 0.6513
186
+ 2025-09-18 23:40:59,270 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0437 | Val rms_score: 0.6797
187
+ 2025-09-18 23:41:07,996 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0462 | Val rms_score: 0.6746
188
+ 2025-09-18 23:41:19,122 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0470 | Val rms_score: 0.6761
189
+ 2025-09-18 23:41:28,748 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0434 | Val rms_score: 0.6705
190
+ 2025-09-18 23:41:38,164 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0402 | Val rms_score: 0.6747
191
+ 2025-09-18 23:41:48,357 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0455 | Val rms_score: 0.6747
192
+ 2025-09-18 23:41:57,157 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0411 | Val rms_score: 0.6694
193
+ 2025-09-18 23:42:07,154 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0455 | Val rms_score: 0.6722
194
+ 2025-09-18 23:42:16,268 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0441 | Val rms_score: 0.6700
195
+ 2025-09-18 23:42:26,130 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0435 | Val rms_score: 0.6741
196
+ 2025-09-18 23:42:35,484 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0442 | Val rms_score: 0.6978
197
+ 2025-09-18 23:42:45,846 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0408 | Val rms_score: 0.6718
198
+ 2025-09-18 23:42:55,923 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0420 | Val rms_score: 0.6641
199
+ 2025-09-18 23:43:05,163 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0403 | Val rms_score: 0.6800
200
+ 2025-09-18 23:43:15,136 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0420 | Val rms_score: 0.6655
201
+ 2025-09-18 23:43:24,255 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0484 | Val rms_score: 0.6700
202
+ 2025-09-18 23:43:34,199 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0425 | Val rms_score: 0.6655
203
+ 2025-09-18 23:43:42,984 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0406 | Val rms_score: 0.6561
204
+ 2025-09-18 23:43:52,783 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0371 | Val rms_score: 0.6642
205
+ 2025-09-18 23:44:02,423 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0379 | Val rms_score: 0.6612
206
+ 2025-09-18 23:44:12,022 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0381 | Val rms_score: 0.6761
207
+ 2025-09-18 23:44:22,810 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0370 | Val rms_score: 0.6664
208
+ 2025-09-18 23:44:32,020 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0405 | Val rms_score: 0.6805
209
+ 2025-09-18 23:44:41,890 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0380 | Val rms_score: 0.6714
210
+ 2025-09-18 23:44:51,099 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0385 | Val rms_score: 0.6605
211
+ 2025-09-18 23:45:00,941 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0498 | Val rms_score: 0.6674
212
+ 2025-09-18 23:45:10,809 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0422 | Val rms_score: 0.6608
213
+ 2025-09-18 23:45:20,052 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0357 | Val rms_score: 0.6640
214
+ 2025-09-18 23:45:29,950 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0400 | Val rms_score: 0.6675
215
+ 2025-09-18 23:45:39,151 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0411 | Val rms_score: 0.6576
216
+ 2025-09-18 23:45:49,788 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0382 | Val rms_score: 0.6762
217
+ 2025-09-18 23:45:59,667 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0404 | Val rms_score: 0.6733
218
+ 2025-09-18 23:46:08,868 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0437 | Val rms_score: 0.6833
219
+ 2025-09-18 23:46:18,365 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0367 | Val rms_score: 0.6582
220
+ 2025-09-18 23:46:27,333 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0401 | Val rms_score: 0.6723
221
+ 2025-09-18 23:46:37,548 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0388 | Val rms_score: 0.6683
222
+ 2025-09-18 23:46:46,709 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0349 | Val rms_score: 0.6636
223
+ 2025-09-18 23:46:56,241 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0384 | Val rms_score: 0.6773
224
+ 2025-09-18 23:47:06,099 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0362 | Val rms_score: 0.6671
225
+ 2025-09-18 23:47:15,340 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0381 | Val rms_score: 0.6843
226
+ 2025-09-18 23:47:26,430 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0340 | Val rms_score: 0.6692
227
+ 2025-09-18 23:47:35,710 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0372 | Val rms_score: 0.6702
228
+ 2025-09-18 23:47:45,492 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0349 | Val rms_score: 0.6664
229
+ 2025-09-18 23:47:54,980 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0373 | Val rms_score: 0.6766
230
+ 2025-09-18 23:48:04,334 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0379 | Val rms_score: 0.6819
231
+ 2025-09-18 23:48:14,584 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0361 | Val rms_score: 0.6611
232
+ 2025-09-18 23:48:23,791 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0417 | Val rms_score: 0.6703
233
+ 2025-09-18 23:48:33,634 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0373 | Val rms_score: 0.6658
234
+ 2025-09-18 23:48:42,900 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0391 | Val rms_score: 0.6752
235
+ 2025-09-18 23:48:53,184 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0349 | Val rms_score: 0.6849
236
+ 2025-09-18 23:49:02,844 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0371 | Val rms_score: 0.6644
237
+ 2025-09-18 23:49:11,907 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0383 | Val rms_score: 0.6511
238
+ 2025-09-18 23:49:12,059 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 9240
239
+ 2025-09-18 23:49:12,580 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 88 with val rms_score: 0.6511
240
+ 2025-09-18 23:49:22,437 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0382 | Val rms_score: 0.6878
241
+ 2025-09-18 23:49:31,586 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0348 | Val rms_score: 0.6766
242
+ 2025-09-18 23:49:41,331 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0358 | Val rms_score: 0.6687
243
+ 2025-09-18 23:49:51,480 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0367 | Val rms_score: 0.6791
244
+ 2025-09-18 23:50:00,610 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0351 | Val rms_score: 0.6659
245
+ 2025-09-18 23:50:10,456 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0379 | Val rms_score: 0.6678
246
+ 2025-09-18 23:50:19,662 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0360 | Val rms_score: 0.6600
247
+ 2025-09-18 23:50:29,734 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0344 | Val rms_score: 0.6739
248
+ 2025-09-18 23:50:39,343 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0338 | Val rms_score: 0.6549
249
+ 2025-09-18 23:50:48,840 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0337 | Val rms_score: 0.6718
250
+ 2025-09-18 23:50:58,743 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0380 | Val rms_score: 0.6700
251
+ 2025-09-18 23:51:07,893 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0352 | Val rms_score: 0.6682
252
+ 2025-09-18 23:51:08,647 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6922
253
+ 2025-09-18 23:51:09,006 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Starting triplicate run 3 for dataset lipo at 2025-09-18_23-51-09
254
+ 2025-09-18 23:51:18,521 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 1/100 | Train Loss: 0.3484 | Val rms_score: 0.8474
255
+ 2025-09-18 23:51:18,522 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 105
256
+ 2025-09-18 23:51:19,054 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 1 with val rms_score: 0.8474
257
+ 2025-09-18 23:51:28,426 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 2/100 | Train Loss: 0.3344 | Val rms_score: 0.7868
258
+ 2025-09-18 23:51:28,597 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 210
259
+ 2025-09-18 23:51:29,132 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 2 with val rms_score: 0.7868
260
+ 2025-09-18 23:51:38,301 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 3/100 | Train Loss: 0.2594 | Val rms_score: 0.7202
261
+ 2025-09-18 23:51:38,478 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 315
262
+ 2025-09-18 23:51:39,004 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 3 with val rms_score: 0.7202
263
+ 2025-09-18 23:51:48,640 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 4/100 | Train Loss: 0.2016 | Val rms_score: 0.7719
264
+ 2025-09-18 23:51:57,553 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 5/100 | Train Loss: 0.1713 | Val rms_score: 0.7134
265
+ 2025-09-18 23:51:57,730 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 525
266
+ 2025-09-18 23:51:58,271 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 5 with val rms_score: 0.7134
267
+ 2025-09-18 23:52:08,106 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 6/100 | Train Loss: 0.1396 | Val rms_score: 0.7031
268
+ 2025-09-18 23:52:08,636 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 630
269
+ 2025-09-18 23:52:09,173 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 6 with val rms_score: 0.7031
270
+ 2025-09-18 23:52:18,750 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 7/100 | Train Loss: 0.1321 | Val rms_score: 0.7573
271
+ 2025-09-18 23:52:27,900 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 8/100 | Train Loss: 0.0984 | Val rms_score: 0.7008
272
+ 2025-09-18 23:52:28,084 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 840
273
+ 2025-09-18 23:52:28,612 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 8 with val rms_score: 0.7008
274
+ 2025-09-18 23:52:38,440 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 9/100 | Train Loss: 0.0931 | Val rms_score: 0.6720
275
+ 2025-09-18 23:52:38,617 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 945
276
+ 2025-09-18 23:52:39,151 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 9 with val rms_score: 0.6720
277
+ 2025-09-18 23:52:49,262 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 10/100 | Train Loss: 0.0862 | Val rms_score: 0.7053
278
+ 2025-09-18 23:52:58,578 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 11/100 | Train Loss: 0.0807 | Val rms_score: 0.6868
279
+ 2025-09-18 23:53:08,722 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 12/100 | Train Loss: 0.0844 | Val rms_score: 0.6855
280
+ 2025-09-18 23:53:17,248 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 13/100 | Train Loss: 0.0774 | Val rms_score: 0.7012
281
+ 2025-09-18 23:53:27,073 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 14/100 | Train Loss: 0.0759 | Val rms_score: 0.6651
282
+ 2025-09-18 23:53:27,216 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 1470
283
+ 2025-09-18 23:53:27,759 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 14 with val rms_score: 0.6651
284
+ 2025-09-18 23:53:36,902 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 15/100 | Train Loss: 0.0717 | Val rms_score: 0.6895
285
+ 2025-09-18 23:53:46,673 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 16/100 | Train Loss: 0.0684 | Val rms_score: 0.6979
286
+ 2025-09-18 23:53:56,606 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 17/100 | Train Loss: 0.0640 | Val rms_score: 0.6799
287
+ 2025-09-18 23:54:05,888 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 18/100 | Train Loss: 0.0590 | Val rms_score: 0.6815
288
+ 2025-09-18 23:54:15,702 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 19/100 | Train Loss: 0.0586 | Val rms_score: 0.6997
289
+ 2025-09-18 23:54:25,696 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 20/100 | Train Loss: 0.0572 | Val rms_score: 0.6884
290
+ 2025-09-18 23:54:35,368 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 21/100 | Train Loss: 0.0559 | Val rms_score: 0.6709
291
+ 2025-09-18 23:54:44,704 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 22/100 | Train Loss: 0.0551 | Val rms_score: 0.6874
292
+ 2025-09-18 23:54:54,336 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 23/100 | Train Loss: 0.0516 | Val rms_score: 0.6808
293
+ 2025-09-18 23:55:04,001 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 24/100 | Train Loss: 0.0570 | Val rms_score: 0.6697
294
+ 2025-09-18 23:55:13,189 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 25/100 | Train Loss: 0.0512 | Val rms_score: 0.6839
295
+ 2025-09-18 23:55:23,100 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 26/100 | Train Loss: 0.0565 | Val rms_score: 0.7028
296
+ 2025-09-18 23:55:32,500 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 27/100 | Train Loss: 0.0520 | Val rms_score: 0.6868
297
+ 2025-09-18 23:55:42,224 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 28/100 | Train Loss: 0.0523 | Val rms_score: 0.6720
298
+ 2025-09-18 23:55:52,521 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 29/100 | Train Loss: 0.0507 | Val rms_score: 0.6700
299
+ 2025-09-18 23:56:01,509 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 30/100 | Train Loss: 0.0497 | Val rms_score: 0.6811
300
+ 2025-09-18 23:56:11,267 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 31/100 | Train Loss: 0.0491 | Val rms_score: 0.6745
301
+ 2025-09-18 23:56:20,716 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 32/100 | Train Loss: 0.0513 | Val rms_score: 0.6883
302
+ 2025-09-18 23:56:30,534 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 33/100 | Train Loss: 0.0514 | Val rms_score: 0.6671
303
+ 2025-09-18 23:56:39,713 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 34/100 | Train Loss: 0.0482 | Val rms_score: 0.6971
304
+ 2025-09-18 23:56:49,434 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 35/100 | Train Loss: 0.0465 | Val rms_score: 0.6760
305
+ 2025-09-18 23:56:59,000 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 36/100 | Train Loss: 0.0451 | Val rms_score: 0.6740
306
+ 2025-09-18 23:57:08,514 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 37/100 | Train Loss: 0.0461 | Val rms_score: 0.6832
307
+ 2025-09-18 23:57:18,294 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 38/100 | Train Loss: 0.0486 | Val rms_score: 0.6801
308
+ 2025-09-18 23:57:27,751 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 39/100 | Train Loss: 0.0437 | Val rms_score: 0.6684
309
+ 2025-09-18 23:57:37,578 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 40/100 | Train Loss: 0.0437 | Val rms_score: 0.6790
310
+ 2025-09-18 23:57:46,673 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 41/100 | Train Loss: 0.0449 | Val rms_score: 0.6797
311
+ 2025-09-18 23:57:56,393 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 42/100 | Train Loss: 0.0445 | Val rms_score: 0.6587
312
+ 2025-09-18 23:57:56,542 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Global step of best model: 4410
313
+ 2025-09-18 23:57:57,082 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Best model saved at epoch 42 with val rms_score: 0.6587
314
+ 2025-09-18 23:58:06,871 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 43/100 | Train Loss: 0.0466 | Val rms_score: 0.6863
315
+ 2025-09-18 23:58:16,031 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 44/100 | Train Loss: 0.0484 | Val rms_score: 0.6850
316
+ 2025-09-18 23:58:25,852 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 45/100 | Train Loss: 0.0419 | Val rms_score: 0.6827
317
+ 2025-09-18 23:58:34,958 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 46/100 | Train Loss: 0.0440 | Val rms_score: 0.6830
318
+ 2025-09-18 23:58:44,819 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 47/100 | Train Loss: 0.0406 | Val rms_score: 0.6865
319
+ 2025-09-18 23:58:54,939 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 48/100 | Train Loss: 0.0463 | Val rms_score: 0.6667
320
+ 2025-09-18 23:59:04,046 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 49/100 | Train Loss: 0.0455 | Val rms_score: 0.6893
321
+ 2025-09-18 23:59:13,836 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 50/100 | Train Loss: 0.0506 | Val rms_score: 0.6799
322
+ 2025-09-18 23:59:22,946 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 51/100 | Train Loss: 0.0455 | Val rms_score: 0.6851
323
+ 2025-09-18 23:59:33,093 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 52/100 | Train Loss: 0.0458 | Val rms_score: 0.6974
324
+ 2025-09-18 23:59:42,375 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 53/100 | Train Loss: 0.0421 | Val rms_score: 0.6724
325
+ 2025-09-18 23:59:51,779 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 54/100 | Train Loss: 0.0442 | Val rms_score: 0.6895
326
+ 2025-09-19 00:00:01,635 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 55/100 | Train Loss: 0.0419 | Val rms_score: 0.6919
327
+ 2025-09-19 00:00:10,673 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 56/100 | Train Loss: 0.0463 | Val rms_score: 0.6858
328
+ 2025-09-19 00:00:20,339 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 57/100 | Train Loss: 0.0410 | Val rms_score: 0.6842
329
+ 2025-09-19 00:00:30,383 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 58/100 | Train Loss: 0.0413 | Val rms_score: 0.6764
330
+ 2025-09-19 00:00:40,071 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 59/100 | Train Loss: 0.0411 | Val rms_score: 0.6864
331
+ 2025-09-19 00:00:49,652 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 60/100 | Train Loss: 0.0413 | Val rms_score: 0.6817
332
+ 2025-09-19 00:00:58,927 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 61/100 | Train Loss: 0.0391 | Val rms_score: 0.6992
333
+ 2025-09-19 00:01:09,039 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 62/100 | Train Loss: 0.0418 | Val rms_score: 0.6668
334
+ 2025-09-19 00:01:18,209 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 63/100 | Train Loss: 0.0411 | Val rms_score: 0.6789
335
+ 2025-09-19 00:01:28,041 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 64/100 | Train Loss: 0.0418 | Val rms_score: 0.6799
336
+ 2025-09-19 00:01:36,827 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 65/100 | Train Loss: 0.0413 | Val rms_score: 0.6814
337
+ 2025-09-19 00:01:46,206 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 66/100 | Train Loss: 0.0398 | Val rms_score: 0.6868
338
+ 2025-09-19 00:01:56,920 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 67/100 | Train Loss: 0.0431 | Val rms_score: 0.6772
339
+ 2025-09-19 00:02:06,048 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 68/100 | Train Loss: 0.0443 | Val rms_score: 0.7007
340
+ 2025-09-19 00:02:15,831 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 69/100 | Train Loss: 0.0425 | Val rms_score: 0.6806
341
+ 2025-09-19 00:02:24,984 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 70/100 | Train Loss: 0.0397 | Val rms_score: 0.6825
342
+ 2025-09-19 00:02:34,752 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 71/100 | Train Loss: 0.0423 | Val rms_score: 0.6787
343
+ 2025-09-19 00:02:44,528 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 72/100 | Train Loss: 0.0432 | Val rms_score: 0.6698
344
+ 2025-09-19 00:02:53,893 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 73/100 | Train Loss: 0.0401 | Val rms_score: 0.6785
345
+ 2025-09-19 00:03:03,285 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 74/100 | Train Loss: 0.0393 | Val rms_score: 0.6724
346
+ 2025-09-19 00:03:12,425 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 75/100 | Train Loss: 0.0365 | Val rms_score: 0.6745
347
+ 2025-09-19 00:03:22,245 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 76/100 | Train Loss: 0.0426 | Val rms_score: 0.6675
348
+ 2025-09-19 00:03:32,947 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 77/100 | Train Loss: 0.0397 | Val rms_score: 0.6866
349
+ 2025-09-19 00:03:42,728 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 78/100 | Train Loss: 0.0391 | Val rms_score: 0.6755
350
+ 2025-09-19 00:03:52,358 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 79/100 | Train Loss: 0.0391 | Val rms_score: 0.6867
351
+ 2025-09-19 00:04:01,483 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 80/100 | Train Loss: 0.0398 | Val rms_score: 0.6775
352
+ 2025-09-19 00:04:11,252 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 81/100 | Train Loss: 0.0305 | Val rms_score: 0.6729
353
+ 2025-09-19 00:04:20,593 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 82/100 | Train Loss: 0.0488 | Val rms_score: 0.6725
354
+ 2025-09-19 00:04:30,022 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 83/100 | Train Loss: 0.0432 | Val rms_score: 0.6829
355
+ 2025-09-19 00:04:39,256 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 84/100 | Train Loss: 0.0385 | Val rms_score: 0.6733
356
+ 2025-09-19 00:04:48,987 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 85/100 | Train Loss: 0.0431 | Val rms_score: 0.6731
357
+ 2025-09-19 00:04:59,540 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 86/100 | Train Loss: 0.0404 | Val rms_score: 0.6791
358
+ 2025-09-19 00:05:09,071 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 87/100 | Train Loss: 0.0375 | Val rms_score: 0.6728
359
+ 2025-09-19 00:05:18,797 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 88/100 | Train Loss: 0.0379 | Val rms_score: 0.6679
360
+ 2025-09-19 00:05:27,891 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 89/100 | Train Loss: 0.0363 | Val rms_score: 0.6782
361
+ 2025-09-19 00:05:37,599 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 90/100 | Train Loss: 0.0398 | Val rms_score: 0.6828
362
+ 2025-09-19 00:05:46,784 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 91/100 | Train Loss: 0.0395 | Val rms_score: 0.6697
363
+ 2025-09-19 00:05:56,241 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 92/100 | Train Loss: 0.0396 | Val rms_score: 0.6796
364
+ 2025-09-19 00:06:06,042 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 93/100 | Train Loss: 0.0382 | Val rms_score: 0.6812
365
+ 2025-09-19 00:06:15,230 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 94/100 | Train Loss: 0.0422 | Val rms_score: 0.6745
366
+ 2025-09-19 00:06:25,022 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 95/100 | Train Loss: 0.0387 | Val rms_score: 0.6907
367
+ 2025-09-19 00:06:35,466 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 96/100 | Train Loss: 0.0348 | Val rms_score: 0.6724
368
+ 2025-09-19 00:06:45,250 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 97/100 | Train Loss: 0.0368 | Val rms_score: 0.6745
369
+ 2025-09-19 00:06:55,079 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 98/100 | Train Loss: 0.0382 | Val rms_score: 0.6913
370
+ 2025-09-19 00:07:04,120 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 99/100 | Train Loss: 0.0382 | Val rms_score: 0.6734
371
+ 2025-09-19 00:07:13,911 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Epoch 100/100 | Train Loss: 0.0383 | Val rms_score: 0.6610
372
+ 2025-09-19 00:07:14,448 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Test rms_score: 0.6876
373
+ 2025-09-19 00:07:14,828 - logs_modchembert_lipo_epochs100_batch_size32 - INFO - Final Triplicate Test Results — Avg rms_score: 0.6874, Std Dev: 0.0040
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3efb42279804940552d9af7328ae9b96d8a8b6a147609ed30aa2dfb21d95fff1
3
+ size 230524062
modeling_modchembert.py ADDED
@@ -0,0 +1,554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 Emmanuel Cortes, All Rights Reserved.
2
+ #
3
+ # Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ # This file is adapted from the transformers library.
19
+ # Modifications include:
20
+ # - Additional classifier_pooling options for ModChemBertForSequenceClassification
21
+ # - sum_mean, sum_sum, mean_sum, mean_mean: from ChemLM (utilizes all hidden states)
22
+ # - max_cls, cls_mha, max_seq_mha: from MaxPoolBERT (utilizes last k hidden states)
23
+ # - max_seq_mean: a merge between sum_mean and max_cls (utilizes last k hidden states)
24
+ # - Addition of ModChemBertPoolingAttention for cls_mha and max_seq_mha pooling options
25
+
26
+ import copy
27
+ import math
28
+ import typing
29
+ from contextlib import nullcontext
30
+
31
+ import torch
32
+ import torch.nn as nn
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
35
+ from transformers.modeling_outputs import MaskedLMOutput, SequenceClassifierOutput
36
+ from transformers.models.modernbert.modeling_modernbert import (
37
+ MODERNBERT_ATTENTION_FUNCTION,
38
+ ModernBertModel,
39
+ ModernBertPredictionHead,
40
+ ModernBertPreTrainedModel,
41
+ ModernBertRotaryEmbedding,
42
+ _pad_modernbert_output,
43
+ _unpad_modernbert_input,
44
+ )
45
+ from transformers.utils import logging
46
+
47
+ from .configuration_modchembert import ModChemBertConfig
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
class InitWeightsMixin:
    """Mixin layering ModChemBert-specific weight initialization on top of the
    parent class's ``_init_weights``."""

    def _init_weights(self, module: nn.Module):
        """Run the parent initializer, then re-initialize ModChemBert-specific
        linear layers with a truncated normal whose std depends on the layer's
        role ("in", "out", or "final_out"); biases are zeroed.
        """
        super()._init_weights(module)  # type: ignore

        cutoff = self.config.initializer_cutoff_factor  # type: ignore
        cutoff = 3 if cutoff is None else cutoff

        def _reset_linear(layer: nn.Module, std: float) -> None:
            # Only nn.Linear layers are re-initialized; anything else is left as-is.
            if not isinstance(layer, nn.Linear):
                return
            nn.init.trunc_normal_(
                layer.weight,
                mean=0.0,
                std=std,
                a=-cutoff * std,
                b=cutoff * std,
            )
            if layer.bias is not None:
                nn.init.zeros_(layer.bias)

        # Role-specific standard deviations (same scheme as ModernBert).
        std_in = self.config.initializer_range  # type: ignore
        std_out = std_in / math.sqrt(2.0 * self.config.num_hidden_layers)  # type: ignore
        std_final = self.config.hidden_size**-0.5  # type: ignore

        if isinstance(module, ModChemBertForMaskedLM):
            _reset_linear(module.decoder, std_out)
        elif isinstance(module, ModChemBertForSequenceClassification):
            _reset_linear(module.classifier, std_final)
        elif isinstance(module, ModChemBertPoolingAttention):
            for projection in (module.Wq, module.Wk, module.Wv):
                _reset_linear(projection, std_in)
            _reset_linear(module.Wo, std_out)
87
+
88
+
89
class ModChemBertPoolingAttention(nn.Module):
    """Multi-headed attention used for classifier pooling (cls_mha / max_seq_mha).

    Attends a query sequence over a key/value sequence with global
    (non-sliding-window) attention, using the pooling-specific head count and
    dropout configured via ``classifier_pooling_num_attention_heads`` and
    ``classifier_pooling_attention_dropout``.
    """

    def __init__(self, config: ModChemBertConfig):
        super().__init__()
        # Work on a private copy so pooling-specific overrides never leak back
        # into the shared model config.
        self.config = copy.deepcopy(config)
        # Override num_attention_heads to use classifier_pooling_num_attention_heads
        self.config.num_attention_heads = config.classifier_pooling_num_attention_heads
        # Override attention_dropout to use classifier_pooling_attention_dropout
        self.config.attention_dropout = config.classifier_pooling_attention_dropout

        # FIX: validate and derive all attention geometry from the *overridden*
        # pooling config. Previously these reads went through the original
        # `config`, so the classifier_pooling_* overrides above were dead code
        # and the encoder's head count / dropout were silently used instead.
        if self.config.hidden_size % self.config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({self.config.hidden_size}) is not a multiple of the number of attention heads "
                f"({self.config.num_attention_heads})"
            )

        self.attention_dropout = self.config.attention_dropout
        self.num_heads = self.config.num_attention_heads
        self.head_dim = self.config.hidden_size // self.config.num_attention_heads
        self.all_head_size = self.head_dim * self.num_heads
        self.Wq = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
        self.Wk = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)
        self.Wv = nn.Linear(config.hidden_size, self.all_head_size, bias=config.attention_bias)

        # Use global attention
        self.local_attention = (-1, -1)
        # sdpa path from original ModernBert implementation; the rotary config
        # carries the pooling head count so its rotary dim matches head_dim.
        config_copy = copy.deepcopy(self.config)
        config_copy.rope_theta = config.global_rope_theta
        self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy)

        self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        self.out_drop = (
            nn.Dropout(self.config.attention_dropout) if self.config.attention_dropout > 0.0 else nn.Identity()
        )
        self.pruned_heads = set()

    def forward(
        self,
        q: torch.Tensor,
        kv: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """Attend ``q`` over ``kv`` and return the projected attention output.

        Args:
            q: Query states. NOTE(review): assumed to share ``kv``'s
                (batch, seq_len) dimensions, since the projection below is
                reshaped with ``kv``'s shape — confirm against callers.
            kv: Key/value states of shape (batch, seq_len, hidden).
            attention_mask: Optional (batch, seq_len) padding mask; defaults
                to all-ones (no padding).

        Returns:
            Tensor of shape (batch, seq_len, hidden).
        """
        bs, seq_len = kv.shape[:2]
        q_proj: torch.Tensor = self.Wq(q)
        k_proj: torch.Tensor = self.Wk(kv)
        v_proj: torch.Tensor = self.Wv(kv)
        qkv = torch.stack(
            (
                q_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
                k_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
                v_proj.reshape(bs, seq_len, self.num_heads, self.head_dim),
            ),
            dim=2,
        )  # (bs, seq_len, 3, num_heads, head_dim)

        device = kv.device
        if attention_mask is None:
            attention_mask = torch.ones((bs, seq_len), device=device, dtype=torch.bool)
        position_ids = torch.arange(seq_len, device=device).unsqueeze(0).long()

        attn_outputs = MODERNBERT_ATTENTION_FUNCTION["sdpa"](
            self,
            qkv=qkv,
            attention_mask=_prepare_4d_attention_mask(attention_mask, kv.dtype),
            sliding_window_mask=None,  # not needed when using global attention
            position_ids=position_ids,
            local_attention=self.local_attention,
            bs=bs,
            dim=self.all_head_size,
            **kwargs,
        )
        hidden_states = attn_outputs[0]
        hidden_states = self.out_drop(self.Wo(hidden_states))

        return hidden_states
166
+
167
+
168
+ class ModChemBertForMaskedLM(InitWeightsMixin, ModernBertPreTrainedModel):
169
+ config_class = ModChemBertConfig
170
+ _tied_weights_keys = ["decoder.weight"]
171
+
172
+ def __init__(self, config: ModChemBertConfig):
173
+ super().__init__(config)
174
+ self.config = config
175
+ self.model = ModernBertModel(config)
176
+ self.head = ModernBertPredictionHead(config)
177
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)
178
+
179
+ self.sparse_prediction = self.config.sparse_prediction
180
+ self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index
181
+
182
+ # Initialize weights and apply final processing
183
+ self.post_init()
184
+
185
    def get_output_embeddings(self):
        """Return the vocab decoder; HF hook used for output-embedding access/tying."""
        return self.decoder
187
+
188
    def set_output_embeddings(self, new_embeddings: nn.Linear):
        """Replace the vocab decoder; HF hook used for weight tying / embedding resizing."""
        self.decoder = new_embeddings
190
+
191
    @torch.compile(dynamic=True)
    def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
        """Apply the prediction head then the decoder, compiled with dynamic shapes."""
        return self.decoder(self.head(output))
194
+
195
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        sliding_window_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        indices: torch.Tensor | None = None,
        cu_seqlens: torch.Tensor | None = None,
        max_seqlen: int | None = None,
        batch_size: int | None = None,
        seq_len: int | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | MaskedLMOutput:
        r"""
        Masked-LM forward pass: encoder -> (optional sparse label filtering) -> prediction head.

        sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
            perform global attention, while the rest perform local attention. This mask is used to avoid attending to
            far-away tokens in the local attention layers when not using Flash Attention.
        indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
            Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
        cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
            Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
        max_seqlen (`int`, *optional*):
            Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
        batch_size (`int`, *optional*):
            Batch size of the input sequences. Used to pad the output tensors.
        seq_len (`int`, *optional*):
            Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        self._maybe_set_compile()

        # With Flash Attention 2 the encoder consumes an unpadded (varlen) token
        # stream. If the caller passed padded inputs (no indices/cu_seqlens/max_seqlen),
        # unpad them here; the logits are re-padded at the end to (batch, seq_len, ...).
        if self.config._attn_implementation == "flash_attention_2":  # noqa: SIM102
            if indices is None and cu_seqlens is None and max_seqlen is None:
                if batch_size is None and seq_len is None:
                    if inputs_embeds is not None:
                        batch_size, seq_len = inputs_embeds.shape[:2]
                    else:
                        batch_size, seq_len = input_ids.shape[:2]  # type: ignore
                device = input_ids.device if input_ids is not None else inputs_embeds.device  # type: ignore

                if attention_mask is None:
                    attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)  # type: ignore

                # no_grad only wraps the input_ids path: index bookkeeping on integer
                # ids needs no autograd, whereas inputs_embeds may require gradients.
                if inputs_embeds is None:
                    with torch.no_grad():
                        input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
                            inputs=input_ids,  # type: ignore
                            attention_mask=attention_mask,  # type: ignore
                            position_ids=position_ids,
                            labels=labels,
                        )
                else:
                    inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(
                        inputs=inputs_embeds,
                        attention_mask=attention_mask,  # type: ignore
                        position_ids=position_ids,
                        labels=labels,
                    )

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            sliding_window_mask=sliding_window_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            batch_size=batch_size,
            seq_len=seq_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]

        # Sparse prediction: run the (expensive) MLM head only on masked positions.
        if self.sparse_prediction and labels is not None:
            # flatten labels and output first
            labels = labels.view(-1)
            last_hidden_state = last_hidden_state.view(labels.shape[0], -1)

            # then filter out the non-masked tokens
            mask_tokens = labels != self.sparse_pred_ignore_index
            last_hidden_state = last_hidden_state[mask_tokens]
            labels = labels[mask_tokens]

        # Use the torch.compile'd head when reference_compile is enabled; both paths
        # compute decoder(head(hidden)).
        logits = (
            self.compiled_head(last_hidden_state)
            if self.config.reference_compile
            else self.decoder(self.head(last_hidden_state))
        )

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)

        # Re-pad unpadded FA2 logits back to (batch, seq_len, vocab). Gradients through
        # the repad are only kept when explicitly requested (or when no loss was computed).
        if self.config._attn_implementation == "flash_attention_2":
            with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
                logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len)  # type: ignore

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return MaskedLMOutput(
            loss=loss,
            logits=typing.cast(torch.FloatTensor, logits),
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
311
+
312
+
313
class ModChemBertForSequenceClassification(InitWeightsMixin, ModernBertPreTrainedModel):
    """ModChemBERT encoder topped with a configurable pooling step, a ModernBert
    prediction head, dropout, and a linear classifier for sequence-level
    classification or regression (``config.num_labels`` outputs)."""

    config_class = ModChemBertConfig

    def __init__(self, config: ModChemBertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.model = ModernBertModel(config)
        # Only the attention-based pooling modes need a dedicated pooling attention module.
        if self.config.classifier_pooling in {"cls_mha", "max_seq_mha"}:
            self.pooling_attn = ModChemBertPoolingAttention(config=self.config)
        else:
            self.pooling_attn = None
        self.head = ModernBertPredictionHead(config)
        self.drop = torch.nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        sliding_window_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        indices: torch.Tensor | None = None,
        cu_seqlens: torch.Tensor | None = None,
        max_seqlen: int | None = None,
        batch_size: int | None = None,
        seq_len: int | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, typing.Any] | SequenceClassifierOutput:
        r"""
        sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
            perform global attention, while the rest perform local attention. This mask is used to avoid attending to
            far-away tokens in the local attention layers when not using Flash Attention.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
            Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
        cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
            Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
        max_seqlen (`int`, *optional*):
            Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids & pad output tensors.
        batch_size (`int`, *optional*):
            Batch size of the input sequences. Used to pad the output tensors.
        seq_len (`int`, *optional*):
            Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        self._maybe_set_compile()

        if input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)

        # Derive batch/seq dims from whichever input was provided, then default the
        # attention mask to all-ones (every token valid) when the caller omitted it.
        if batch_size is None and seq_len is None:
            if inputs_embeds is not None:
                batch_size, seq_len = inputs_embeds.shape[:2]
            else:
                batch_size, seq_len = input_ids.shape[:2]  # type: ignore
        device = input_ids.device if input_ids is not None else inputs_embeds.device  # type: ignore

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)  # type: ignore

        # Ensure output_hidden_states is True in case pooling mode requires all hidden states
        # NOTE: this unconditionally overrides the caller-supplied value.
        output_hidden_states = True

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            sliding_window_mask=sliding_window_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
            batch_size=batch_size,
            seq_len=seq_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Position 0: last-layer hidden states; position 1: per-layer hidden states
        # (present because output_hidden_states was forced on above).
        last_hidden_state = outputs[0]
        hidden_states = outputs[1]

        # Collapse (batch, seq_len, hidden) -> (batch, hidden) per config.classifier_pooling.
        last_hidden_state = _pool_modchembert_output(
            self,
            last_hidden_state,
            hidden_states,
            typing.cast(torch.Tensor, attention_mask),
        )
        pooled_output = self.head(last_hidden_state)
        pooled_output = self.drop(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once (standard HF convention) and cache it on the
            # config so subsequent calls reuse the same loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
451
+
452
+
453
def _pool_modchembert_output(
    module: ModChemBertForSequenceClassification,
    last_hidden_state: torch.Tensor,
    hidden_states: list[torch.Tensor],
    attention_mask: torch.Tensor,
):
    """
    Collapse per-token hidden states into one vector per sequence.

    The strategy is chosen by ``config.classifier_pooling``:

    - cls: CLS token of the last layer
    - mean: attention-mask-weighted mean over the last layer
    - max_cls: element-wise max over the last k layers, then the CLS token
    - cls_mha: multi-head attention with the CLS token as query over the last
      layer, then mean over the sequence
    - max_seq_mha: element-wise max over the last k layers, then CLS-query
      multi-head attention, then mean over the sequence
    - max_seq_mean: element-wise max over the last k layers, then mean over the sequence
    - sum_mean / sum_sum / mean_sum / mean_mean: aggregate across all layers
      (sum or mean), then aggregate across the sequence (mean or sum); the
      all-layer variants follow ChemLM (https://doi.org/10.1038/s42004-025-01484-4)

    Args:
        module: Model holding the config and, for the *_mha modes, a ``pooling_attn``
            ModChemBertPoolingAttention instance
        last_hidden_state: Final layer hidden states, (batch, seq_len, hidden)
        hidden_states: Hidden states from all layers, each (batch, seq_len, hidden)
        attention_mask: Valid-token mask, (batch, seq_len)

    Returns:
        torch.Tensor: Pooled representation of shape (batch, hidden). An
        unrecognized pooling mode falls through and returns ``last_hidden_state``
        unchanged.
    """
    config = typing.cast(ModChemBertConfig, module.config)
    mode = config.classifier_pooling

    def _max_over_last_k() -> torch.Tensor:
        # Element-wise max across the last k layers -> (batch, seq_len, hidden)
        stacked = torch.stack(hidden_states[-config.classifier_pooling_last_k :], dim=1)
        return torch.max(stacked, dim=1).values

    if mode == "cls":
        return last_hidden_state[:, 0]

    if mode == "mean":
        mask = attention_mask.unsqueeze(-1)
        token_counts = attention_mask.sum(dim=1, keepdim=True)
        return (last_hidden_state * mask).sum(dim=1) / token_counts

    if mode == "max_cls":
        return _max_over_last_k()[:, 0, :]

    if mode == "cls_mha":
        # Query: CLS embedding broadcast across the sequence; keys/values: full last layer.
        query = last_hidden_state[:, 0, :].unsqueeze(1).expand(-1, last_hidden_state.shape[1], -1)
        attn_out: torch.Tensor = module.pooling_attn(  # type: ignore
            q=query, kv=last_hidden_state, attention_mask=attention_mask
        )  # (batch, seq_len, hidden)
        return torch.mean(attn_out, dim=1)

    if mode == "max_seq_mha":
        # Like cls_mha, but query/keys/values come from the k-layer max-pooled sequence.
        pooled_seq = _max_over_last_k()
        query = pooled_seq[:, 0, :].unsqueeze(1).expand(-1, pooled_seq.shape[1], -1)
        attn_out: torch.Tensor = module.pooling_attn(  # type: ignore
            q=query, kv=pooled_seq, attention_mask=attention_mask
        )  # (batch, seq_len, hidden)
        return torch.mean(attn_out, dim=1)

    if mode == "max_seq_mean":
        return torch.mean(_max_over_last_k(), dim=1)

    if mode in {"sum_mean", "sum_sum", "mean_sum", "mean_mean"}:
        # Aggregate across layers first, then across the sequence dimension.
        # NOTE(review): these modes ignore attention_mask, so padding tokens
        # contribute to the sequence aggregate — matches the original behavior.
        layer_op, seq_op = mode.split("_")
        all_layers = torch.stack(hidden_states)
        across_layers = torch.sum(all_layers, dim=0) if layer_op == "sum" else torch.mean(all_layers, dim=0)
        return torch.sum(across_layers, dim=1) if seq_op == "sum" else torch.mean(across_layers, dim=1)

    return last_hidden_state
549
+
550
+
551
# Public API exported via ``from modeling_modchembert import *``.
__all__ = [
    "ModChemBertForMaskedLM",
    "ModChemBertForSequenceClassification",
]
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
tokenizer.json ADDED
@@ -0,0 +1,2554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 256,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
+ "padding": {
10
+ "strategy": "BatchLongest",
11
+ "direction": "Right",
12
+ "pad_to_multiple_of": 8,
13
+ "pad_id": 2,
14
+ "pad_type_id": 0,
15
+ "pad_token": "[PAD]"
16
+ },
17
+ "added_tokens": [
18
+ {
19
+ "id": 0,
20
+ "content": "[CLS]",
21
+ "single_word": false,
22
+ "lstrip": false,
23
+ "rstrip": false,
24
+ "normalized": false,
25
+ "special": true
26
+ },
27
+ {
28
+ "id": 1,
29
+ "content": "[SEP]",
30
+ "single_word": false,
31
+ "lstrip": false,
32
+ "rstrip": false,
33
+ "normalized": false,
34
+ "special": true
35
+ },
36
+ {
37
+ "id": 2,
38
+ "content": "[PAD]",
39
+ "single_word": false,
40
+ "lstrip": false,
41
+ "rstrip": false,
42
+ "normalized": false,
43
+ "special": true
44
+ },
45
+ {
46
+ "id": 3,
47
+ "content": "[MASK]",
48
+ "single_word": false,
49
+ "lstrip": false,
50
+ "rstrip": false,
51
+ "normalized": false,
52
+ "special": true
53
+ },
54
+ {
55
+ "id": 2361,
56
+ "content": "[UNK]",
57
+ "single_word": false,
58
+ "lstrip": false,
59
+ "rstrip": false,
60
+ "normalized": false,
61
+ "special": true
62
+ }
63
+ ],
64
+ "normalizer": null,
65
+ "pre_tokenizer": {
66
+ "type": "ByteLevel",
67
+ "add_prefix_space": false,
68
+ "trim_offsets": true,
69
+ "use_regex": true
70
+ },
71
+ "post_processor": {
72
+ "type": "TemplateProcessing",
73
+ "single": [
74
+ {
75
+ "SpecialToken": {
76
+ "id": "[CLS]",
77
+ "type_id": 0
78
+ }
79
+ },
80
+ {
81
+ "Sequence": {
82
+ "id": "A",
83
+ "type_id": 0
84
+ }
85
+ },
86
+ {
87
+ "SpecialToken": {
88
+ "id": "[SEP]",
89
+ "type_id": 0
90
+ }
91
+ }
92
+ ],
93
+ "pair": [
94
+ {
95
+ "SpecialToken": {
96
+ "id": "[CLS]",
97
+ "type_id": 0
98
+ }
99
+ },
100
+ {
101
+ "Sequence": {
102
+ "id": "A",
103
+ "type_id": 0
104
+ }
105
+ },
106
+ {
107
+ "SpecialToken": {
108
+ "id": "[SEP]",
109
+ "type_id": 0
110
+ }
111
+ },
112
+ {
113
+ "Sequence": {
114
+ "id": "B",
115
+ "type_id": 0
116
+ }
117
+ },
118
+ {
119
+ "SpecialToken": {
120
+ "id": "[SEP]",
121
+ "type_id": 0
122
+ }
123
+ }
124
+ ],
125
+ "special_tokens": {
126
+ "[CLS]": {
127
+ "id": "[CLS]",
128
+ "ids": [
129
+ 0
130
+ ],
131
+ "tokens": [
132
+ "[CLS]"
133
+ ]
134
+ },
135
+ "[MASK]": {
136
+ "id": "[MASK]",
137
+ "ids": [
138
+ 3
139
+ ],
140
+ "tokens": [
141
+ "[MASK]"
142
+ ]
143
+ },
144
+ "[PAD]": {
145
+ "id": "[PAD]",
146
+ "ids": [
147
+ 2
148
+ ],
149
+ "tokens": [
150
+ "[PAD]"
151
+ ]
152
+ },
153
+ "[SEP]": {
154
+ "id": "[SEP]",
155
+ "ids": [
156
+ 1
157
+ ],
158
+ "tokens": [
159
+ "[SEP]"
160
+ ]
161
+ },
162
+ "[UNK]": {
163
+ "id": "[UNK]",
164
+ "ids": [
165
+ 2361
166
+ ],
167
+ "tokens": [
168
+ "[UNK]"
169
+ ]
170
+ }
171
+ }
172
+ },
173
+ "decoder": {
174
+ "type": "ByteLevel",
175
+ "add_prefix_space": false,
176
+ "trim_offsets": true,
177
+ "use_regex": true
178
+ },
179
+ "model": {
180
+ "type": "BPE",
181
+ "dropout": null,
182
+ "unk_token": "[UNK]",
183
+ "continuing_subword_prefix": null,
184
+ "end_of_word_suffix": null,
185
+ "fuse_unk": false,
186
+ "byte_fallback": false,
187
+ "ignore_merges": false,
188
+ "vocab": {
189
+ "[CLS]": 0,
190
+ "[SEP]": 1,
191
+ "[PAD]": 2,
192
+ "[MASK]": 3,
193
+ "C": 4,
194
+ "c": 5,
195
+ "(": 6,
196
+ ")": 7,
197
+ "1": 8,
198
+ "O": 9,
199
+ "N": 10,
200
+ "2": 11,
201
+ "=": 12,
202
+ "n": 13,
203
+ "3": 14,
204
+ "[C@H]": 15,
205
+ "[C@@H]": 16,
206
+ "F": 17,
207
+ "S": 18,
208
+ "4": 19,
209
+ "Cl": 20,
210
+ "-": 21,
211
+ "o": 22,
212
+ "s": 23,
213
+ "[nH]": 24,
214
+ "#": 25,
215
+ "/": 26,
216
+ "Br": 27,
217
+ "[C@]": 28,
218
+ "[C@@]": 29,
219
+ "[N+]": 30,
220
+ "[O-]": 31,
221
+ "5": 32,
222
+ "\\": 33,
223
+ ".": 34,
224
+ "I": 35,
225
+ "6": 36,
226
+ "[S@]": 37,
227
+ "[S@@]": 38,
228
+ "P": 39,
229
+ "[N-]": 40,
230
+ "[Si]": 41,
231
+ "7": 42,
232
+ "[n+]": 43,
233
+ "[2H]": 44,
234
+ "8": 45,
235
+ "[NH+]": 46,
236
+ "B": 47,
237
+ "9": 48,
238
+ "[C-]": 49,
239
+ "[Na+]": 50,
240
+ "[Cl-]": 51,
241
+ "[c-]": 52,
242
+ "[CH]": 53,
243
+ "%10": 54,
244
+ "[NH2+]": 55,
245
+ "[P+]": 56,
246
+ "[B]": 57,
247
+ "[I-]": 58,
248
+ "%11": 59,
249
+ "[CH2-]": 60,
250
+ "[O+]": 61,
251
+ "[NH3+]": 62,
252
+ "[C]": 63,
253
+ "[Br-]": 64,
254
+ "[IH2]": 65,
255
+ "[S-]": 66,
256
+ "[cH-]": 67,
257
+ "%12": 68,
258
+ "[nH+]": 69,
259
+ "[B-]": 70,
260
+ "[K+]": 71,
261
+ "[Sn]": 72,
262
+ "[Se]": 73,
263
+ "[CH-]": 74,
264
+ "[HH]": 75,
265
+ "[Y]": 76,
266
+ "[n-]": 77,
267
+ "[CH3-]": 78,
268
+ "[SiH]": 79,
269
+ "[S+]": 80,
270
+ "%13": 81,
271
+ "[SiH2]": 82,
272
+ "[Li+]": 83,
273
+ "[NH-]": 84,
274
+ "%14": 85,
275
+ "[Na]": 86,
276
+ "[CH2]": 87,
277
+ "[O-2]": 88,
278
+ "[U+2]": 89,
279
+ "[W]": 90,
280
+ "[Al]": 91,
281
+ "[P@]": 92,
282
+ "[Fe+2]": 93,
283
+ "[PH+]": 94,
284
+ "%15": 95,
285
+ "[Cl+3]": 96,
286
+ "[Zn+2]": 97,
287
+ "[Ir]": 98,
288
+ "[Mg+2]": 99,
289
+ "[Pt+2]": 100,
290
+ "[OH2+]": 101,
291
+ "[As]": 102,
292
+ "[Fe]": 103,
293
+ "[OH+]": 104,
294
+ "[Zr+2]": 105,
295
+ "[3H]": 106,
296
+ "[Ge]": 107,
297
+ "[SiH3]": 108,
298
+ "[OH-]": 109,
299
+ "[NH4+]": 110,
300
+ "[Cu+2]": 111,
301
+ "[P@@]": 112,
302
+ "p": 113,
303
+ "[Pt]": 114,
304
+ "%16": 115,
305
+ "[Ca+2]": 116,
306
+ "[Zr]": 117,
307
+ "[F-]": 118,
308
+ "[C+]": 119,
309
+ "[Ti]": 120,
310
+ "[P-]": 121,
311
+ "[V]": 122,
312
+ "[se]": 123,
313
+ "[U]": 124,
314
+ "[O]": 125,
315
+ "[Ni+2]": 126,
316
+ "[Zn]": 127,
317
+ "[Co]": 128,
318
+ "[Ni]": 129,
319
+ "[Pd+2]": 130,
320
+ "[Cu]": 131,
321
+ "%17": 132,
322
+ "[Cu+]": 133,
323
+ "[Te]": 134,
324
+ "[H+]": 135,
325
+ "[CH+]": 136,
326
+ "[Li]": 137,
327
+ "[Pd]": 138,
328
+ "[Mo]": 139,
329
+ "[Ru+2]": 140,
330
+ "[o+]": 141,
331
+ "[Re]": 142,
332
+ "[SH+]": 143,
333
+ "%18": 144,
334
+ "[Ac]": 145,
335
+ "[Cr]": 146,
336
+ "[NH2-]": 147,
337
+ "[K]": 148,
338
+ "[13CH2]": 149,
339
+ "[c]": 150,
340
+ "[Zr+4]": 151,
341
+ "[Tl]": 152,
342
+ "[13C]": 153,
343
+ "[Mn]": 154,
344
+ "[N@+]": 155,
345
+ "[Hg]": 156,
346
+ "[Rh]": 157,
347
+ "[Ti+4]": 158,
348
+ "[Sb]": 159,
349
+ "[Co+2]": 160,
350
+ "[Ag+]": 161,
351
+ "[Ru]": 162,
352
+ "%19": 163,
353
+ "[N@@+]": 164,
354
+ "[Ti+2]": 165,
355
+ "[Al+3]": 166,
356
+ "[Pb]": 167,
357
+ "[I+]": 168,
358
+ "[18F]": 169,
359
+ "[s+]": 170,
360
+ "[Rb+]": 171,
361
+ "[Ba+2]": 172,
362
+ "[H-]": 173,
363
+ "[Fe+3]": 174,
364
+ "[Ir+3]": 175,
365
+ "[13cH]": 176,
366
+ "%20": 177,
367
+ "[AlH2]": 178,
368
+ "[Au+]": 179,
369
+ "[13c]": 180,
370
+ "[SH2+]": 181,
371
+ "[Sn+2]": 182,
372
+ "[Mn+2]": 183,
373
+ "[Si-]": 184,
374
+ "[Ag]": 185,
375
+ "[N]": 186,
376
+ "[Bi]": 187,
377
+ "%21": 188,
378
+ "[In]": 189,
379
+ "[CH2+]": 190,
380
+ "[Y+3]": 191,
381
+ "[Ga]": 192,
382
+ "%22": 193,
383
+ "[Co+3]": 194,
384
+ "[Au]": 195,
385
+ "[13CH3]": 196,
386
+ "[Mg]": 197,
387
+ "[Cs+]": 198,
388
+ "[W+2]": 199,
389
+ "[Hf]": 200,
390
+ "[Zn+]": 201,
391
+ "[Se-]": 202,
392
+ "[S-2]": 203,
393
+ "[Ca]": 204,
394
+ "[pH]": 205,
395
+ "[ClH+]": 206,
396
+ "[Ti+3]": 207,
397
+ "%23": 208,
398
+ "[Ru+]": 209,
399
+ "[SH-]": 210,
400
+ "[13CH]": 211,
401
+ "[IH+]": 212,
402
+ "[Hf+4]": 213,
403
+ "[Rf]": 214,
404
+ "[OH3+]": 215,
405
+ "%24": 216,
406
+ "[Pt+4]": 217,
407
+ "[Zr+3]": 218,
408
+ "[PH3+]": 219,
409
+ "[Sr+2]": 220,
410
+ "[Cd+2]": 221,
411
+ "[Cd]": 222,
412
+ "%25": 223,
413
+ "[Os]": 224,
414
+ "[BH-]": 225,
415
+ "[Sn+4]": 226,
416
+ "[Cr+3]": 227,
417
+ "[Ru+3]": 228,
418
+ "[PH2+]": 229,
419
+ "[Rh+2]": 230,
420
+ "[V+2]": 231,
421
+ "%26": 232,
422
+ "[Gd+3]": 233,
423
+ "[Pb+2]": 234,
424
+ "[PH]": 235,
425
+ "[Hg+]": 236,
426
+ "[Mo+2]": 237,
427
+ "[AlH]": 238,
428
+ "[Sn+]": 239,
429
+ "%27": 240,
430
+ "[Pd+]": 241,
431
+ "b": 242,
432
+ "[Rh+3]": 243,
433
+ "[Hg+2]": 244,
434
+ "[15NH]": 245,
435
+ "[14C]": 246,
436
+ "%28": 247,
437
+ "[Mn+3]": 248,
438
+ "[Si+]": 249,
439
+ "[SeH]": 250,
440
+ "[13C@H]": 251,
441
+ "[NH]": 252,
442
+ "[Ga+3]": 253,
443
+ "[SiH-]": 254,
444
+ "[13C@@H]": 255,
445
+ "[Ce]": 256,
446
+ "[Au+3]": 257,
447
+ "[Bi+3]": 258,
448
+ "[15N]": 259,
449
+ "%29": 260,
450
+ "[BH3-]": 261,
451
+ "[14cH]": 262,
452
+ "[Ti+]": 263,
453
+ "[Gd]": 264,
454
+ "[cH+]": 265,
455
+ "[Cr+2]": 266,
456
+ "[Sb-]": 267,
457
+ "%30": 268,
458
+ "[Be+2]": 269,
459
+ "[Al+]": 270,
460
+ "[te]": 271,
461
+ "[11CH3]": 272,
462
+ "[Sm]": 273,
463
+ "[Pr]": 274,
464
+ "[La]": 275,
465
+ "%31": 276,
466
+ "[Al-]": 277,
467
+ "[Ta]": 278,
468
+ "[125I]": 279,
469
+ "[BH2-]": 280,
470
+ "[Nb]": 281,
471
+ "[Si@]": 282,
472
+ "%32": 283,
473
+ "[14c]": 284,
474
+ "[Sb+3]": 285,
475
+ "[Ba]": 286,
476
+ "%33": 287,
477
+ "[Os+2]": 288,
478
+ "[Si@@]": 289,
479
+ "[La+3]": 290,
480
+ "[15n]": 291,
481
+ "[15NH2]": 292,
482
+ "[Nd+3]": 293,
483
+ "%34": 294,
484
+ "[14CH2]": 295,
485
+ "[18O]": 296,
486
+ "[Nd]": 297,
487
+ "[GeH]": 298,
488
+ "[Ni+3]": 299,
489
+ "[Eu]": 300,
490
+ "[Dy+3]": 301,
491
+ "[Sc]": 302,
492
+ "%36": 303,
493
+ "[Se-2]": 304,
494
+ "[As+]": 305,
495
+ "%35": 306,
496
+ "[AsH]": 307,
497
+ "[Tb]": 308,
498
+ "[Sb+5]": 309,
499
+ "[Se+]": 310,
500
+ "[Ce+3]": 311,
501
+ "[c+]": 312,
502
+ "[In+3]": 313,
503
+ "[SnH]": 314,
504
+ "[Mo+4]": 315,
505
+ "%37": 316,
506
+ "[V+4]": 317,
507
+ "[Eu+3]": 318,
508
+ "[Hf+2]": 319,
509
+ "%38": 320,
510
+ "[Pt+]": 321,
511
+ "[p+]": 322,
512
+ "[123I]": 323,
513
+ "[Tl+]": 324,
514
+ "[Sm+3]": 325,
515
+ "%39": 326,
516
+ "[Yb+3]": 327,
517
+ "%40": 328,
518
+ "[Yb]": 329,
519
+ "[Os+]": 330,
520
+ "%41": 331,
521
+ "[10B]": 332,
522
+ "[Sc+3]": 333,
523
+ "[Al+2]": 334,
524
+ "%42": 335,
525
+ "[Sr]": 336,
526
+ "[Tb+3]": 337,
527
+ "[Po]": 338,
528
+ "[Tc]": 339,
529
+ "[PH-]": 340,
530
+ "[AlH3]": 341,
531
+ "[Ar]": 342,
532
+ "[U+4]": 343,
533
+ "[SnH2]": 344,
534
+ "[Cl+2]": 345,
535
+ "[si]": 346,
536
+ "[Fe+]": 347,
537
+ "[14CH3]": 348,
538
+ "[U+3]": 349,
539
+ "[Cl+]": 350,
540
+ "%43": 351,
541
+ "[GeH2]": 352,
542
+ "%44": 353,
543
+ "[Er+3]": 354,
544
+ "[Mo+3]": 355,
545
+ "[I+2]": 356,
546
+ "[Fe+4]": 357,
547
+ "[99Tc]": 358,
548
+ "%45": 359,
549
+ "[11C]": 360,
550
+ "%46": 361,
551
+ "[SnH3]": 362,
552
+ "[S]": 363,
553
+ "[Te+]": 364,
554
+ "[Er]": 365,
555
+ "[Lu+3]": 366,
556
+ "[11B]": 367,
557
+ "%47": 368,
558
+ "%48": 369,
559
+ "[P]": 370,
560
+ "[Tm]": 371,
561
+ "[Th]": 372,
562
+ "[Dy]": 373,
563
+ "[Pr+3]": 374,
564
+ "[Ta+5]": 375,
565
+ "[Nb+5]": 376,
566
+ "[Rb]": 377,
567
+ "[GeH3]": 378,
568
+ "[Br+2]": 379,
569
+ "%49": 380,
570
+ "[131I]": 381,
571
+ "[Fm]": 382,
572
+ "[Cs]": 383,
573
+ "[BH4-]": 384,
574
+ "[Lu]": 385,
575
+ "[15nH]": 386,
576
+ "%50": 387,
577
+ "[Ru+6]": 388,
578
+ "[b-]": 389,
579
+ "[Ho]": 390,
580
+ "[Th+4]": 391,
581
+ "[Ru+4]": 392,
582
+ "%52": 393,
583
+ "[14CH]": 394,
584
+ "%51": 395,
585
+ "[Cr+6]": 396,
586
+ "[18OH]": 397,
587
+ "[Ho+3]": 398,
588
+ "[Ce+4]": 399,
589
+ "[Bi+2]": 400,
590
+ "[Co+]": 401,
591
+ "%53": 402,
592
+ "[Yb+2]": 403,
593
+ "[Fe+6]": 404,
594
+ "[Be]": 405,
595
+ "%54": 406,
596
+ "[SH3+]": 407,
597
+ "[Np]": 408,
598
+ "[As-]": 409,
599
+ "%55": 410,
600
+ "[14C@@H]": 411,
601
+ "[Ir+2]": 412,
602
+ "[GaH3]": 413,
603
+ "[p-]": 414,
604
+ "[GeH4]": 415,
605
+ "[Sn+3]": 416,
606
+ "[Os+4]": 417,
607
+ "%56": 418,
608
+ "[14C@H]": 419,
609
+ "[sH+]": 420,
610
+ "[19F]": 421,
611
+ "[Eu+2]": 422,
612
+ "[TlH]": 423,
613
+ "%57": 424,
614
+ "[Cr+4]": 425,
615
+ "%58": 426,
616
+ "[B@@-]": 427,
617
+ "[SiH+]": 428,
618
+ "[At]": 429,
619
+ "[Am]": 430,
620
+ "[Fe+5]": 431,
621
+ "[AsH2]": 432,
622
+ "[Si+4]": 433,
623
+ "[B@-]": 434,
624
+ "[Pu]": 435,
625
+ "[SbH]": 436,
626
+ "[P-2]": 437,
627
+ "[Tm+3]": 438,
628
+ "*": 439,
629
+ "%59": 440,
630
+ "[se+]": 441,
631
+ "%60": 442,
632
+ "[oH+]": 443,
633
+ "[1H]": 444,
634
+ "[15N+]": 445,
635
+ "[124I]": 446,
636
+ "[S@@+]": 447,
637
+ "[P-3]": 448,
638
+ "[H]": 449,
639
+ "[IH2+]": 450,
640
+ "[TeH]": 451,
641
+ "[Xe]": 452,
642
+ "[PH4+]": 453,
643
+ "[Cr+]": 454,
644
+ "[Cm]": 455,
645
+ "[I+3]": 456,
646
+ "%61": 457,
647
+ "[Nb+2]": 458,
648
+ "[Ru+5]": 459,
649
+ "%62": 460,
650
+ "[Ta+2]": 461,
651
+ "[Tc+4]": 462,
652
+ "[CH3+]": 463,
653
+ "[Pm]": 464,
654
+ "[Si@H]": 465,
655
+ "[No]": 466,
656
+ "%63": 467,
657
+ "[Cr+5]": 468,
658
+ "[Th+2]": 469,
659
+ "[Zn-2]": 470,
660
+ "[13C@]": 471,
661
+ "[Lr]": 472,
662
+ "%64": 473,
663
+ "[99Tc+3]": 474,
664
+ "%65": 475,
665
+ "[13C@@]": 476,
666
+ "%66": 477,
667
+ "[Fe-]": 478,
668
+ "[17O]": 479,
669
+ "[siH]": 480,
670
+ "[Sb+]": 481,
671
+ "[OH]": 482,
672
+ "[IH]": 483,
673
+ "[11CH2]": 484,
674
+ "[Cf]": 485,
675
+ "[SiH2+]": 486,
676
+ "[Gd+2]": 487,
677
+ "[In+]": 488,
678
+ "[Si@@H]": 489,
679
+ "[Mn+]": 490,
680
+ "[99Tc+4]": 491,
681
+ "[Ga-]": 492,
682
+ "%67": 493,
683
+ "[S@+]": 494,
684
+ "[Ge+4]": 495,
685
+ "[Tl+3]": 496,
686
+ "[16OH]": 497,
687
+ "%68": 498,
688
+ "[2H-]": 499,
689
+ "[Ra]": 500,
690
+ "[si-]": 501,
691
+ "[NiH2]": 502,
692
+ "[P@@H]": 503,
693
+ "[Rh+]": 504,
694
+ "[12C]": 505,
695
+ "[35S]": 506,
696
+ "[32P]": 507,
697
+ "[SiH2-]": 508,
698
+ "[AlH2+]": 509,
699
+ "[16O]": 510,
700
+ "%69": 511,
701
+ "[BiH]": 512,
702
+ "[BiH2]": 513,
703
+ "[Zn-]": 514,
704
+ "[BH]": 515,
705
+ "[Tc+3]": 516,
706
+ "[Ir+]": 517,
707
+ "[Ni+]": 518,
708
+ "%70": 519,
709
+ "[InH2]": 520,
710
+ "[InH]": 521,
711
+ "[Nb+3]": 522,
712
+ "[PbH]": 523,
713
+ "[Bi+]": 524,
714
+ "%71": 525,
715
+ "[As+3]": 526,
716
+ "%72": 527,
717
+ "[18O-]": 528,
718
+ "[68Ga+3]": 529,
719
+ "%73": 530,
720
+ "[Pa]": 531,
721
+ "[76Br]": 532,
722
+ "[Tc+5]": 533,
723
+ "[pH+]": 534,
724
+ "[64Cu+2]": 535,
725
+ "[Ru+8]": 536,
726
+ "%74": 537,
727
+ "[PH2-]": 538,
728
+ "[Si+2]": 539,
729
+ "[17OH]": 540,
730
+ "[RuH]": 541,
731
+ "[111In+3]": 542,
732
+ "[AlH+]": 543,
733
+ "%75": 544,
734
+ "%76": 545,
735
+ "[W+]": 546,
736
+ "[SbH2]": 547,
737
+ "[PoH]": 548,
738
+ "[Ru-]": 549,
739
+ "[XeH]": 550,
740
+ "[Tc+2]": 551,
741
+ "[13C-]": 552,
742
+ "[Br+]": 553,
743
+ "[Pt-2]": 554,
744
+ "[Es]": 555,
745
+ "[Cu-]": 556,
746
+ "[Mg+]": 557,
747
+ "[3HH]": 558,
748
+ "[P@H]": 559,
749
+ "[ClH2+]": 560,
750
+ "%77": 561,
751
+ "[SH]": 562,
752
+ "[Au-]": 563,
753
+ "[2HH]": 564,
754
+ "%78": 565,
755
+ "[Sn-]": 566,
756
+ "[11CH]": 567,
757
+ "[PdH2]": 568,
758
+ "0": 569,
759
+ "[Os+6]": 570,
760
+ "%79": 571,
761
+ "[Mo+]": 572,
762
+ "%80": 573,
763
+ "[al]": 574,
764
+ "[PbH2]": 575,
765
+ "[64Cu]": 576,
766
+ "[Cl]": 577,
767
+ "[12CH3]": 578,
768
+ "%81": 579,
769
+ "[Tc+7]": 580,
770
+ "[11c]": 581,
771
+ "%82": 582,
772
+ "[Li-]": 583,
773
+ "[99Tc+5]": 584,
774
+ "[He]": 585,
775
+ "[12c]": 586,
776
+ "[Kr]": 587,
777
+ "[RuH+2]": 588,
778
+ "[35Cl]": 589,
779
+ "[Pd-2]": 590,
780
+ "[GaH2]": 591,
781
+ "[4H]": 592,
782
+ "[Sg]": 593,
783
+ "[Cu-2]": 594,
784
+ "[Br+3]": 595,
785
+ "%83": 596,
786
+ "[37Cl]": 597,
787
+ "[211At]": 598,
788
+ "[IrH+2]": 599,
789
+ "[Mt]": 600,
790
+ "[Ir-2]": 601,
791
+ "[In-]": 602,
792
+ "[12cH]": 603,
793
+ "[12CH2]": 604,
794
+ "[RuH2]": 605,
795
+ "[99Tc+7]": 606,
796
+ "%84": 607,
797
+ "[15n+]": 608,
798
+ "[ClH2+2]": 609,
799
+ "[16N]": 610,
800
+ "[111In]": 611,
801
+ "[Tc+]": 612,
802
+ "[Ru-2]": 613,
803
+ "[12CH]": 614,
804
+ "[si+]": 615,
805
+ "[Tc+6]": 616,
806
+ "%85": 617,
807
+ "%86": 618,
808
+ "[90Y]": 619,
809
+ "[Pd-]": 620,
810
+ "[188Re]": 621,
811
+ "[RuH+]": 622,
812
+ "[NiH]": 623,
813
+ "[SiH3-]": 624,
814
+ "[14n]": 625,
815
+ "[CH3]": 626,
816
+ "[14N]": 627,
817
+ "[10BH2]": 628,
818
+ "%88": 629,
819
+ "%89": 630,
820
+ "%90": 631,
821
+ "[34S]": 632,
822
+ "[77Br]": 633,
823
+ "[GaH]": 634,
824
+ "[Br]": 635,
825
+ "[Ge@]": 636,
826
+ "[B@@H-]": 637,
827
+ "[CuH]": 638,
828
+ "[SiH4]": 639,
829
+ "[3H-]": 640,
830
+ "%87": 641,
831
+ "%91": 642,
832
+ "%92": 643,
833
+ "[67Cu]": 644,
834
+ "[I]": 645,
835
+ "[177Lu]": 646,
836
+ "[ReH]": 647,
837
+ "[67Ga+3]": 648,
838
+ "[Db]": 649,
839
+ "[177Lu+3]": 650,
840
+ "[AlH2-]": 651,
841
+ "[Si+3]": 652,
842
+ "[Ti-2]": 653,
843
+ "[RuH+3]": 654,
844
+ "[al+]": 655,
845
+ "[68Ga]": 656,
846
+ "[2H+]": 657,
847
+ "[B@H-]": 658,
848
+ "[WH2]": 659,
849
+ "[OsH]": 660,
850
+ "[Ir-3]": 661,
851
+ "[AlH-]": 662,
852
+ "[Bk]": 663,
853
+ "[75Se]": 664,
854
+ "[14C@]": 665,
855
+ "[Pt-]": 666,
856
+ "[N@@H+]": 667,
857
+ "[Nb-]": 668,
858
+ "[13NH2]": 669,
859
+ "%93": 670,
860
+ "[186Re]": 671,
861
+ "[Tb+4]": 672,
862
+ "[PtH]": 673,
863
+ "[IrH2]": 674,
864
+ "[Hg-2]": 675,
865
+ "[AlH3-]": 676,
866
+ "[PdH+]": 677,
867
+ "[Md]": 678,
868
+ "[RhH+2]": 679,
869
+ "[11cH]": 680,
870
+ "[Co-2]": 681,
871
+ "[15N-]": 682,
872
+ "[ZrH2]": 683,
873
+ "%94": 684,
874
+ "[Hg-]": 685,
875
+ "[127I]": 686,
876
+ "[AsH2+]": 687,
877
+ "[MoH2]": 688,
878
+ "[Te+4]": 689,
879
+ "[14C@@]": 690,
880
+ "[As+5]": 691,
881
+ "[SnH+3]": 692,
882
+ "[Ge@@]": 693,
883
+ "[6Li+]": 694,
884
+ "[WH]": 695,
885
+ "[Ne]": 696,
886
+ "[14NH2]": 697,
887
+ "[14NH]": 698,
888
+ "[12C@@H]": 699,
889
+ "[Os+7]": 700,
890
+ "[RhH]": 701,
891
+ "[Al-3]": 702,
892
+ "[SnH+]": 703,
893
+ "[15NH3+]": 704,
894
+ "[Zr+]": 705,
895
+ "[197Hg+]": 706,
896
+ "%95": 707,
897
+ "%96": 708,
898
+ "[90Y+3]": 709,
899
+ "[Os-2]": 710,
900
+ "[98Tc+5]": 711,
901
+ "[15NH3]": 712,
902
+ "[bH-]": 713,
903
+ "[33P]": 714,
904
+ "[Zr-2]": 715,
905
+ "[15O]": 716,
906
+ "[Rh-]": 717,
907
+ "[PbH3]": 718,
908
+ "[PH2]": 719,
909
+ "[Ni-]": 720,
910
+ "[CuH+]": 721,
911
+ "%97": 722,
912
+ "%98": 723,
913
+ "%99": 724,
914
+ "[Os+5]": 725,
915
+ "[PtH+]": 726,
916
+ "[ReH4]": 727,
917
+ "[16NH]": 728,
918
+ "[82Br]": 729,
919
+ "[W-]": 730,
920
+ "[18F-]": 731,
921
+ "[15NH4+]": 732,
922
+ "[Se+4]": 733,
923
+ "[SeH-]": 734,
924
+ "[67Cu+2]": 735,
925
+ "[12C@H]": 736,
926
+ "[AsH3]": 737,
927
+ "[HgH]": 738,
928
+ "[10B-]": 739,
929
+ "[99Tc+6]": 740,
930
+ "[117Sn+4]": 741,
931
+ "[Te@]": 742,
932
+ "[P@+]": 743,
933
+ "[35SH]": 744,
934
+ "[SeH+]": 745,
935
+ "[Ni-2]": 746,
936
+ "[Al-2]": 747,
937
+ "[TeH2]": 748,
938
+ "[Bh]": 749,
939
+ "[99Tc+2]": 750,
940
+ "[Os+8]": 751,
941
+ "[PH-2]": 752,
942
+ "[7Li+]": 753,
943
+ "[14nH]": 754,
944
+ "[AlH+2]": 755,
945
+ "[18FH]": 756,
946
+ "[SnH4]": 757,
947
+ "[18O-2]": 758,
948
+ "[IrH]": 759,
949
+ "[13N]": 760,
950
+ "[Te@@]": 761,
951
+ "[Rh-3]": 762,
952
+ "[15NH+]": 763,
953
+ "[AsH3+]": 764,
954
+ "[SeH2]": 765,
955
+ "[AsH+]": 766,
956
+ "[CoH2]": 767,
957
+ "[16NH2]": 768,
958
+ "[AsH-]": 769,
959
+ "[203Hg+]": 770,
960
+ "[P@@+]": 771,
961
+ "[166Ho+3]": 772,
962
+ "[60Co+3]": 773,
963
+ "[13CH2-]": 774,
964
+ "[SeH2+]": 775,
965
+ "[75Br]": 776,
966
+ "[TlH2]": 777,
967
+ "[80Br]": 778,
968
+ "[siH+]": 779,
969
+ "[Ca+]": 780,
970
+ "[153Sm+3]": 781,
971
+ "[PdH]": 782,
972
+ "[225Ac]": 783,
973
+ "[13CH3-]": 784,
974
+ "[AlH4-]": 785,
975
+ "[FeH]": 786,
976
+ "[13CH-]": 787,
977
+ "[14C-]": 788,
978
+ "[11C-]": 789,
979
+ "[153Sm]": 790,
980
+ "[Re-]": 791,
981
+ "[te+]": 792,
982
+ "[13CH4]": 793,
983
+ "[ClH+2]": 794,
984
+ "[8CH2]": 795,
985
+ "[99Mo]": 796,
986
+ "[ClH3+3]": 797,
987
+ "[SbH3]": 798,
988
+ "[25Mg+2]": 799,
989
+ "[16N+]": 800,
990
+ "[SnH2+]": 801,
991
+ "[11C@H]": 802,
992
+ "[122I]": 803,
993
+ "[Re-2]": 804,
994
+ "[RuH2+2]": 805,
995
+ "[ZrH]": 806,
996
+ "[Bi-]": 807,
997
+ "[Pr+]": 808,
998
+ "[Rn]": 809,
999
+ "[Fr]": 810,
1000
+ "[36Cl]": 811,
1001
+ "[18o]": 812,
1002
+ "[YH]": 813,
1003
+ "[79Br]": 814,
1004
+ "[121I]": 815,
1005
+ "[113In+3]": 816,
1006
+ "[TaH]": 817,
1007
+ "[RhH2]": 818,
1008
+ "[Ta-]": 819,
1009
+ "[67Ga]": 820,
1010
+ "[ZnH+]": 821,
1011
+ "[SnH2-]": 822,
1012
+ "[OsH2]": 823,
1013
+ "[16F]": 824,
1014
+ "[FeH2]": 825,
1015
+ "[14O]": 826,
1016
+ "[PbH2+2]": 827,
1017
+ "[BH2]": 828,
1018
+ "[6H]": 829,
1019
+ "[125Te]": 830,
1020
+ "[197Hg]": 831,
1021
+ "[TaH2]": 832,
1022
+ "[TaH3]": 833,
1023
+ "[76As]": 834,
1024
+ "[Nb-2]": 835,
1025
+ "[14N+]": 836,
1026
+ "[125I-]": 837,
1027
+ "[33S]": 838,
1028
+ "[IH2+2]": 839,
1029
+ "[NH2]": 840,
1030
+ "[PtH2]": 841,
1031
+ "[MnH]": 842,
1032
+ "[19C]": 843,
1033
+ "[17F]": 844,
1034
+ "[1H-]": 845,
1035
+ "[SnH4+2]": 846,
1036
+ "[Mn-2]": 847,
1037
+ "[15NH2+]": 848,
1038
+ "[TiH2]": 849,
1039
+ "[ReH7]": 850,
1040
+ "[Cd-2]": 851,
1041
+ "[Fe-3]": 852,
1042
+ "[SH2]": 853,
1043
+ "[17O-]": 854,
1044
+ "[siH-]": 855,
1045
+ "[CoH+]": 856,
1046
+ "[VH]": 857,
1047
+ "[10BH]": 858,
1048
+ "[Ru-3]": 859,
1049
+ "[13O]": 860,
1050
+ "[5H]": 861,
1051
+ "[15n-]": 862,
1052
+ "[153Gd]": 863,
1053
+ "[12C@]": 864,
1054
+ "[11CH3-]": 865,
1055
+ "[IrH3]": 866,
1056
+ "[RuH3]": 867,
1057
+ "[74Se]": 868,
1058
+ "[Se@]": 869,
1059
+ "[Hf+]": 870,
1060
+ "[77Se]": 871,
1061
+ "[166Ho]": 872,
1062
+ "[59Fe+2]": 873,
1063
+ "[203Hg]": 874,
1064
+ "[18OH-]": 875,
1065
+ "[8CH]": 876,
1066
+ "[12C@@]": 877,
1067
+ "[11CH4]": 878,
1068
+ "[15C]": 879,
1069
+ "[249Cf]": 880,
1070
+ "[PbH4]": 881,
1071
+ "[64Zn]": 882,
1072
+ "[99Tc+]": 883,
1073
+ "[14c-]": 884,
1074
+ "[149Pm]": 885,
1075
+ "[IrH4]": 886,
1076
+ "[Se@@]": 887,
1077
+ "[13OH]": 888,
1078
+ "[14CH3-]": 889,
1079
+ "[28Si]": 890,
1080
+ "[Rh-2]": 891,
1081
+ "[Fe-2]": 892,
1082
+ "[131I-]": 893,
1083
+ "[51Cr]": 894,
1084
+ "[62Cu+2]": 895,
1085
+ "[81Br]": 896,
1086
+ "[121Sb]": 897,
1087
+ "[7Li]": 898,
1088
+ "[89Zr+4]": 899,
1089
+ "[SbH3+]": 900,
1090
+ "[11C@@H]": 901,
1091
+ "[98Tc]": 902,
1092
+ "[59Fe+3]": 903,
1093
+ "[BiH2+]": 904,
1094
+ "[SbH+]": 905,
1095
+ "[TiH]": 906,
1096
+ "[14NH3]": 907,
1097
+ "[15OH]": 908,
1098
+ "[119Sn]": 909,
1099
+ "[201Hg]": 910,
1100
+ "[MnH+]": 911,
1101
+ "[201Tl]": 912,
1102
+ "[51Cr+3]": 913,
1103
+ "[123I-]": 914,
1104
+ "[MoH]": 915,
1105
+ "[AlH6-3]": 916,
1106
+ "[MnH2]": 917,
1107
+ "[WH3]": 918,
1108
+ "[213Bi+3]": 919,
1109
+ "[SnH2+2]": 920,
1110
+ "[123IH]": 921,
1111
+ "[13CH+]": 922,
1112
+ "[Zr-]": 923,
1113
+ "[74As]": 924,
1114
+ "[13C+]": 925,
1115
+ "[32P+]": 926,
1116
+ "[KrH]": 927,
1117
+ "[SiH+2]": 928,
1118
+ "[ClH3+2]": 929,
1119
+ "[13NH]": 930,
1120
+ "[9CH2]": 931,
1121
+ "[ZrH2+2]": 932,
1122
+ "[87Sr+2]": 933,
1123
+ "[35s]": 934,
1124
+ "[239Pu]": 935,
1125
+ "[198Au]": 936,
1126
+ "[241Am]": 937,
1127
+ "[203Hg+2]": 938,
1128
+ "[V+]": 939,
1129
+ "[YH2]": 940,
1130
+ "[195Pt]": 941,
1131
+ "[203Pb]": 942,
1132
+ "[RuH4]": 943,
1133
+ "[ThH2]": 944,
1134
+ "[AuH]": 945,
1135
+ "[66Ga+3]": 946,
1136
+ "[11B-]": 947,
1137
+ "[F]": 948,
1138
+ "[24Na+]": 949,
1139
+ "[85Sr+2]": 950,
1140
+ "[201Tl+]": 951,
1141
+ "[14CH4]": 952,
1142
+ "[32S]": 953,
1143
+ "[TeH2+]": 954,
1144
+ "[ClH2+3]": 955,
1145
+ "[AgH]": 956,
1146
+ "[Ge@H]": 957,
1147
+ "[44Ca+2]": 958,
1148
+ "[Os-]": 959,
1149
+ "[31P]": 960,
1150
+ "[15nH+]": 961,
1151
+ "[SbH4]": 962,
1152
+ "[TiH+]": 963,
1153
+ "[Ba+]": 964,
1154
+ "[57Co+2]": 965,
1155
+ "[Ta+]": 966,
1156
+ "[125IH]": 967,
1157
+ "[77As]": 968,
1158
+ "[129I]": 969,
1159
+ "[Fe-4]": 970,
1160
+ "[Ta-2]": 971,
1161
+ "[19O]": 972,
1162
+ "[12O]": 973,
1163
+ "[BiH3]": 974,
1164
+ "[237Np]": 975,
1165
+ "[252Cf]": 976,
1166
+ "[86Y]": 977,
1167
+ "[Cr-2]": 978,
1168
+ "[89Y]": 979,
1169
+ "[195Pt+2]": 980,
1170
+ "[si+2]": 981,
1171
+ "[58Fe+2]": 982,
1172
+ "[Hs]": 983,
1173
+ "[S@@H]": 984,
1174
+ "[8CH4]": 985,
1175
+ "[164Dy+3]": 986,
1176
+ "[47Ca+2]": 987,
1177
+ "[57Co]": 988,
1178
+ "[NbH2]": 989,
1179
+ "[ReH2]": 990,
1180
+ "[ZnH2]": 991,
1181
+ "[CrH2]": 992,
1182
+ "[17NH]": 993,
1183
+ "[ZrH3]": 994,
1184
+ "[RhH3]": 995,
1185
+ "[12C-]": 996,
1186
+ "[18O+]": 997,
1187
+ "[Bi-2]": 998,
1188
+ "[ClH4+3]": 999,
1189
+ "[Ni-3]": 1000,
1190
+ "[Ag-]": 1001,
1191
+ "[111In-]": 1002,
1192
+ "[Mo-2]": 1003,
1193
+ "[55Fe+3]": 1004,
1194
+ "[204Hg+]": 1005,
1195
+ "[35Cl-]": 1006,
1196
+ "[211Pb]": 1007,
1197
+ "[75Ge]": 1008,
1198
+ "[8B]": 1009,
1199
+ "[TeH3]": 1010,
1200
+ "[SnH3+]": 1011,
1201
+ "[Zr-3]": 1012,
1202
+ "[28F]": 1013,
1203
+ "[249Bk]": 1014,
1204
+ "[169Yb]": 1015,
1205
+ "[34SH]": 1016,
1206
+ "[6Li]": 1017,
1207
+ "[94Tc]": 1018,
1208
+ "[197Au]": 1019,
1209
+ "[195Pt+4]": 1020,
1210
+ "[169Yb+3]": 1021,
1211
+ "[32Cl]": 1022,
1212
+ "[82Se]": 1023,
1213
+ "[159Gd+3]": 1024,
1214
+ "[213Bi]": 1025,
1215
+ "[CoH+2]": 1026,
1216
+ "[36S]": 1027,
1217
+ "[35P]": 1028,
1218
+ "[Ru-4]": 1029,
1219
+ "[Cr-3]": 1030,
1220
+ "[60Co]": 1031,
1221
+ "[1H+]": 1032,
1222
+ "[18CH2]": 1033,
1223
+ "[Cd-]": 1034,
1224
+ "[152Sm+3]": 1035,
1225
+ "[106Ru]": 1036,
1226
+ "[238Pu]": 1037,
1227
+ "[220Rn]": 1038,
1228
+ "[45Ca+2]": 1039,
1229
+ "[89Sr+2]": 1040,
1230
+ "[239Np]": 1041,
1231
+ "[90Sr+2]": 1042,
1232
+ "[137Cs+]": 1043,
1233
+ "[165Dy]": 1044,
1234
+ "[68GaH3]": 1045,
1235
+ "[65Zn+2]": 1046,
1236
+ "[89Zr]": 1047,
1237
+ "[BiH2+2]": 1048,
1238
+ "[62Cu]": 1049,
1239
+ "[165Dy+3]": 1050,
1240
+ "[238U]": 1051,
1241
+ "[105Rh+3]": 1052,
1242
+ "[70Zn]": 1053,
1243
+ "[12B]": 1054,
1244
+ "[12OH]": 1055,
1245
+ "[18CH]": 1056,
1246
+ "[17CH]": 1057,
1247
+ "[42K]": 1058,
1248
+ "[76Br-]": 1059,
1249
+ "[71As]": 1060,
1250
+ "[NbH3]": 1061,
1251
+ "[ReH3]": 1062,
1252
+ "[OsH-]": 1063,
1253
+ "[WH4]": 1064,
1254
+ "[MoH3]": 1065,
1255
+ "[OsH4]": 1066,
1256
+ "[RuH6]": 1067,
1257
+ "[PtH3]": 1068,
1258
+ "[CuH2]": 1069,
1259
+ "[CoH3]": 1070,
1260
+ "[TiH4]": 1071,
1261
+ "[64Zn+2]": 1072,
1262
+ "[Si-2]": 1073,
1263
+ "[79BrH]": 1074,
1264
+ "[14CH2-]": 1075,
1265
+ "[PtH2+2]": 1076,
1266
+ "[Os-3]": 1077,
1267
+ "[29Si]": 1078,
1268
+ "[Ti-]": 1079,
1269
+ "[Se+6]": 1080,
1270
+ "[22Na+]": 1081,
1271
+ "[42K+]": 1082,
1272
+ "[131Cs+]": 1083,
1273
+ "[86Rb+]": 1084,
1274
+ "[134Cs+]": 1085,
1275
+ "[209Po]": 1086,
1276
+ "[208Po]": 1087,
1277
+ "[81Rb+]": 1088,
1278
+ "[203Tl+]": 1089,
1279
+ "[Zr-4]": 1090,
1280
+ "[148Sm]": 1091,
1281
+ "[147Sm]": 1092,
1282
+ "[37Cl-]": 1093,
1283
+ "[12CH4]": 1094,
1284
+ "[Ge@@H]": 1095,
1285
+ "[63Cu]": 1096,
1286
+ "[13CH2+]": 1097,
1287
+ "[AsH2-]": 1098,
1288
+ "[CeH]": 1099,
1289
+ "[SnH-]": 1100,
1290
+ "[UH]": 1101,
1291
+ "[9c]": 1102,
1292
+ "[21CH3]": 1103,
1293
+ "[TeH+]": 1104,
1294
+ "[57Co+3]": 1105,
1295
+ "[8BH2]": 1106,
1296
+ "[12BH2]": 1107,
1297
+ "[19BH2]": 1108,
1298
+ "[9BH2]": 1109,
1299
+ "[YbH2]": 1110,
1300
+ "[CrH+2]": 1111,
1301
+ "[208Bi]": 1112,
1302
+ "[152Gd]": 1113,
1303
+ "[61Cu]": 1114,
1304
+ "[115In]": 1115,
1305
+ "[60Co+2]": 1116,
1306
+ "[13NH2-]": 1117,
1307
+ "[120I]": 1118,
1308
+ "[18OH2]": 1119,
1309
+ "[75SeH]": 1120,
1310
+ "[SbH2+]": 1121,
1311
+ "[144Ce]": 1122,
1312
+ "[16n]": 1123,
1313
+ "[113In]": 1124,
1314
+ "[22nH]": 1125,
1315
+ "[129I-]": 1126,
1316
+ "[InH3]": 1127,
1317
+ "[32PH3]": 1128,
1318
+ "[234U]": 1129,
1319
+ "[235U]": 1130,
1320
+ "[59Fe]": 1131,
1321
+ "[82Rb+]": 1132,
1322
+ "[65Zn]": 1133,
1323
+ "[244Cm]": 1134,
1324
+ "[147Pm]": 1135,
1325
+ "[91Y]": 1136,
1326
+ "[237Pu]": 1137,
1327
+ "[231Pa]": 1138,
1328
+ "[253Cf]": 1139,
1329
+ "[127Te]": 1140,
1330
+ "[187Re]": 1141,
1331
+ "[236Np]": 1142,
1332
+ "[235Np]": 1143,
1333
+ "[72Zn]": 1144,
1334
+ "[253Es]": 1145,
1335
+ "[159Dy]": 1146,
1336
+ "[62Zn]": 1147,
1337
+ "[101Tc]": 1148,
1338
+ "[149Tb]": 1149,
1339
+ "[124I-]": 1150,
1340
+ "[SeH3+]": 1151,
1341
+ "[210Pb]": 1152,
1342
+ "[40K]": 1153,
1343
+ "[210Po]": 1154,
1344
+ "[214Pb]": 1155,
1345
+ "[218Po]": 1156,
1346
+ "[214Po]": 1157,
1347
+ "[7Be]": 1158,
1348
+ "[212Pb]": 1159,
1349
+ "[205Pb]": 1160,
1350
+ "[209Pb]": 1161,
1351
+ "[123Te]": 1162,
1352
+ "[202Pb]": 1163,
1353
+ "[72As]": 1164,
1354
+ "[201Pb]": 1165,
1355
+ "[70As]": 1166,
1356
+ "[73Ge]": 1167,
1357
+ "[200Pb]": 1168,
1358
+ "[198Pb]": 1169,
1359
+ "[66Ga]": 1170,
1360
+ "[73Se]": 1171,
1361
+ "[195Pb]": 1172,
1362
+ "[199Pb]": 1173,
1363
+ "[144Ce+3]": 1174,
1364
+ "[235U+2]": 1175,
1365
+ "[90Tc]": 1176,
1366
+ "[114In+3]": 1177,
1367
+ "[128I]": 1178,
1368
+ "[100Tc+]": 1179,
1369
+ "[82Br-]": 1180,
1370
+ "[191Pt+2]": 1181,
1371
+ "[191Pt+4]": 1182,
1372
+ "[193Pt+4]": 1183,
1373
+ "[31PH3]": 1184,
1374
+ "[125I+2]": 1185,
1375
+ "[131I+2]": 1186,
1376
+ "[125Te+4]": 1187,
1377
+ "[82Sr+2]": 1188,
1378
+ "[149Sm]": 1189,
1379
+ "[81BrH]": 1190,
1380
+ "[129Xe]": 1191,
1381
+ "[193Pt+2]": 1192,
1382
+ "[123I+2]": 1193,
1383
+ "[Cr-]": 1194,
1384
+ "[Co-]": 1195,
1385
+ "[227Th+4]": 1196,
1386
+ "[249Cf+3]": 1197,
1387
+ "[252Cf+3]": 1198,
1388
+ "[187Os]": 1199,
1389
+ "[16O-]": 1200,
1390
+ "[17O+]": 1201,
1391
+ "[16OH-]": 1202,
1392
+ "[98Tc+7]": 1203,
1393
+ "[58Co+2]": 1204,
1394
+ "[69Ga+3]": 1205,
1395
+ "[57Fe+2]": 1206,
1396
+ "[43K+]": 1207,
1397
+ "[16C]": 1208,
1398
+ "[52Fe+3]": 1209,
1399
+ "[SeH5]": 1210,
1400
+ "[194Pb]": 1211,
1401
+ "[196Pb]": 1212,
1402
+ "[197Pb]": 1213,
1403
+ "[213Pb]": 1214,
1404
+ "[9B]": 1215,
1405
+ "[19B]": 1216,
1406
+ "[11CH-]": 1217,
1407
+ "[9CH]": 1218,
1408
+ "[20OH]": 1219,
1409
+ "[25OH]": 1220,
1410
+ "[8cH]": 1221,
1411
+ "[TiH+3]": 1222,
1412
+ "[SnH6+3]": 1223,
1413
+ "[N@H+]": 1224,
1414
+ "[52Mn+2]": 1225,
1415
+ "[64Ga]": 1226,
1416
+ "[13B]": 1227,
1417
+ "[216Bi]": 1228,
1418
+ "[117Sn+2]": 1229,
1419
+ "[232Th]": 1230,
1420
+ "[SnH+2]": 1231,
1421
+ "[BiH5]": 1232,
1422
+ "[77Kr]": 1233,
1423
+ "[103Cd]": 1234,
1424
+ "[62Ni]": 1235,
1425
+ "[LaH3]": 1236,
1426
+ "[SmH3]": 1237,
1427
+ "[EuH3]": 1238,
1428
+ "[MoH5]": 1239,
1429
+ "[64Ni]": 1240,
1430
+ "[66Zn]": 1241,
1431
+ "[68Zn]": 1242,
1432
+ "[186W]": 1243,
1433
+ "[FeH4]": 1244,
1434
+ "[MoH4]": 1245,
1435
+ "[HgH2]": 1246,
1436
+ "[15NH2-]": 1247,
1437
+ "[UH2]": 1248,
1438
+ "[204Hg]": 1249,
1439
+ "[GaH4-]": 1250,
1440
+ "[ThH4]": 1251,
1441
+ "[WH6]": 1252,
1442
+ "[PtH4]": 1253,
1443
+ "[VH2]": 1254,
1444
+ "[UH3]": 1255,
1445
+ "[FeH3]": 1256,
1446
+ "[RuH5]": 1257,
1447
+ "[BiH4]": 1258,
1448
+ "[80Br-]": 1259,
1449
+ "[CeH3]": 1260,
1450
+ "[37ClH]": 1261,
1451
+ "[157Gd+3]": 1262,
1452
+ "[205Tl]": 1263,
1453
+ "[203Tl]": 1264,
1454
+ "[62Cu+]": 1265,
1455
+ "[64Cu+]": 1266,
1456
+ "[61Cu+]": 1267,
1457
+ "[37SH2]": 1268,
1458
+ "[30Si]": 1269,
1459
+ "[28Al]": 1270,
1460
+ "[19OH2]": 1271,
1461
+ "[8He]": 1272,
1462
+ "[6He]": 1273,
1463
+ "[153Pm]": 1274,
1464
+ "[209Bi]": 1275,
1465
+ "[66Zn+2]": 1276,
1466
+ "[10CH4]": 1277,
1467
+ "[191Ir]": 1278,
1468
+ "[66Cu]": 1279,
1469
+ "[16O+]": 1280,
1470
+ "[25O]": 1281,
1471
+ "[10c]": 1282,
1472
+ "[Co-3]": 1283,
1473
+ "[Sn@@]": 1284,
1474
+ "[17OH-]": 1285,
1475
+ "[206Po]": 1286,
1476
+ "[204Po]": 1287,
1477
+ "[202Po]": 1288,
1478
+ "[201Po]": 1289,
1479
+ "[200Po]": 1290,
1480
+ "[199Po]": 1291,
1481
+ "[198Po]": 1292,
1482
+ "[197Po]": 1293,
1483
+ "[196Po]": 1294,
1484
+ "[195Po]": 1295,
1485
+ "[194Po]": 1296,
1486
+ "[193Po]": 1297,
1487
+ "[192Po]": 1298,
1488
+ "[191Po]": 1299,
1489
+ "[190Po]": 1300,
1490
+ "[217Po]": 1301,
1491
+ "[BiH4-]": 1302,
1492
+ "[TeH4]": 1303,
1493
+ "[222Ra]": 1304,
1494
+ "[62Ga]": 1305,
1495
+ "[39Ar]": 1306,
1496
+ "[144Sm]": 1307,
1497
+ "[58Fe]": 1308,
1498
+ "[153Eu]": 1309,
1499
+ "[85Rb]": 1310,
1500
+ "[171Yb]": 1311,
1501
+ "[172Yb]": 1312,
1502
+ "[114Cd]": 1313,
1503
+ "[51Fe]": 1314,
1504
+ "[142Ce]": 1315,
1505
+ "[207Tl]": 1316,
1506
+ "[92Mo]": 1317,
1507
+ "[115Sn]": 1318,
1508
+ "[140Ce]": 1319,
1509
+ "[202Hg]": 1320,
1510
+ "[180W]": 1321,
1511
+ "[182W]": 1322,
1512
+ "[183W]": 1323,
1513
+ "[184W]": 1324,
1514
+ "[96Mo]": 1325,
1515
+ "[47Ti]": 1326,
1516
+ "[111Cd]": 1327,
1517
+ "[143Nd]": 1328,
1518
+ "[145Nd]": 1329,
1519
+ "[126Te]": 1330,
1520
+ "[128Te]": 1331,
1521
+ "[130Te]": 1332,
1522
+ "[185Re]": 1333,
1523
+ "[97Mo]": 1334,
1524
+ "[98Mo]": 1335,
1525
+ "[183Re]": 1336,
1526
+ "[52V]": 1337,
1527
+ "[80Se]": 1338,
1528
+ "[87Kr]": 1339,
1529
+ "[137Xe]": 1340,
1530
+ "[196Au]": 1341,
1531
+ "[146Ce]": 1342,
1532
+ "[88Kr]": 1343,
1533
+ "[51Ti]": 1344,
1534
+ "[138Xe]": 1345,
1535
+ "[112Cd]": 1346,
1536
+ "[116Sn]": 1347,
1537
+ "[120Sn]": 1348,
1538
+ "[28SiH3]": 1349,
1539
+ "[35S-]": 1350,
1540
+ "[15NH-]": 1351,
1541
+ "[13CH3+]": 1352,
1542
+ "[34S+]": 1353,
1543
+ "[34s]": 1354,
1544
+ "[SiH4-]": 1355,
1545
+ "[100Tc+5]": 1356,
1546
+ "[NiH2+2]": 1357,
1547
+ "[239Th]": 1358,
1548
+ "[186Lu]": 1359,
1549
+ "[AuH3]": 1360,
1550
+ "[I@@-]": 1361,
1551
+ "[XeH2]": 1362,
1552
+ "[B+]": 1363,
1553
+ "[16CH2]": 1364,
1554
+ "[8C]": 1365,
1555
+ "[TaH5]": 1366,
1556
+ "[FeH4-]": 1367,
1557
+ "[19C@H]": 1368,
1558
+ "[10NH]": 1369,
1559
+ "[FeH6-3]": 1370,
1560
+ "[22CH]": 1371,
1561
+ "[25N]": 1372,
1562
+ "[25N+]": 1373,
1563
+ "[25N-]": 1374,
1564
+ "[21CH2]": 1375,
1565
+ "[18cH]": 1376,
1566
+ "[113I]": 1377,
1567
+ "[ScH3]": 1378,
1568
+ "[30PH3]": 1379,
1569
+ "[43Ca+2]": 1380,
1570
+ "[41Ca+2]": 1381,
1571
+ "[106Cd]": 1382,
1572
+ "[122Sn]": 1383,
1573
+ "[18CH3]": 1384,
1574
+ "[58Co+3]": 1385,
1575
+ "[98Tc+4]": 1386,
1576
+ "[70Ge]": 1387,
1577
+ "[76Ge]": 1388,
1578
+ "[108Cd]": 1389,
1579
+ "[116Cd]": 1390,
1580
+ "[130Xe]": 1391,
1581
+ "[94Mo]": 1392,
1582
+ "[124Sn]": 1393,
1583
+ "[186Os]": 1394,
1584
+ "[188Os]": 1395,
1585
+ "[190Os]": 1396,
1586
+ "[192Os]": 1397,
1587
+ "[106Pd]": 1398,
1588
+ "[110Pd]": 1399,
1589
+ "[120Te]": 1400,
1590
+ "[132Ba]": 1401,
1591
+ "[134Ba]": 1402,
1592
+ "[136Ba]": 1403,
1593
+ "[136Ce]": 1404,
1594
+ "[138Ce]": 1405,
1595
+ "[156Dy]": 1406,
1596
+ "[158Dy]": 1407,
1597
+ "[160Dy]": 1408,
1598
+ "[163Dy]": 1409,
1599
+ "[162Er]": 1410,
1600
+ "[164Er]": 1411,
1601
+ "[167Er]": 1412,
1602
+ "[176Hf]": 1413,
1603
+ "[26Mg]": 1414,
1604
+ "[144Nd]": 1415,
1605
+ "[150Nd]": 1416,
1606
+ "[41K]": 1417,
1607
+ "[46Ti]": 1418,
1608
+ "[48Ti]": 1419,
1609
+ "[49Ti]": 1420,
1610
+ "[50Ti]": 1421,
1611
+ "[170Yb]": 1422,
1612
+ "[173Yb]": 1423,
1613
+ "[91Zr]": 1424,
1614
+ "[92Zr]": 1425,
1615
+ "[96Zr]": 1426,
1616
+ "[34S-]": 1427,
1617
+ "[CuH2-]": 1428,
1618
+ "[38Cl]": 1429,
1619
+ "[25Mg]": 1430,
1620
+ "[51V]": 1431,
1621
+ "[93Nb]": 1432,
1622
+ "[95Mo]": 1433,
1623
+ "[45Sc]": 1434,
1624
+ "[123Sb]": 1435,
1625
+ "[139La]": 1436,
1626
+ "[9Be]": 1437,
1627
+ "[99Y+3]": 1438,
1628
+ "[99Y]": 1439,
1629
+ "[156Ho]": 1440,
1630
+ "[67Zn]": 1441,
1631
+ "[144Ce+4]": 1442,
1632
+ "[210Tl]": 1443,
1633
+ "[42Ca]": 1444,
1634
+ "[54Fe]": 1445,
1635
+ "[193Ir]": 1446,
1636
+ "[92Nb]": 1447,
1637
+ "[141Cs]": 1448,
1638
+ "[52Cr]": 1449,
1639
+ "[35ClH]": 1450,
1640
+ "[46Ca]": 1451,
1641
+ "[139Cs]": 1452,
1642
+ "[65Cu]": 1453,
1643
+ "[71Ga]": 1454,
1644
+ "[60Ni]": 1455,
1645
+ "[16NH3]": 1456,
1646
+ "[148Nd]": 1457,
1647
+ "[72Ge]": 1458,
1648
+ "[161Dy]": 1459,
1649
+ "[49Ca]": 1460,
1650
+ "[43Ca]": 1461,
1651
+ "[8Be]": 1462,
1652
+ "[48Ca]": 1463,
1653
+ "[44Ca]": 1464,
1654
+ "[120Xe]": 1465,
1655
+ "[80Rb]": 1466,
1656
+ "[215At]": 1467,
1657
+ "[180Re]": 1468,
1658
+ "[146Sm]": 1469,
1659
+ "[19Ne]": 1470,
1660
+ "[74Kr]": 1471,
1661
+ "[134La]": 1472,
1662
+ "[76Kr]": 1473,
1663
+ "[219Fr]": 1474,
1664
+ "[121Xe]": 1475,
1665
+ "[220Fr]": 1476,
1666
+ "[216At]": 1477,
1667
+ "[223Ac]": 1478,
1668
+ "[218At]": 1479,
1669
+ "[37Ar]": 1480,
1670
+ "[135I]": 1481,
1671
+ "[110Cd]": 1482,
1672
+ "[94Tc+7]": 1483,
1673
+ "[86Y+3]": 1484,
1674
+ "[135I-]": 1485,
1675
+ "[15O-2]": 1486,
1676
+ "[151Eu+3]": 1487,
1677
+ "[161Tb+3]": 1488,
1678
+ "[197Hg+2]": 1489,
1679
+ "[109Cd+2]": 1490,
1680
+ "[191Os+4]": 1491,
1681
+ "[170Tm+3]": 1492,
1682
+ "[205Bi+3]": 1493,
1683
+ "[233U+4]": 1494,
1684
+ "[126Sb+3]": 1495,
1685
+ "[127Sb+3]": 1496,
1686
+ "[132Cs+]": 1497,
1687
+ "[136Eu+3]": 1498,
1688
+ "[136Eu]": 1499,
1689
+ "[125Sn+4]": 1500,
1690
+ "[175Yb+3]": 1501,
1691
+ "[100Mo]": 1502,
1692
+ "[22Ne]": 1503,
1693
+ "[13c-]": 1504,
1694
+ "[13NH4+]": 1505,
1695
+ "[17C]": 1506,
1696
+ "[9C]": 1507,
1697
+ "[31S]": 1508,
1698
+ "[31SH]": 1509,
1699
+ "[133I]": 1510,
1700
+ "[126I]": 1511,
1701
+ "[36SH]": 1512,
1702
+ "[30S]": 1513,
1703
+ "[32SH]": 1514,
1704
+ "[19CH2]": 1515,
1705
+ "[19c]": 1516,
1706
+ "[18c]": 1517,
1707
+ "[15F]": 1518,
1708
+ "[10C]": 1519,
1709
+ "[RuH-]": 1520,
1710
+ "[62Zn+2]": 1521,
1711
+ "[32ClH]": 1522,
1712
+ "[33ClH]": 1523,
1713
+ "[78BrH]": 1524,
1714
+ "[12Li+]": 1525,
1715
+ "[12Li]": 1526,
1716
+ "[233Ra]": 1527,
1717
+ "[68Ge+4]": 1528,
1718
+ "[44Sc+3]": 1529,
1719
+ "[91Y+3]": 1530,
1720
+ "[106Ru+3]": 1531,
1721
+ "[PoH2]": 1532,
1722
+ "[AtH]": 1533,
1723
+ "[55Fe]": 1534,
1724
+ "[233U]": 1535,
1725
+ "[210PoH2]": 1536,
1726
+ "[230Th]": 1537,
1727
+ "[228Th]": 1538,
1728
+ "[222Rn]": 1539,
1729
+ "[35SH2]": 1540,
1730
+ "[227Th]": 1541,
1731
+ "[192Ir]": 1542,
1732
+ "[133Xe]": 1543,
1733
+ "[81Kr]": 1544,
1734
+ "[95Zr]": 1545,
1735
+ "[240Pu]": 1546,
1736
+ "[54Mn]": 1547,
1737
+ "[103Ru]": 1548,
1738
+ "[95Nb]": 1549,
1739
+ "[109Cd]": 1550,
1740
+ "[141Ce]": 1551,
1741
+ "[85Kr]": 1552,
1742
+ "[110Ag]": 1553,
1743
+ "[58Co]": 1554,
1744
+ "[241Pu]": 1555,
1745
+ "[234Th]": 1556,
1746
+ "[140La]": 1557,
1747
+ "[63Ni]": 1558,
1748
+ "[152Eu]": 1559,
1749
+ "[132IH]": 1560,
1750
+ "[226Rn]": 1561,
1751
+ "[154Eu]": 1562,
1752
+ "[36ClH]": 1563,
1753
+ "[228Ac]": 1564,
1754
+ "[155Eu]": 1565,
1755
+ "[106Rh]": 1566,
1756
+ "[243Am]": 1567,
1757
+ "[227Ac]": 1568,
1758
+ "[243Cm]": 1569,
1759
+ "[236U]": 1570,
1760
+ "[144Pr]": 1571,
1761
+ "[232U]": 1572,
1762
+ "[32SH2]": 1573,
1763
+ "[88Y]": 1574,
1764
+ "[82BrH]": 1575,
1765
+ "[135IH]": 1576,
1766
+ "[242Cm]": 1577,
1767
+ "[115Cd]": 1578,
1768
+ "[242Pu]": 1579,
1769
+ "[46Sc]": 1580,
1770
+ "[56Mn]": 1581,
1771
+ "[234Pa]": 1582,
1772
+ "[41Ar]": 1583,
1773
+ "[147Nd]": 1584,
1774
+ "[187W]": 1585,
1775
+ "[151Sm]": 1586,
1776
+ "[59Ni]": 1587,
1777
+ "[233Pa]": 1588,
1778
+ "[52Mn]": 1589,
1779
+ "[94Nb]": 1590,
1780
+ "[219Rn]": 1591,
1781
+ "[236Pu]": 1592,
1782
+ "[13NH3]": 1593,
1783
+ "[93Zr]": 1594,
1784
+ "[51Cr+6]": 1595,
1785
+ "[TlH3]": 1596,
1786
+ "[123Xe]": 1597,
1787
+ "[160Tb]": 1598,
1788
+ "[170Tm]": 1599,
1789
+ "[182Ta]": 1600,
1790
+ "[175Yb]": 1601,
1791
+ "[93Mo]": 1602,
1792
+ "[143Ce]": 1603,
1793
+ "[191Os]": 1604,
1794
+ "[126IH]": 1605,
1795
+ "[48V]": 1606,
1796
+ "[113Cd]": 1607,
1797
+ "[47Sc]": 1608,
1798
+ "[181Hf]": 1609,
1799
+ "[185W]": 1610,
1800
+ "[143Pr]": 1611,
1801
+ "[191Pt]": 1612,
1802
+ "[181W]": 1613,
1803
+ "[33PH3]": 1614,
1804
+ "[97Ru]": 1615,
1805
+ "[97Tc]": 1616,
1806
+ "[111Ag]": 1617,
1807
+ "[169Er]": 1618,
1808
+ "[107Pd]": 1619,
1809
+ "[103Ru+2]": 1620,
1810
+ "[34SH2]": 1621,
1811
+ "[137Ce]": 1622,
1812
+ "[242Am]": 1623,
1813
+ "[117SnH2]": 1624,
1814
+ "[57Ni]": 1625,
1815
+ "[239U]": 1626,
1816
+ "[60Cu]": 1627,
1817
+ "[250Cf]": 1628,
1818
+ "[193Au]": 1629,
1819
+ "[69Zn]": 1630,
1820
+ "[55Co]": 1631,
1821
+ "[139Ce]": 1632,
1822
+ "[127Xe]": 1633,
1823
+ "[159Gd]": 1634,
1824
+ "[56Co]": 1635,
1825
+ "[177Hf]": 1636,
1826
+ "[244Pu]": 1637,
1827
+ "[38ClH]": 1638,
1828
+ "[142Pr]": 1639,
1829
+ "[199Hg]": 1640,
1830
+ "[179Hf]": 1641,
1831
+ "[178Hf]": 1642,
1832
+ "[237U]": 1643,
1833
+ "[156Eu]": 1644,
1834
+ "[157Eu]": 1645,
1835
+ "[105Ru]": 1646,
1836
+ "[171Tm]": 1647,
1837
+ "[199Au]": 1648,
1838
+ "[155Sm]": 1649,
1839
+ "[80BrH]": 1650,
1840
+ "[108Ag]": 1651,
1841
+ "[128IH]": 1652,
1842
+ "[48Sc]": 1653,
1843
+ "[45Ti]": 1654,
1844
+ "[176Lu]": 1655,
1845
+ "[121SnH2]": 1656,
1846
+ "[148Pm]": 1657,
1847
+ "[57Fe]": 1658,
1848
+ "[10BH3]": 1659,
1849
+ "[96Tc]": 1660,
1850
+ "[133IH]": 1661,
1851
+ "[143Pm]": 1662,
1852
+ "[105Rh]": 1663,
1853
+ "[130IH]": 1664,
1854
+ "[134IH]": 1665,
1855
+ "[131IH]": 1666,
1856
+ "[71Zn]": 1667,
1857
+ "[105Ag]": 1668,
1858
+ "[97Zr]": 1669,
1859
+ "[235Pu]": 1670,
1860
+ "[231Th]": 1671,
1861
+ "[109Pd]": 1672,
1862
+ "[93Y]": 1673,
1863
+ "[190Ir]": 1674,
1864
+ "[135Xe]": 1675,
1865
+ "[53Mn]": 1676,
1866
+ "[134Ce]": 1677,
1867
+ "[234Np]": 1678,
1868
+ "[240Am]": 1679,
1869
+ "[246Cf]": 1680,
1870
+ "[240Cm]": 1681,
1871
+ "[241Cm]": 1682,
1872
+ "[226Th]": 1683,
1873
+ "[39ClH]": 1684,
1874
+ "[229Th]": 1685,
1875
+ "[245Cm]": 1686,
1876
+ "[240U]": 1687,
1877
+ "[240Np]": 1688,
1878
+ "[249Cm]": 1689,
1879
+ "[243Pu]": 1690,
1880
+ "[145Pm]": 1691,
1881
+ "[199Pt]": 1692,
1882
+ "[246Bk]": 1693,
1883
+ "[193Pt]": 1694,
1884
+ "[230U]": 1695,
1885
+ "[250Cm]": 1696,
1886
+ "[44Ti]": 1697,
1887
+ "[175Hf]": 1698,
1888
+ "[254Fm]": 1699,
1889
+ "[255Fm]": 1700,
1890
+ "[257Fm]": 1701,
1891
+ "[92Y]": 1702,
1892
+ "[188Ir]": 1703,
1893
+ "[171Lu]": 1704,
1894
+ "[257Md]": 1705,
1895
+ "[247Bk]": 1706,
1896
+ "[121IH]": 1707,
1897
+ "[250Bk]": 1708,
1898
+ "[179Lu]": 1709,
1899
+ "[224Ac]": 1710,
1900
+ "[195Hg]": 1711,
1901
+ "[244Am]": 1712,
1902
+ "[246Pu]": 1713,
1903
+ "[194Au]": 1714,
1904
+ "[252Fm]": 1715,
1905
+ "[173Hf]": 1716,
1906
+ "[246Cm]": 1717,
1907
+ "[135Ce]": 1718,
1908
+ "[49Cr]": 1719,
1909
+ "[248Cf]": 1720,
1910
+ "[247Cm]": 1721,
1911
+ "[248Cm]": 1722,
1912
+ "[174Ta]": 1723,
1913
+ "[176Ta]": 1724,
1914
+ "[154Tb]": 1725,
1915
+ "[172Ta]": 1726,
1916
+ "[177Ta]": 1727,
1917
+ "[175Ta]": 1728,
1918
+ "[180Ta]": 1729,
1919
+ "[158Tb]": 1730,
1920
+ "[115Ag]": 1731,
1921
+ "[189Os]": 1732,
1922
+ "[251Cf]": 1733,
1923
+ "[145Pr]": 1734,
1924
+ "[147Pr]": 1735,
1925
+ "[76BrH]": 1736,
1926
+ "[102Rh]": 1737,
1927
+ "[238Np]": 1738,
1928
+ "[185Os]": 1739,
1929
+ "[246Am]": 1740,
1930
+ "[233Np]": 1741,
1931
+ "[166Dy]": 1742,
1932
+ "[254Es]": 1743,
1933
+ "[244Cf]": 1744,
1934
+ "[193Os]": 1745,
1935
+ "[245Am]": 1746,
1936
+ "[245Bk]": 1747,
1937
+ "[239Am]": 1748,
1938
+ "[238Am]": 1749,
1939
+ "[97Nb]": 1750,
1940
+ "[245Pu]": 1751,
1941
+ "[254Cf]": 1752,
1942
+ "[188W]": 1753,
1943
+ "[250Es]": 1754,
1944
+ "[251Es]": 1755,
1945
+ "[237Am]": 1756,
1946
+ "[182Hf]": 1757,
1947
+ "[258Md]": 1758,
1948
+ "[232Np]": 1759,
1949
+ "[238Cm]": 1760,
1950
+ "[60Fe]": 1761,
1951
+ "[109Pd+2]": 1762,
1952
+ "[234Pu]": 1763,
1953
+ "[141Ce+3]": 1764,
1954
+ "[136Nd]": 1765,
1955
+ "[136Pr]": 1766,
1956
+ "[173Ta]": 1767,
1957
+ "[110Ru]": 1768,
1958
+ "[147Tb]": 1769,
1959
+ "[253Fm]": 1770,
1960
+ "[139Nd]": 1771,
1961
+ "[178Re]": 1772,
1962
+ "[177Re]": 1773,
1963
+ "[200Au]": 1774,
1964
+ "[182Re]": 1775,
1965
+ "[156Tb]": 1776,
1966
+ "[155Tb]": 1777,
1967
+ "[157Tb]": 1778,
1968
+ "[161Tb]": 1779,
1969
+ "[161Ho]": 1780,
1970
+ "[167Tm]": 1781,
1971
+ "[173Lu]": 1782,
1972
+ "[179Ta]": 1783,
1973
+ "[171Er]": 1784,
1974
+ "[44Sc]": 1785,
1975
+ "[49Sc]": 1786,
1976
+ "[49V]": 1787,
1977
+ "[51Mn]": 1788,
1978
+ "[90Nb]": 1789,
1979
+ "[88Nb]": 1790,
1980
+ "[88Zr]": 1791,
1981
+ "[36SH2]": 1792,
1982
+ "[174Yb]": 1793,
1983
+ "[178Lu]": 1794,
1984
+ "[179W]": 1795,
1985
+ "[83BrH]": 1796,
1986
+ "[107Cd]": 1797,
1987
+ "[75BrH]": 1798,
1988
+ "[62Co]": 1799,
1989
+ "[48Cr]": 1800,
1990
+ "[63Zn]": 1801,
1991
+ "[102Ag]": 1802,
1992
+ "[154Sm]": 1803,
1993
+ "[168Er]": 1804,
1994
+ "[65Ni]": 1805,
1995
+ "[137La]": 1806,
1996
+ "[187Ir]": 1807,
1997
+ "[144Pm]": 1808,
1998
+ "[146Pm]": 1809,
1999
+ "[160Gd]": 1810,
2000
+ "[166Yb]": 1811,
2001
+ "[162Dy]": 1812,
2002
+ "[47V]": 1813,
2003
+ "[141Nd]": 1814,
2004
+ "[141Sm]": 1815,
2005
+ "[166Er]": 1816,
2006
+ "[150Sm]": 1817,
2007
+ "[146Eu]": 1818,
2008
+ "[149Eu]": 1819,
2009
+ "[174Lu]": 1820,
2010
+ "[17NH3]": 1821,
2011
+ "[102Ru]": 1822,
2012
+ "[170Hf]": 1823,
2013
+ "[188Pt]": 1824,
2014
+ "[61Ni]": 1825,
2015
+ "[56Ni]": 1826,
2016
+ "[149Gd]": 1827,
2017
+ "[151Gd]": 1828,
2018
+ "[141Pm]": 1829,
2019
+ "[147Gd]": 1830,
2020
+ "[146Gd]": 1831,
2021
+ "[161Er]": 1832,
2022
+ "[103Ag]": 1833,
2023
+ "[145Eu]": 1834,
2024
+ "[153Tb]": 1835,
2025
+ "[155Dy]": 1836,
2026
+ "[184Re]": 1837,
2027
+ "[180Os]": 1838,
2028
+ "[182Os]": 1839,
2029
+ "[186Pt]": 1840,
2030
+ "[181Os]": 1841,
2031
+ "[181Re]": 1842,
2032
+ "[151Tb]": 1843,
2033
+ "[178Ta]": 1844,
2034
+ "[178W]": 1845,
2035
+ "[189Pt]": 1846,
2036
+ "[194Hg]": 1847,
2037
+ "[145Sm]": 1848,
2038
+ "[150Tb]": 1849,
2039
+ "[132La]": 1850,
2040
+ "[158Gd]": 1851,
2041
+ "[104Ag]": 1852,
2042
+ "[193Hg]": 1853,
2043
+ "[94Ru]": 1854,
2044
+ "[137Pr]": 1855,
2045
+ "[155Ho]": 1856,
2046
+ "[117Cd]": 1857,
2047
+ "[99Ru]": 1858,
2048
+ "[146Nd]": 1859,
2049
+ "[218Rn]": 1860,
2050
+ "[95Y]": 1861,
2051
+ "[79Kr]": 1862,
2052
+ "[120IH]": 1863,
2053
+ "[138Pr]": 1864,
2054
+ "[100Pd]": 1865,
2055
+ "[166Tm]": 1866,
2056
+ "[90Mo]": 1867,
2057
+ "[151Nd]": 1868,
2058
+ "[231U]": 1869,
2059
+ "[138Nd]": 1870,
2060
+ "[89Nb]": 1871,
2061
+ "[98Nb]": 1872,
2062
+ "[162Ho]": 1873,
2063
+ "[142Sm]": 1874,
2064
+ "[186Ta]": 1875,
2065
+ "[104Tc]": 1876,
2066
+ "[184Ta]": 1877,
2067
+ "[185Ta]": 1878,
2068
+ "[170Er]": 1879,
2069
+ "[107Rh]": 1880,
2070
+ "[131La]": 1881,
2071
+ "[169Lu]": 1882,
2072
+ "[74BrH]": 1883,
2073
+ "[150Pm]": 1884,
2074
+ "[172Tm]": 1885,
2075
+ "[197Pt]": 1886,
2076
+ "[230Pu]": 1887,
2077
+ "[170Lu]": 1888,
2078
+ "[86Zr]": 1889,
2079
+ "[176W]": 1890,
2080
+ "[177W]": 1891,
2081
+ "[101Pd]": 1892,
2082
+ "[105Pd]": 1893,
2083
+ "[108Pd]": 1894,
2084
+ "[149Nd]": 1895,
2085
+ "[164Ho]": 1896,
2086
+ "[159Ho]": 1897,
2087
+ "[167Ho]": 1898,
2088
+ "[176Yb]": 1899,
2089
+ "[156Sm]": 1900,
2090
+ "[77BrH]": 1901,
2091
+ "[189Re]": 1902,
2092
+ "[99Rh]": 1903,
2093
+ "[100Rh]": 1904,
2094
+ "[151Pm]": 1905,
2095
+ "[232Pa]": 1906,
2096
+ "[228Pa]": 1907,
2097
+ "[230Pa]": 1908,
2098
+ "[66Ni]": 1909,
2099
+ "[194Os]": 1910,
2100
+ "[135La]": 1911,
2101
+ "[138La]": 1912,
2102
+ "[141La]": 1913,
2103
+ "[142La]": 1914,
2104
+ "[195Ir]": 1915,
2105
+ "[96Nb]": 1916,
2106
+ "[157Ho]": 1917,
2107
+ "[183Hf]": 1918,
2108
+ "[162Tm]": 1919,
2109
+ "[172Er]": 1920,
2110
+ "[148Eu]": 1921,
2111
+ "[150Eu]": 1922,
2112
+ "[15CH4]": 1923,
2113
+ "[89Kr]": 1924,
2114
+ "[143La]": 1925,
2115
+ "[58Ni]": 1926,
2116
+ "[61Co]": 1927,
2117
+ "[158Eu]": 1928,
2118
+ "[165Er]": 1929,
2119
+ "[167Yb]": 1930,
2120
+ "[173Tm]": 1931,
2121
+ "[175Tm]": 1932,
2122
+ "[172Hf]": 1933,
2123
+ "[172Lu]": 1934,
2124
+ "[93Tc]": 1935,
2125
+ "[177Yb]": 1936,
2126
+ "[124IH]": 1937,
2127
+ "[194Ir]": 1938,
2128
+ "[147Eu]": 1939,
2129
+ "[101Mo]": 1940,
2130
+ "[180Hf]": 1941,
2131
+ "[189Ir]": 1942,
2132
+ "[87Y]": 1943,
2133
+ "[43Sc]": 1944,
2134
+ "[195Au]": 1945,
2135
+ "[112Ag]": 1946,
2136
+ "[84BrH]": 1947,
2137
+ "[106Ag]": 1948,
2138
+ "[109Ag]": 1949,
2139
+ "[101Rh]": 1950,
2140
+ "[162Yb]": 1951,
2141
+ "[228Rn]": 1952,
2142
+ "[139Pr]": 1953,
2143
+ "[94Y]": 1954,
2144
+ "[201Au]": 1955,
2145
+ "[40PH3]": 1956,
2146
+ "[110Ag+]": 1957,
2147
+ "[104Cd]": 1958,
2148
+ "[133Ba+2]": 1959,
2149
+ "[226Ac]": 1960,
2150
+ "[145Gd]": 1961,
2151
+ "[186Ir]": 1962,
2152
+ "[184Ir]": 1963,
2153
+ "[224Rn]": 1964,
2154
+ "[185Ir]": 1965,
2155
+ "[182Ir]": 1966,
2156
+ "[184Hf]": 1967,
2157
+ "[200Pt]": 1968,
2158
+ "[227Pa]": 1969,
2159
+ "[178Yb]": 1970,
2160
+ "[72Br-]": 1971,
2161
+ "[72BrH]": 1972,
2162
+ "[248Am]": 1973,
2163
+ "[238Th]": 1974,
2164
+ "[161Gd]": 1975,
2165
+ "[35S-2]": 1976,
2166
+ "[107Ag]": 1977,
2167
+ "[FeH6-4]": 1978,
2168
+ "[89Sr]": 1979,
2169
+ "[SnH3-]": 1980,
2170
+ "[SeH3]": 1981,
2171
+ "[TeH3+]": 1982,
2172
+ "[SbH4+]": 1983,
2173
+ "[AsH4+]": 1984,
2174
+ "[4He]": 1985,
2175
+ "[AsH3-]": 1986,
2176
+ "[1HH]": 1987,
2177
+ "[3H+]": 1988,
2178
+ "[82Rb]": 1989,
2179
+ "[85Sr]": 1990,
2180
+ "[90Sr]": 1991,
2181
+ "[137Cs]": 1992,
2182
+ "[133Ba]": 1993,
2183
+ "[131Cs]": 1994,
2184
+ "[SbH5]": 1995,
2185
+ "[224Ra]": 1996,
2186
+ "[22Na]": 1997,
2187
+ "[210Bi]": 1998,
2188
+ "[214Bi]": 1999,
2189
+ "[228Ra]": 2000,
2190
+ "[127Sb]": 2001,
2191
+ "[136Cs]": 2002,
2192
+ "[125Sb]": 2003,
2193
+ "[134Cs]": 2004,
2194
+ "[140Ba]": 2005,
2195
+ "[45Ca]": 2006,
2196
+ "[206Pb]": 2007,
2197
+ "[207Pb]": 2008,
2198
+ "[24Na]": 2009,
2199
+ "[86Rb]": 2010,
2200
+ "[212Bi]": 2011,
2201
+ "[208Pb]": 2012,
2202
+ "[124Sb]": 2013,
2203
+ "[204Pb]": 2014,
2204
+ "[44K]": 2015,
2205
+ "[129Te]": 2016,
2206
+ "[113Sn]": 2017,
2207
+ "[204Tl]": 2018,
2208
+ "[87Sr]": 2019,
2209
+ "[208Tl]": 2020,
2210
+ "[87Rb]": 2021,
2211
+ "[47Ca]": 2022,
2212
+ "[135Cs]": 2023,
2213
+ "[216Po]": 2024,
2214
+ "[137Ba]": 2025,
2215
+ "[207Bi]": 2026,
2216
+ "[212Po]": 2027,
2217
+ "[79Se]": 2028,
2218
+ "[223Ra]": 2029,
2219
+ "[86Sr]": 2030,
2220
+ "[122Sb]": 2031,
2221
+ "[26Al]": 2032,
2222
+ "[32Si]": 2033,
2223
+ "[126Sn]": 2034,
2224
+ "[225Ra]": 2035,
2225
+ "[114In]": 2036,
2226
+ "[72Ga]": 2037,
2227
+ "[132Te]": 2038,
2228
+ "[10Be]": 2039,
2229
+ "[125Sn]": 2040,
2230
+ "[73As]": 2041,
2231
+ "[206Bi]": 2042,
2232
+ "[117Sn]": 2043,
2233
+ "[40Ca]": 2044,
2234
+ "[41Ca]": 2045,
2235
+ "[89Rb]": 2046,
2236
+ "[116In]": 2047,
2237
+ "[129Sb]": 2048,
2238
+ "[91Sr]": 2049,
2239
+ "[71Ge]": 2050,
2240
+ "[139Ba]": 2051,
2241
+ "[69Ga]": 2052,
2242
+ "[120Sb]": 2053,
2243
+ "[121Sn]": 2054,
2244
+ "[123Sn]": 2055,
2245
+ "[131Te]": 2056,
2246
+ "[77Ge]": 2057,
2247
+ "[135Ba]": 2058,
2248
+ "[82Sr]": 2059,
2249
+ "[43K]": 2060,
2250
+ "[131Ba]": 2061,
2251
+ "[92Sr]": 2062,
2252
+ "[88Rb]": 2063,
2253
+ "[129Cs]": 2064,
2254
+ "[144Cs]": 2065,
2255
+ "[127Cs]": 2066,
2256
+ "[200Tl]": 2067,
2257
+ "[202Tl]": 2068,
2258
+ "[141Ba]": 2069,
2259
+ "[117Sb]": 2070,
2260
+ "[116Sb]": 2071,
2261
+ "[78As]": 2072,
2262
+ "[131Sb]": 2073,
2263
+ "[126Sb]": 2074,
2264
+ "[128Sb]": 2075,
2265
+ "[130Sb]": 2076,
2266
+ "[67Ge]": 2077,
2267
+ "[68Ge]": 2078,
2268
+ "[78Ge]": 2079,
2269
+ "[66Ge]": 2080,
2270
+ "[223Fr]": 2081,
2271
+ "[132Cs]": 2082,
2272
+ "[125Cs]": 2083,
2273
+ "[138Cs]": 2084,
2274
+ "[133Te]": 2085,
2275
+ "[84Rb]": 2086,
2276
+ "[83Rb]": 2087,
2277
+ "[81Rb]": 2088,
2278
+ "[142Ba]": 2089,
2279
+ "[200Bi]": 2090,
2280
+ "[115Sb]": 2091,
2281
+ "[194Tl]": 2092,
2282
+ "[70Se]": 2093,
2283
+ "[112In]": 2094,
2284
+ "[118Sb]": 2095,
2285
+ "[70Ga]": 2096,
2286
+ "[27Mg]": 2097,
2287
+ "[202Bi]": 2098,
2288
+ "[83Se]": 2099,
2289
+ "[9Li]": 2100,
2290
+ "[69As]": 2101,
2291
+ "[79Rb]": 2102,
2292
+ "[81Sr]": 2103,
2293
+ "[83Sr]": 2104,
2294
+ "[78Se]": 2105,
2295
+ "[109In]": 2106,
2296
+ "[29Al]": 2107,
2297
+ "[118Sn]": 2108,
2298
+ "[117In]": 2109,
2299
+ "[119Sb]": 2110,
2300
+ "[114Sn]": 2111,
2301
+ "[138Ba]": 2112,
2302
+ "[69Ge]": 2113,
2303
+ "[73Ga]": 2114,
2304
+ "[74Ge]": 2115,
2305
+ "[206Tl]": 2116,
2306
+ "[199Tl]": 2117,
2307
+ "[130Cs]": 2118,
2308
+ "[28Mg]": 2119,
2309
+ "[116Te]": 2120,
2310
+ "[112Sn]": 2121,
2311
+ "[126Ba]": 2122,
2312
+ "[211Bi]": 2123,
2313
+ "[81Se]": 2124,
2314
+ "[127Sn]": 2125,
2315
+ "[143Cs]": 2126,
2316
+ "[134Te]": 2127,
2317
+ "[80Sr]": 2128,
2318
+ "[45K]": 2129,
2319
+ "[215Po]": 2130,
2320
+ "[207Po]": 2131,
2321
+ "[111Sn]": 2132,
2322
+ "[211Po]": 2133,
2323
+ "[128Ba]": 2134,
2324
+ "[198Tl]": 2135,
2325
+ "[227Ra]": 2136,
2326
+ "[213Po]": 2137,
2327
+ "[220Ra]": 2138,
2328
+ "[128Sn]": 2139,
2329
+ "[203Po]": 2140,
2330
+ "[205Po]": 2141,
2331
+ "[65Ga]": 2142,
2332
+ "[197Tl]": 2143,
2333
+ "[88Sr]": 2144,
2334
+ "[110In]": 2145,
2335
+ "[31Si]": 2146,
2336
+ "[201Bi]": 2147,
2337
+ "[121Te]": 2148,
2338
+ "[205Bi]": 2149,
2339
+ "[203Bi]": 2150,
2340
+ "[195Tl]": 2151,
2341
+ "[209Tl]": 2152,
2342
+ "[110Sn]": 2153,
2343
+ "[222Fr]": 2154,
2344
+ "[207At]": 2155,
2345
+ "[119In]": 2156,
2346
+ "[As@]": 2157,
2347
+ "[129IH]": 2158,
2348
+ "[157Dy]": 2159,
2349
+ "[111IH]": 2160,
2350
+ "[230Ra]": 2161,
2351
+ "[144Pr+3]": 2162,
2352
+ "[SiH3+]": 2163,
2353
+ "[3He]": 2164,
2354
+ "[AsH5]": 2165,
2355
+ "[72Se]": 2166,
2356
+ "[95Tc]": 2167,
2357
+ "[103Pd]": 2168,
2358
+ "[121Sn+2]": 2169,
2359
+ "[211Rn]": 2170,
2360
+ "[38SH2]": 2171,
2361
+ "[127IH]": 2172,
2362
+ "[74Br-]": 2173,
2363
+ "[133I-]": 2174,
2364
+ "[100Tc+4]": 2175,
2365
+ "[100Tc]": 2176,
2366
+ "[36Cl-]": 2177,
2367
+ "[89Y+3]": 2178,
2368
+ "[104Rh]": 2179,
2369
+ "[152Sm]": 2180,
2370
+ "[226Ra]": 2181,
2371
+ "[19FH]": 2182,
2372
+ "[104Pd]": 2183,
2373
+ "[148Gd]": 2184,
2374
+ "[157Lu]": 2185,
2375
+ "[33SH2]": 2186,
2376
+ "[121I-]": 2187,
2377
+ "[17FH]": 2188,
2378
+ "[71Se]": 2189,
2379
+ "[157Sm]": 2190,
2380
+ "[148Tb]": 2191,
2381
+ "[164Dy]": 2192,
2382
+ "[15OH2]": 2193,
2383
+ "[15O+]": 2194,
2384
+ "[39K]": 2195,
2385
+ "[40Ar]": 2196,
2386
+ "[50Cr+3]": 2197,
2387
+ "[50Cr]": 2198,
2388
+ "[52Ti]": 2199,
2389
+ "[103Pd+2]": 2200,
2390
+ "[130Ba]": 2201,
2391
+ "[142Pm]": 2202,
2392
+ "[153Gd+3]": 2203,
2393
+ "[151Eu]": 2204,
2394
+ "[103Rh]": 2205,
2395
+ "[124Xe]": 2206,
2396
+ "[152Tb]": 2207,
2397
+ "[17OH2]": 2208,
2398
+ "[20Ne]": 2209,
2399
+ "[52Fe]": 2210,
2400
+ "[94Zr+4]": 2211,
2401
+ "[94Zr]": 2212,
2402
+ "[149Pr]": 2213,
2403
+ "[16OH2]": 2214,
2404
+ "[53Cr+6]": 2215,
2405
+ "[53Cr]": 2216,
2406
+ "[81Br-]": 2217,
2407
+ "[112Pd]": 2218,
2408
+ "[125Xe]": 2219,
2409
+ "[155Gd]": 2220,
2410
+ "[157Gd]": 2221,
2411
+ "[168Yb]": 2222,
2412
+ "[184Os]": 2223,
2413
+ "[166Tb]": 2224,
2414
+ "[221Fr]": 2225,
2415
+ "[212Ra]": 2226,
2416
+ "[75Br-]": 2227,
2417
+ "[79Br-]": 2228,
2418
+ "[113Ag]": 2229,
2419
+ "[23Na]": 2230,
2420
+ "[34Cl-]": 2231,
2421
+ "[34ClH]": 2232,
2422
+ "[38Cl-]": 2233,
2423
+ "[56Fe]": 2234,
2424
+ "[68Cu]": 2235,
2425
+ "[77Br-]": 2236,
2426
+ "[90Zr+4]": 2237,
2427
+ "[90Zr]": 2238,
2428
+ "[102Pd]": 2239,
2429
+ "[154Eu+3]": 2240,
2430
+ "[57Mn]": 2241,
2431
+ "[165Tm]": 2242,
2432
+ "[152Dy]": 2243,
2433
+ "[217At]": 2244,
2434
+ "[77se]": 2245,
2435
+ "[13cH-]": 2246,
2436
+ "[122Te]": 2247,
2437
+ "[156Gd]": 2248,
2438
+ "[124Te]": 2249,
2439
+ "[53Ni]": 2250,
2440
+ "[131Xe]": 2251,
2441
+ "[174Hf+4]": 2252,
2442
+ "[174Hf]": 2253,
2443
+ "[76Se]": 2254,
2444
+ "[168Tm]": 2255,
2445
+ "[167Dy]": 2256,
2446
+ "[154Gd]": 2257,
2447
+ "[95Ru]": 2258,
2448
+ "[210At]": 2259,
2449
+ "[85Br]": 2260,
2450
+ "[59Co]": 2261,
2451
+ "[122Xe]": 2262,
2452
+ "[27Al]": 2263,
2453
+ "[54Cr]": 2264,
2454
+ "[198Hg]": 2265,
2455
+ "[85Rb+]": 2266,
2456
+ "[214Tl]": 2267,
2457
+ "[229Rn]": 2268,
2458
+ "[218Pb]": 2269,
2459
+ "[218Bi]": 2270,
2460
+ "[167Tm+3]": 2271,
2461
+ "[18o+]": 2272,
2462
+ "[P@@H+]": 2273,
2463
+ "[P@H+]": 2274,
2464
+ "[13N+]": 2275,
2465
+ "[212Pb+2]": 2276,
2466
+ "[217Bi]": 2277,
2467
+ "[249Cf+2]": 2278,
2468
+ "[18OH3+]": 2279,
2469
+ "[90Sr-]": 2280,
2470
+ "[Cf+3]": 2281,
2471
+ "[200Hg]": 2282,
2472
+ "[86Tc]": 2283,
2473
+ "[141Pr+3]": 2284,
2474
+ "[141Pr]": 2285,
2475
+ "[16nH]": 2286,
2476
+ "[14NH4+]": 2287,
2477
+ "[132Xe]": 2288,
2478
+ "[83Kr]": 2289,
2479
+ "[70Zn+2]": 2290,
2480
+ "[137Ba+2]": 2291,
2481
+ "[36Ar]": 2292,
2482
+ "[38Ar]": 2293,
2483
+ "[21Ne]": 2294,
2484
+ "[126Xe]": 2295,
2485
+ "[136Xe]": 2296,
2486
+ "[128Xe]": 2297,
2487
+ "[134Xe]": 2298,
2488
+ "[84Kr]": 2299,
2489
+ "[86Kr]": 2300,
2490
+ "[78Kr]": 2301,
2491
+ "[80Kr]": 2302,
2492
+ "[82Kr]": 2303,
2493
+ "[67Zn+2]": 2304,
2494
+ "[65Cu+2]": 2305,
2495
+ "[110Te]": 2306,
2496
+ "[58Fe+3]": 2307,
2497
+ "[142Nd]": 2308,
2498
+ "[38K]": 2309,
2499
+ "[198Au+3]": 2310,
2500
+ "[122IH]": 2311,
2501
+ "[38PH3]": 2312,
2502
+ "[130I-]": 2313,
2503
+ "[40K+]": 2314,
2504
+ "[38K+]": 2315,
2505
+ "[28Mg+2]": 2316,
2506
+ "[208Tl+]": 2317,
2507
+ "[13OH2]": 2318,
2508
+ "[198Bi]": 2319,
2509
+ "[192Bi]": 2320,
2510
+ "[194Bi]": 2321,
2511
+ "[196Bi]": 2322,
2512
+ "[132I-]": 2323,
2513
+ "[83Sr+2]": 2324,
2514
+ "[169Er+3]": 2325,
2515
+ "[122I-]": 2326,
2516
+ "[120I-]": 2327,
2517
+ "[92Sr+2]": 2328,
2518
+ "[126I-]": 2329,
2519
+ "[24Mg]": 2330,
2520
+ "[84Sr]": 2331,
2521
+ "[118Pd+2]": 2332,
2522
+ "[118Pd]": 2333,
2523
+ "[AsH4]": 2334,
2524
+ "[127I-]": 2335,
2525
+ "[9C-]": 2336,
2526
+ "[11CH3+]": 2337,
2527
+ "[17B]": 2338,
2528
+ "[7B]": 2339,
2529
+ "[4HH]": 2340,
2530
+ "[18C-]": 2341,
2531
+ "[22CH3-]": 2342,
2532
+ "[22CH4]": 2343,
2533
+ "[17C-]": 2344,
2534
+ "[15CH3]": 2345,
2535
+ "[16CH3]": 2346,
2536
+ "[11NH3]": 2347,
2537
+ "[21NH3]": 2348,
2538
+ "[11N-]": 2349,
2539
+ "[11NH]": 2350,
2540
+ "[16CH]": 2351,
2541
+ "[17CH2]": 2352,
2542
+ "[99Ru+2]": 2353,
2543
+ "[181Ta+2]": 2354,
2544
+ "[181Ta]": 2355,
2545
+ "[20CH]": 2356,
2546
+ "[32PH2]": 2357,
2547
+ "[55Fe+2]": 2358,
2548
+ "[SH3]": 2359,
2549
+ "[S@H]": 2360,
2550
+ "[UNK]": 2361
2551
+ },
2552
+ "merges": []
2553
+ }
2554
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[CLS]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[SEP]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[PAD]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[MASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "2361": {
36
+ "content": "[UNK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "extra_special_tokens": {},
47
+ "mask_token": "[MASK]",
48
+ "max_length": 256,
49
+ "model_max_length": 256,
50
+ "pad_to_multiple_of": 8,
51
+ "pad_token": "[PAD]",
52
+ "pad_token_type_id": 0,
53
+ "padding_side": "right",
54
+ "sep_token": "[SEP]",
55
+ "stride": 0,
56
+ "tokenizer_class": "PreTrainedTokenizerFast",
57
+ "truncation_side": "right",
58
+ "truncation_strategy": "longest_first",
59
+ "unk_token": "[UNK]"
60
+ }