devjas1 committed on
Commit
d6ff372
·
1 Parent(s): fe030dd

(FEAT)[Add Training Types Module]: Introduce core data structures and types for the training system, including the TrainingConfig and TrainingProgress classes, along with cross-validation strategies and data augmentation functionality.

Browse files
Files changed (1) hide show
  1. utils/training_types.py +128 -0
utils/training_types.py CHANGED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Defines core data structures and types for the training system.
3
+
4
+ This module centralizes data classes like TrainingConfig and helper
5
+ functions to avoid circular dependencies between the TrainingManager
6
+ and TrainingEngine.
7
+ """
8
+
9
+ from dataclasses import dataclass, asdict, field
10
+ from enum import Enum
11
+ from typing import List, Optional, Dict, Any, Tuple
12
+ from datetime import datetime
13
+ import numpy as np
14
+
15
+ from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
16
+
17
+
18
class TrainingStatus(Enum):
    """Lifecycle states for a training job.

    The string values are the serialized form used when a job's status
    is persisted or reported externally.
    """

    PENDING = "pending"        # queued, not yet started
    RUNNING = "running"        # actively training
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # aborted due to an error
    CANCELLED = "cancelled"    # stopped by user request
26
+
27
+
28
class CVStrategy(Enum):
    """Supported cross-validation splitting strategies.

    The string values match the ``cv_strategy`` field of
    ``TrainingConfig`` and the strategy names understood by
    ``get_cv_splitter``.
    """

    STRATIFIED_KFOLD = "stratified_kfold"    # class-balanced folds
    KFOLD = "kfold"                          # plain shuffled k-fold
    TIME_SERIES_SPLIT = "time_series_split"  # order-preserving splits
34
+
35
+
36
@dataclass
class TrainingConfig:
    """Immutable bundle of hyperparameters and options for one training run.

    Only ``model_name`` and ``dataset_path`` are required; everything else
    carries a sensible default so callers can construct a minimal config.
    """

    model_name: str                     # identifier of the model architecture
    dataset_path: str                   # location of the training data
    target_len: int = 500               # spectra are resampled to this length
    batch_size: int = 16
    epochs: int = 10
    learning_rate: float = 1e-3
    num_folds: int = 10                 # folds used for cross-validation
    baseline_correction: bool = True    # preprocessing toggles
    smoothing: bool = True
    normalization: bool = True
    modality: str = "raman"             # spectroscopy modality
    device: str = "auto"                # "auto", "cpu", or "cuda"
    cv_strategy: str = "stratified_kfold"   # see CVStrategy for valid names
    spectral_weight: float = 0.1        # weight of spectroscopy-specific metrics
    enable_augmentation: bool = False   # whether to augment training spectra
    noise_level: float = 0.01           # noise magnitude used by augmentation

    def to_dict(self) -> Dict[str, Any]:
        """Return a plain-dict view of this config, suitable for serialization."""
        return asdict(self)
60
+
61
+
62
@dataclass
class TrainingProgress:
    """Mutable snapshot of an in-flight training run.

    Updated incrementally as folds and epochs complete; the list fields
    accumulate one entry per finished fold.
    """

    current_fold: int = 0
    total_folds: int = 10
    current_epoch: int = 0
    total_epochs: int = 10
    current_loss: float = 0.0
    current_accuracy: float = 0.0
    # Per-fold results; default_factory keeps each instance's lists independent.
    fold_accuracies: List[float] = field(default_factory=list)
    confusion_matrices: List[List[List[int]]] = field(default_factory=list)
    spectroscopy_metrics: List[Dict[str, float]] = field(default_factory=list)
    # Wall-clock bounds of the run; None until set by the trainer.
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
77
+
78
+
79
def get_cv_splitter(strategy: str, n_splits: int = 10, random_state: int = 42):
    """Return a scikit-learn cross-validation splitter for *strategy*.

    Args:
        strategy: Strategy name ("stratified_kfold", "kfold",
            "time_series_split") or a ``CVStrategy`` enum member.
        n_splits: Number of folds/splits.
        random_state: Seed used by the shuffling strategies.

    Returns:
        A configured splitter. Unrecognized strategies fall back to
        ``StratifiedKFold`` so callers never receive ``None``.
    """
    # Accept CVStrategy members as well as their raw string values, so the
    # enum defined in this module can be passed directly.
    if isinstance(strategy, CVStrategy):
        strategy = strategy.value

    if strategy == "kfold":
        return KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
    if strategy == "time_series_split":
        # TimeSeriesSplit preserves temporal order, so no shuffle/seed.
        return TimeSeriesSplit(n_splits=n_splits)
    # "stratified_kfold" and any unknown value: default to stratified k-fold.
    return StratifiedKFold(
        n_splits=n_splits, shuffle=True, random_state=random_state
    )
94
+
95
+
96
def augment_spectral_data(
    X: np.ndarray,
    y: np.ndarray,
    noise_level: float = 0.01,
    augmentation_factor: int = 2,
    random_state: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """Augment spectral data with realistic noise and variations.

    Each augmentation pass adds Gaussian noise, a per-spectrum baseline
    drift, and a per-spectrum intensity scaling, then clips negative
    values (spectra are non-negative intensities).

    Args:
        X: 2-D array of spectra, shape (n_samples, n_points).
        y: 1-D label array, shape (n_samples,).
        noise_level: Standard deviation of the additive Gaussian noise.
        augmentation_factor: Total size multiplier; the originals plus
            (factor - 1) noisy copies are returned. Values <= 1 return
            the inputs unchanged.
        random_state: Optional seed for reproducible augmentation.

    Returns:
        Tuple of (augmented X, augmented y), with the original samples
        first, followed by the noisy copies in generation order.
    """
    if augmentation_factor <= 1:
        # Nothing to add — return the inputs untouched.
        return X, y

    rng = np.random.default_rng(random_state)
    augmented_X = [X]
    augmented_y = [y]

    for _ in range(augmentation_factor - 1):
        # Additive Gaussian noise on every point.
        X_aug = X + rng.normal(0.0, noise_level, X.shape)

        # Baseline drift, one offset per spectrum (common in spectroscopy).
        X_aug += rng.normal(0.0, noise_level * 0.5, (X.shape[0], 1))

        # Multiplicative intensity variation (~5%), one scale per spectrum.
        X_aug *= rng.normal(1.0, 0.05, (X.shape[0], 1))

        # Intensities cannot be negative; clip noise-induced negatives.
        augmented_X.append(np.maximum(X_aug, 0))
        augmented_y.append(y)

    return np.vstack(augmented_X), np.hstack(augmented_y)