# NOTE(review): the following lines are repository-viewer scrape artifacts
# (page chrome, commit hashes, gutter line numbers), not source code.
# They are commented out so the module parses; safe to delete entirely.
# Spaces:
# Sleeping
# Sleeping
# File size: 4,951 Bytes
# 32ba915 7184c06 32ba915 7184c06 32ba915 7184c06 32ba915
"""
Test suite for ML Polymer Aging enhanced components
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
import torch
from modules.enhanced_data import (
EnhancedDataManager,
ContextualSpectrum,
SpectralMetadata,
)
from modules.transparent_ai import TransparentAIEngine, UncertaintyEstimator
from modules.educational_framework import EducationalFramework
def test_enhanced_data_manager():
    """Exercise EnhancedDataManager end to end.

    Covers quality scoring, preprocessing recommendations, and
    preprocessing with provenance tracking on a synthetic spectrum.

    Returns:
        bool: True if all steps complete without raising.
    """
    print("Testing Enhanced Data Manager...")

    data_manager = EnhancedDataManager()

    # Synthetic Raman-like spectrum: a Gaussian band centered at 2900 cm^-1
    # plus small Gaussian noise, over a 400-4000 cm^-1 axis (500 points).
    x_data = np.linspace(400, 4000, 500)
    y_data = np.exp(-(((x_data - 2900) / 100) ** 2)) + np.random.normal(0, 0.01, 500)
    metadata = SpectralMetadata(
        filename="test_spectrum.txt", instrument_type="Raman", laser_wavelength=785.0
    )
    spectrum = ContextualSpectrum(x_data, y_data, metadata)

    # Quality assessment on the raw intensity array.
    quality_score = data_manager._assess_data_quality(y_data)
    print(f"Quality score: {quality_score:.3f}")

    # Recommendations are fed straight back into the preprocessing call below.
    recommendations = data_manager.get_preprocessing_recommendations(spectrum)
    print(f"Preprocessing recommendations: {recommendations}")

    # Preprocessing with tracking should record provenance entries.
    processed_spectrum = data_manager.preprocess_with_tracking(
        spectrum, **recommendations
    )
    print(f"Provenance records: {len(processed_spectrum.provenance)}")

    print("✅ Enhanced Data Manager tests passed!")
    return True
def test_transparent_ai():
    """Exercise TransparentAIEngine and UncertaintyEstimator.

    Uses a minimal single-layer dummy model so the test is independent
    of any trained weights.

    Returns:
        bool: True if all steps complete without raising.
    """
    print("Testing Transparent AI Engine...")

    # Minimal stand-in model: one linear layer mapping 500 features -> 2 classes.
    class DummyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(500, 2)

        def forward(self, x):
            return self.linear(x)

    model = DummyModel()

    # Small n_samples keeps the Monte-Carlo style estimation fast for a test.
    uncertainty_estimator = UncertaintyEstimator(model, n_samples=10)
    x = torch.randn(1, 500)

    uncertainties = uncertainty_estimator.estimate_uncertainty(x)
    print(f"Uncertainty metrics: {uncertainties}")

    intervals = uncertainty_estimator.confidence_intervals(x)
    print(f"Confidence intervals: {intervals}")

    # The engine wraps the model and returns a structured explanation object.
    ai_engine = TransparentAIEngine(model)
    explanation = ai_engine.predict_with_explanation(x)
    print(f"Prediction: {explanation.prediction}")
    print(f"Confidence: {explanation.confidence:.3f}")
    print(f"Reasoning chain: {len(explanation.reasoning_chain)} steps")

    print("✅ Transparent AI tests passed!")
    return True
def test_educational_framework():
    """Exercise EducationalFramework: user init, assessment, learning path,
    virtual experiment, and analytics.

    Returns:
        bool: True if all steps complete without raising.
    """
    print("Testing Educational Framework...")

    framework = EducationalFramework()

    # User initialization returns a progress record keyed by user id.
    user_progress = framework.initialize_user("test_user")
    print(f"User initialized: {user_progress.user_id}")

    # Competency assessment: responses are answer indices for the domain's quiz.
    domain = "spectroscopy_basics"
    responses = [2, 1, 0]  # Sample responses
    results = framework.assess_user_competency(domain, responses)
    print(f"Assessment results: {results['score']:.2f}")

    # Personalized learning path toward the requested competencies.
    target_competencies = ["spectroscopy", "polymer_science"]
    learning_path = framework.get_personalized_learning_path(target_competencies)
    print(f"Learning path objectives: {len(learning_path)}")

    # Virtual experiment returns a dict; 'success' may be absent, hence .get().
    experiment_result = framework.run_virtual_experiment(
        "polymer_identification", {"polymer_type": "PE"}
    )
    print(f"Virtual experiment success: {experiment_result.get('success', False)}")

    analytics = framework.get_learning_analytics()
    print(f"Analytics available: {bool(analytics)}")

    print("✅ Educational Framework tests passed!")
    return True
def run_all_tests():
    """Run every component test, tolerating individual failures.

    Each test is wrapped in try/except so one failing component does not
    prevent the others from running; a summary is printed at the end.
    """
    print("Starting ML Polymer Aging Component Tests...\n")

    tests = [
        test_enhanced_data_manager,
        test_transparent_ai,
        test_educational_framework,
    ]

    passed = 0
    for test in tests:
        try:
            if test():
                passed += 1
            print()
        except Exception as e:
            # Broad catch is deliberate here: this is a test harness boundary
            # and the failure is reported rather than swallowed.
            print(f"❌ Test failed: {e}\n")

    print(f"Tests completed: {passed}/{len(tests)} passed")
    if passed == len(tests):
        print("🎉 All ML Polymer Aging components working correctly!")
    else:
        print("⚠️ Some components need attention")
# Script entry point: run the full suite only when executed directly.
if __name__ == "__main__":
    run_all_tests()