Coverage for src/meta_learning/meta_learning_modules/utils_modules/configurations.py: 0%
70 statements
« prev ^ index » next coverage.py v7.10.5, created at 2025-09-03 12:49 +0900
1"""
2Configuration Classes for Meta-Learning Utilities
3================================================
5Author: Benedict Chen (benedict@benedictchen.com)
7This module contains all configuration dataclasses and classes for meta-learning
8utilities, providing type-safe configuration management with sensible defaults.
9"""
11from dataclasses import dataclass
12from typing import Optional, List, Dict, Any
@dataclass
class TaskConfiguration:
    """Configuration for meta-learning tasks.

    Describes the episodic task layout (N-way, K-shot, Q-query) plus how
    many tasks to sample and how task difficulty should be estimated.
    """
    n_way: int = 5        # number of classes sampled per task
    k_shot: int = 5       # labeled support examples per class
    q_query: int = 15     # query examples per class used for evaluation
    num_tasks: int = 1000  # total number of tasks to generate
    task_type: str = "classification"
    augmentation_strategy: str = "basic"  # basic, advanced, none

    # Configuration options for difficulty estimation methods
    difficulty_estimation_method: str = "pairwise_distance"  # "pairwise_distance", "silhouette", "entropy", "knn"
    use_research_accurate_difficulty: bool = False  # Enable research-backed methods
@dataclass
class EvaluationConfig:
    """Configuration for meta-learning evaluation.

    Controls statistical reporting (confidence intervals, bootstrap
    resampling, significance level) and what is tracked per evaluation run.
    """
    confidence_intervals: bool = True   # report CIs alongside point estimates
    num_bootstrap_samples: int = 1000   # resamples used by bootstrap CI methods
    significance_level: float = 0.05    # alpha for statistical tests / CIs
    track_adaptation_curve: bool = True  # record per-step adaptation performance
    compute_uncertainty: bool = True

    # Configuration options for confidence interval methods
    ci_method: str = "bootstrap"  # "bootstrap", "t_distribution", "meta_learning_standard", "bca_bootstrap"
    use_research_accurate_ci: bool = False  # Enable research-backed CI methods
    num_episodes: int = 600  # Standard meta-learning evaluation protocol

    # Additional configuration for advanced CI methods
    min_sample_size_for_bootstrap: int = 30  # Minimum sample size for bootstrap vs t-distribution
    auto_method_selection: bool = True  # Automatically select best CI method based on data
class DatasetConfig:
    """Configuration for meta-learning dataset creation.

    Known options are stored as attributes; any extra keyword arguments are
    attached verbatim, so callers can carry custom settings without
    subclassing.
    """

    def __init__(
        self,
        dataset_type: str = "episodic",
        augmentation_strategy: str = "minimal",
        shuffle: bool = True,
        stratified: bool = True,
        normalize: bool = True,
        cache_episodes: bool = False,
        **kwargs
    ):
        # Bulk-assign the declared options, then the free-form extras.
        # Plain attribute writes and __dict__ updates are equivalent here
        # (no properties or slots on this class).
        self.__dict__.update(
            dataset_type=dataset_type,
            augmentation_strategy=augmentation_strategy,
            shuffle=shuffle,
            stratified=stratified,
            normalize=normalize,
            cache_episodes=cache_episodes,
        )
        self.__dict__.update(kwargs)
class MetricsConfig:
    """Configuration for evaluation metrics computation.

    Flags select which metrics are computed and recorded; unrecognized
    keyword arguments become attributes as-is for caller-specific options.
    """

    def __init__(
        self,
        compute_accuracy: bool = True,
        compute_loss: bool = True,
        compute_adaptation_speed: bool = False,
        compute_uncertainty: bool = False,
        track_gradients: bool = False,
        save_predictions: bool = False,
        **kwargs
    ):
        declared = {
            "compute_accuracy": compute_accuracy,
            "compute_loss": compute_loss,
            "compute_adaptation_speed": compute_adaptation_speed,
            "compute_uncertainty": compute_uncertainty,
            "track_gradients": track_gradients,
            "save_predictions": save_predictions,
        }
        # Declared flags first, then arbitrary extras (names can never
        # collide: passing a declared name via **kwargs is a TypeError).
        for attr, value in {**declared, **kwargs}.items():
            setattr(self, attr, value)
class StatsConfig:
    """Configuration for statistical analysis.

    Bundles the confidence level, bootstrap settings, and the choice of
    significance test, multiple-comparison correction, and effect-size
    method. Extra keyword arguments are stored as attributes unchanged.
    """

    def __init__(
        self,
        confidence_level: float = 0.95,
        num_bootstrap_samples: int = 1000,
        significance_test: str = "t_test",
        multiple_comparison_correction: str = "bonferroni",
        effect_size_method: str = "cohen_d",
        **kwargs
    ):
        # vars(self) is the instance __dict__; one update call replaces
        # six individual attribute assignments.
        vars(self).update(
            confidence_level=confidence_level,
            num_bootstrap_samples=num_bootstrap_samples,
            significance_test=significance_test,
            multiple_comparison_correction=multiple_comparison_correction,
            effect_size_method=effect_size_method,
            **kwargs,
        )
class CurriculumConfig:
    """Configuration for curriculum learning strategies.

    Defines the curriculum strategy name and the difficulty schedule
    (starting level, step size, ceiling, and patience before adapting).
    Unknown keyword arguments are kept as attributes for custom strategies.
    """

    def __init__(
        self,
        strategy: str = "difficulty_based",
        initial_difficulty: float = 0.3,
        difficulty_increment: float = 0.1,
        difficulty_threshold: float = 0.8,
        adaptation_patience: int = 5,
        **kwargs
    ):
        # Merge declared options with free-form extras in a single pass.
        settings = dict(
            strategy=strategy,
            initial_difficulty=initial_difficulty,
            difficulty_increment=difficulty_increment,
            difficulty_threshold=difficulty_threshold,
            adaptation_patience=adaptation_patience,
        )
        settings.update(kwargs)
        self.__dict__.update(settings)
class DiversityConfig:
    """Configuration for task diversity tracking.

    Selects the diversity metric and which distribution statistics are
    tracked; additional keyword arguments are attached verbatim as
    attributes.
    """

    def __init__(
        self,
        diversity_metric: str = "cosine_similarity",
        track_class_distribution: bool = True,
        track_feature_diversity: bool = True,
        diversity_threshold: float = 0.7,
        **kwargs
    ):
        # Declared options, then extras — equivalent to per-attribute
        # assignment followed by a setattr loop on a plain class.
        self.__dict__.update(
            diversity_metric=diversity_metric,
            track_class_distribution=track_class_distribution,
            track_feature_diversity=track_feature_diversity,
            diversity_threshold=diversity_threshold,
        )
        self.__dict__.update(kwargs)