class AutoMLSequentialWrapper:
    """Sequentially run AutoML trials on a wrapped pipeline.

    Each iteration: refit the AutoML strategy on all past (hyperparams, score)
    pairs, guess the next hyperparameters to try, train a copy of the wrapped
    pipeline under the given validation technique, score the predictions, and
    persist the result in the hyperparams repository.
    """

    def __init__(self, wrapped_pipeline, auto_ml_strategy, validation_technique, score_function, hyperparams_repository, n_iters):
        """Store collaborators for the sequential AutoML loop.

        :param wrapped_pipeline: pipeline to tune; must expose
            ``get_hyperparams_space()`` and ``set_hyperparams(hps)``.
        :param auto_ml_strategy: strategy with ``fit(hps, scores)`` and
            ``guess_next_best_params(i, n_iters, space)``.
        :param validation_technique: callable wrapping a pipeline, returning an
            object with ``fit_transform(di, eo)``.
        :param score_function: ``score_function(predicted_eo, eo) -> float``.
        :param hyperparams_repository: store with ``load_all()``,
            ``register_new_untrained_trial(hps)`` and
            ``set_score_for_trial(hps, score)``.
        :param n_iters: number of trials to run.
        """
        # The original draft had a placeholder assignment here; persist every
        # collaborator so that fit() can reach them through self.
        self.wrapped_pipeline = wrapped_pipeline
        self.auto_ml_strategy = auto_ml_strategy
        self.validation_technique = validation_technique
        self.score_function = score_function
        self.hyperparams_repository = hyperparams_repository
        self.n_iters = n_iters

    def fit(self, di, eo):
        """Run ``n_iters`` sequential AutoML trials on (di, eo).

        :param di: data inputs.
        :param eo: expected outputs.
        :return: self (fluent convention, matching ``auto_ml_strategy.fit``).
        """
        for i in range(self.n_iters):  # was `for i in n_iters`: ints aren't iterable
            # Annotated tuple-unpacking (`a: T, b: U = ...`) is invalid syntax;
            # plain unpacking of the (hyperparams, scores) pair instead.
            hps, scores = self.hyperparams_repository.load_all()
            auto_ml_strategy = self.auto_ml_strategy.fit(hps, scores)
            next_model_to_try_hps = auto_ml_strategy.guess_next_best_params(
                i, self.n_iters, self.wrapped_pipeline.get_hyperparams_space())
            # Register before training so a crash mid-trial leaves a trace.
            self.hyperparams_repository.register_new_untrained_trial(next_model_to_try_hps)
            # Copy so the pristine wrapped_pipeline is reusable for later trials.
            validation_wrapper = self.validation_technique(
                copy(self.wrapped_pipeline).set_hyperparams(next_model_to_try_hps))
            validation_wrapper, predicted_eo = validation_wrapper.fit_transform(di, eo)
            score = self.score_function(predicted_eo, eo)  # TODO: review order of arguments here.
            self.hyperparams_repository.set_score_for_trial(next_model_to_try_hps, score)
        return self
# NOTE(review): I'd like to validate the OOP object structure. For instance,
# what will we do when we run trials in parallel? This for-loop is not enough;
# it would be more like a pool of workers, each trying one of the N next-best
# hyperparameter samples.