Skip to content

learner

XGBoostCallbackBridge(learner, callback_manager, max_iterations=None)

Bases: TrainingCallback

Bridge between XGBoost callbacks and flowcean callbacks.

This adapter forwards XGBoost training events to flowcean callbacks.

Source code in src/flowcean/xgboost/learner.py
20
21
22
23
24
25
26
27
28
def __init__(
    self,
    learner: Named,
    callback_manager: Any,
    max_iterations: int | None = None,
) -> None:
    """Set up the bridge.

    Args:
        learner: The flowcean learner whose training is being observed.
        callback_manager: Manager whose hooks receive the forwarded events.
        max_iterations: Total number of boosting rounds, if known; used to
            report a progress fraction during training.
    """
    self.max_iterations = max_iterations
    self.callback_manager = callback_manager
    self.learner = learner

before_training(model)

Called before training starts.

Source code in src/flowcean/xgboost/learner.py
30
31
32
33
34
def before_training(self, model: Any) -> Any:
    """Hook invoked by XGBoost just before training begins.

    XGBoost does not fire this hook early enough for our purposes, so the
    learner's ``learn`` method emits ``on_learning_start`` itself; here we
    only hand the model back unchanged, as the callback API requires.
    """
    return model

after_iteration(model, epoch, evals_log)

Called after each training iteration.

Source code in src/flowcean/xgboost/learner.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
def after_iteration(
    self,
    model: Any,  # noqa: ARG002
    epoch: int,
    evals_log: dict[str, Any],
) -> bool:
    """Forward per-iteration progress and metrics to flowcean callbacks.

    Args:
        model: The booster under training (unused; required by XGBoost).
        epoch: Zero-based index of the iteration that just finished.
        evals_log: XGBoost's nested ``{dataset: {metric: [values]}}`` log.

    Returns:
        ``False``, so XGBoost never stops training early on our account.
    """
    # Fraction of training completed, available only when the total
    # number of iterations was supplied at construction time.
    fraction = None
    if self.max_iterations and self.max_iterations > 0:
        fraction = (epoch + 1) / self.max_iterations

    # Flatten the nested log into "dataset_metric" keys, keeping only
    # the most recent value of each metric series.
    latest = {
        f"{dataset}_{name}": history[-1]
        for dataset, series in (evals_log or {}).items()
        for name, history in series.items()
        if history
    }
    metrics: dict[str, Any] = {"iteration": epoch + 1, **latest}

    self.callback_manager.on_learning_progress(
        self.learner,
        progress=fraction,
        metrics=metrics,
    )

    return False

after_training(model)

Called after training completes.

Source code in src/flowcean/xgboost/learner.py
66
67
68
69
70
def after_training(self, model: Any) -> Any:
    """Hook invoked by XGBoost once training has finished.

    The learner's ``learn`` method emits ``on_learning_end`` itself, where
    it has access to the wrapped model; nothing to do here beyond returning
    the model, as the XGBoost callback API requires.
    """
    return model

XGBoostClassifierLearner(threshold=0.5, callbacks=None, **kwargs)

Bases: SupervisedLearner

Wrapper for XGBoost classifiers.

Parameters:

Name Type Description Default
threshold float

Decision threshold for binary classification (default: 0.5).

0.5
callbacks list[LearnerCallback] | LearnerCallback | None

Optional callbacks for progress feedback. Use None for silent learning.

None
**kwargs Any

Arguments passed to XGBClassifier (n_estimators, max_depth, etc.)

{}
Source code in src/flowcean/xgboost/learner.py
84
85
86
87
88
89
90
91
92
93
def __init__(
    self,
    threshold: float = 0.5,
    callbacks: list[LearnerCallback] | LearnerCallback | None = None,
    **kwargs: Any,
) -> None:
    """Create the classifier learner.

    Args:
        threshold: Decision threshold for binary classification.
        callbacks: Callbacks for progress feedback; ``None`` stays silent.
        **kwargs: Forwarded verbatim to ``XGBClassifier`` (e.g.
            ``n_estimators``, ``max_depth``).
    """
    manager = create_callback_manager(callbacks)
    self.threshold = threshold
    self.callback_manager = manager
    self.classifier = XGBClassifier(**kwargs)
    super().__init__()

XGBoostRegressorLearner(callbacks=None, **kwargs)

Bases: SupervisedLearner

Wrapper for XGBoost regressors.

Parameters:

Name Type Description Default
callbacks list[LearnerCallback] | LearnerCallback | None

Optional callbacks for progress feedback. Use None for silent learning.

None
**kwargs Any

Arguments passed to XGBRegressor (n_estimators, max_depth, etc.)

{}
Source code in src/flowcean/xgboost/learner.py
162
163
164
165
166
167
168
169
def __init__(
    self,
    callbacks: list[LearnerCallback] | LearnerCallback | None = None,
    **kwargs: Any,
) -> None:
    """Create the regressor learner.

    Args:
        callbacks: Callbacks for progress feedback; ``None`` stays silent.
        **kwargs: Forwarded verbatim to ``XGBRegressor`` (e.g.
            ``n_estimators``, ``max_depth``).
    """
    self.callback_manager = create_callback_manager(callbacks)
    self.regressor = XGBRegressor(**kwargs)
    super().__init__()