linear_regression

LinearRegression(*, output_size, learning_rate=0.001, loss=None, a=None, b=None)

Bases: SupervisedIncrementalLearner

Linear regression learner.

Initialize the learner.

Parameters:

Name           Type           Description                                                       Default
output_size    int            The size of the output.                                           required
learning_rate  float          The learning rate.                                                0.001
loss           Module | None  The loss function. If None, nn.MSELoss is used.                   None
a              Tensor | None  Initial weights. If None (the default), random weights are used.  None
b              Tensor | None  Initial bias. If None (the default), random bias is used.         None
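
A minimal construction sketch, assuming the class can be imported from flowcean.torch.linear_regression (matching the source path below); the actual exported import path may differ:

from torch import nn

from flowcean.torch.linear_regression import LinearRegression  # assumed import path

# Default setup: random initial weights and bias, MSE loss, learning rate 1e-3.
learner = LinearRegression(output_size=2)

# All arguments are keyword-only; here with a custom loss and learning rate.
robust_learner = LinearRegression(
    output_size=2,
    learning_rate=0.01,
    loss=nn.L1Loss(),
)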
Source code in src/flowcean/torch/linear_regression.py
def __init__(
    self,
    *,
    output_size: int,
    learning_rate: float = 1e-3,
    loss: nn.Module | None = None,
    a: Tensor | None = None,
    b: Tensor | None = None,
) -> None:
    """Initialize the learner.

    Args:
        output_size: The size of the output.
        learning_rate: The learning rate.
        loss: The loss function.
        a: Initial weights. If None (the default), random weights are used.
        b: Initial bias. If None (the default), random bias is used.
    """
    self.model = nn.LazyLinear(output_size)
    if a is not None:
        self.model.weight.data = a
    if b is not None:
        self.model.bias.data = b
    self.loss = loss or nn.MSELoss()
    self.optimizer = SGD(
        self.model.parameters(),
        lr=learning_rate,
    )
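
The constructor wires up a lazy linear model, a loss, and an SGD optimizer. A minimal sketch of one manual gradient step using those attributes, with illustrative toy tensors only; nn.LazyLinear infers the input size on the first forward pass:

import torch

# Hypothetical toy batch: 8 samples, 3 input features, 2 output targets.
inputs = torch.randn(8, 3)
targets = torch.randn(8, 2)

learner = LinearRegression(output_size=2)

prediction = learner.model(inputs)        # LazyLinear materializes a (2, 3) weight here
loss = learner.loss(prediction, targets)  # nn.MSELoss by default
learner.optimizer.zero_grad()
loss.backward()
learner.optimizer.step()

In normal use the update would presumably be driven through the SupervisedIncrementalLearner interface (not shown here); the sketch only illustrates how the attributes created in __init__ fit together.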