
multi_layer_perceptron

MultilayerPerceptron(learning_rate, output_size, hidden_dimensions=None, *, activation_function=None)

Bases: LightningModule

A multilayer perceptron.

Initialize the model.

Parameters:

    learning_rate (float): The learning rate. Required.
    output_size (int): The size of the output. Required.
    hidden_dimensions (list[int] | None): The dimensions of the hidden layers. Default: None.
    activation_function (type[Module] | None): The activation function to use. Defaults to ReLU if not provided. Default: None.
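
As an illustration, a minimal construction sketch; the import path below is assumed from the source location shown on this page, and torch.nn.Tanh is only an example activation:

import torch

from flowcean.torch.multi_layer_perceptron import MultilayerPerceptron

# Two hidden layers of 64 and 32 units; Tanh replaces the default ReLU.
model = MultilayerPerceptron(
    learning_rate=1e-3,
    output_size=1,
    hidden_dimensions=[64, 32],
    activation_function=torch.nn.Tanh,
)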
Source code in src/flowcean/torch/multi_layer_perceptron.py
def __init__(
    self,
    learning_rate: float,
    output_size: int,
    hidden_dimensions: list[int] | None = None,
    *,
    activation_function: type[torch.nn.Module] | None = None,
) -> None:
    """Initialize the model.

    Args:
        learning_rate: The learning rate.
        output_size: The size of the output.
        hidden_dimensions: The dimensions of the hidden layers.
        activation_function: The activation function to use.
            Defaults to ReLU if not provided.
    """
    super().__init__()
    if hidden_dimensions is None:
        hidden_dimensions = []
    self.save_hyperparameters()
    self.learning_rate = learning_rate

    # Build one LazyLinear + activation pair per hidden dimension;
    # LazyLinear infers its input size from the first forward pass.
    layers: list[Module] = []
    for dimension in hidden_dimensions:
        layers.extend(
            (
                torch.nn.LazyLinear(dimension),
                activation_function()
                if activation_function
                else torch.nn.ReLU(),
            ),
        )
    # Final projection to the requested output size, with no activation.
    layers.append(torch.nn.LazyLinear(output_size))
    self.model = torch.nn.Sequential(*layers)
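
Because every layer is a torch.nn.LazyLinear, the input dimension is not fixed at construction time; it is inferred from the first batch pushed through the network. A hedged sketch of that behavior, continuing the example above and calling the underlying Sequential directly (the batch and feature sizes are illustrative):

# One dummy forward pass materializes the lazy layers.
dummy_batch = torch.randn(8, 10)   # 8 samples, 10 features (illustrative)
output = model.model(dummy_batch)  # first call fixes in_features=10 for the first layer
assert output.shape == (8, 1)      # output_size=1 from the construction above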