From e192c7d9572c14075358536cba1b14c7f828f032 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?K=C3=BChle=2C=20Laura=20Christine=20=28lakue103=29?=
 <laura.kuehle@uni-duesseldorf.de>
Date: Tue, 6 Apr 2021 19:44:55 +0200
Subject: [PATCH] Replaced all 'ThreeLayerNet' classes with 'ThreeLayerReLu'.

---
 ANN_Model.py                 | 87 +++++++++++++++---------------------
 Artificial_Neural_Network.py |  7 +--
 Troubled_Cell_Detector.py    |  7 ++-
 3 files changed, 46 insertions(+), 55 deletions(-)

diff --git a/ANN_Model.py b/ANN_Model.py
index d806738..59821a3 100644
--- a/ANN_Model.py
+++ b/ANN_Model.py
@@ -1,59 +1,46 @@
 # -*- coding: utf-8 -*-
 """
 @author: Laura C. Kühle, Soraya Terrab (sorayaterrab)
+
+TODO: Combine all ThreeLayerNet classes in one class -> Done
+
+INFO: /home/laura/anaconda3/lib/python3.7/site-packages/torch/nn/modules
+
 """
 import torch


 # Define Neural Network
-# Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
-class ThreeLayerNet(torch.nn.Module):
-    def __init__(self, d_in, h, d_out):
-        super(ThreeLayerNet, self).__init__()
-        self.input_linear = torch.nn.Linear(d_in, h)
-        self.middle_linear = torch.nn.Linear(h, h)
-        self.output_linear = torch.nn.Linear(h, d_out)
-        self.output_softmax = torch.nn.Softmax(dim=1)
-
-    def forward(self, x_in):
-        h_relu1 = self.input_linear(x_in).clamp(min=0)
-        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
-        y_pred = self.output_linear(h_relu2)
-        y_pred = self.output_softmax(y_pred)
-        return y_pred
-
-
-# Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
-class ThreeLayerNetDifferentNeuronsSoftMax(torch.nn.Module):
-    def __init__(self, d_in, h1, h2, d_out):
-        super(ThreeLayerNetDifferentNeuronsSoftMax, self).__init__()
-        self.input_linear = torch.nn.Linear(d_in, h1)
-        self.middle_linear = torch.nn.Linear(h1, h2)
-        self.output_linear = torch.nn.Linear(h2, d_out)
-        self.output_softmax = torch.nn.Softmax(dim=1)
-
-    def forward(self, x_in):
-        h_relu1 = self.input_linear(x_in).clamp(min=0)
-        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
-        y_pred = self.output_linear(h_relu2)
-        y_pred = self.output_softmax(y_pred)
-        return y_pred
-
-
-# Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
-class ThreeLayerNetDifferentNeuronsSigmoid(torch.nn.Module):
-    def __init__(self, d_in, h1, h2, d_out):
-        super(ThreeLayerNetDifferentNeuronsSigmoid, self).__init__()
-        self.input_linear = torch.nn.Linear(d_in, h1)
-        self.middle_linear = torch.nn.Linear(h1, h2)
-        self.output_linear = torch.nn.Linear(h2, d_out)
-        self.output_softmax = torch.nn.Sigmoid()
-
-    def forward(self, x_in):
-        h_relu1 = self.input_linear(x_in).clamp(min=0)
-        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
-        y_pred = self.output_linear(h_relu2)
-        y_pred = self.output_softmax(y_pred)
-        return y_pred
-
+# Model with Linear -> ReLU -> Linear -> ReLU -> Linear -> any activation function
+class ThreeLayerReLu(torch.nn.Module):
+    def __init__(self, config):
+        super().__init__()
+
+        input_size = config.pop('input_size', 5)
+        first_hidden_size = config.pop('first_hidden_size', 8)
+        second_hidden_size = config.pop('second_hidden_size', 4)
+        output_size = config.pop('output_size', 2)
+        activation_function = config.pop('activation_function', 'Sigmoid')
+        activation_config = config.pop('activation_config', {})
+
+        if not hasattr(torch.nn.modules.activation, activation_function):
+            raise ValueError('Invalid activation function: "%s"' % activation_function)
+
+        self._name = self.__class__.__name__ + '_' + str(first_hidden_size) + '_' + str(second_hidden_size) + '_'\
+                     + activation_function
+
+        self._input_layer = torch.nn.Linear(input_size, first_hidden_size)
+        self._first_hidden_layer = torch.nn.Linear(first_hidden_size, second_hidden_size)
+        self._second_hidden_layer = torch.nn.Linear(second_hidden_size, output_size)
+        self._output_layer = getattr(torch.nn.modules.activation, activation_function)(**activation_config)
+
+    def forward(self, input_data):
+        prediction = self._input_layer(input_data).clamp(min=0)
+        prediction = self._first_hidden_layer(prediction).clamp(min=0)
+        prediction = self._second_hidden_layer(prediction)
+        prediction = self._output_layer(prediction)
+        return prediction
+
+    def get_name(self):
+        return self._name

diff --git a/Artificial_Neural_Network.py b/Artificial_Neural_Network.py
index 932df6e..02a8cdd 100644
--- a/Artificial_Neural_Network.py
+++ b/Artificial_Neural_Network.py
@@ -30,7 +30,7 @@ class ModelTrainer(object):
         self.batch_size = config.pop('batch_size', 500)
         self._num_epochs = config.pop('num_epochs', 1000)
         self._threshold = config.pop('threshold', 1e-5)
-        self._model = config.pop('model', 'ThreeLayerNet')
+        self._model = config.pop('model', 'ThreeLayerReLu')
         self._model_config = config.pop('model_config', {})
         self._loss_function = config.pop('loss_function', 'BCELoss')
         self._loss_config = config.pop('loss_config', {})
@@ -169,8 +169,9 @@ class ModelTrainer(object):
         pass


-# d_in_1, h1_1, h2_1, d_out_1 = 5, 8, 4, 2
-# model = ANN_Model.ThreeLayerNetDifferentNeuronsSoftMax(d_in_1, h1_1, h2_1, d_out_1)
+# model_config = {'input_size': 5, 'first_hidden_size': 8, 'second_hidden_size': 4, 'output_size': 2,
+#                 'activation_function': 'Softmax', 'activation_config': {'dim': 1}}
+# model = ANN_Model.ThreeLayerReLu(model_config)
 # learning_rate = 1e-10
 # config = {'lr': learning_rate}
 # # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
diff --git a/Troubled_Cell_Detector.py b/Troubled_Cell_Detector.py
index b83b735..2d42b76 100644
--- a/Troubled_Cell_Detector.py
+++ b/Troubled_Cell_Detector.py
@@ -198,8 +198,11 @@ class ArtificialNeuralNetwork(TroubledCellDetector):
         super()._reset(config)

         self._stencil_len = config.pop('stencil_len', 3)
-        self._model = config.pop('model', 'ThreeLayerNetDifferentNeuronsSoftMax')
-        self._model_config = config.pop('model_config', {'d_in': self._stencil_len+2, 'h1': 8, 'h2': 4, 'd_out': 2})
+        self._model = config.pop('model', 'ThreeLayerReLu')
+        self._model_config = config.pop('model_config', {'input_size': self._stencil_len+2, 'first_hidden_size': 8,
+                                                         'second_hidden_size': 4, 'output_size': 2,
+                                                         'activation_function': 'Softmax',
+                                                         'activation_config': {'dim': 1}})
         self._model_state = config.pop('model_state', 'Train24k24k_Valid8k8k_Norm12ReLU8+4nodesSM1Adamlr1e-2MSE.pt')

         if not hasattr(ANN_Model, self._model):
-- 
GitLab
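
Usage sketch (not part of the patch): a minimal example of driving the unified
ThreeLayerReLu class, mirroring the defaults the patch wires into
Troubled_Cell_Detector.py. The random input batch and its size are illustrative
assumptions, not code from the repository.

    import torch

    import ANN_Model

    # Configuration mirroring the new defaults in Troubled_Cell_Detector.py:
    # five inputs (stencil_len + 2), hidden sizes 8 and 4, two output classes,
    # and a Softmax over dim=1 as the final activation.
    model_config = {'input_size': 5, 'first_hidden_size': 8, 'second_hidden_size': 4,
                    'output_size': 2, 'activation_function': 'Softmax',
                    'activation_config': {'dim': 1}}
    model = ANN_Model.ThreeLayerReLu(model_config)

    print(model.get_name())  # ThreeLayerReLu_8_4_Softmax

    # Hypothetical batch of ten 5-component stencil vectors; the output has
    # shape (10, 2) and each row sums to 1 because of the Softmax layer.
    batch = torch.rand(10, 5)
    prediction = model(batch)

Note that __init__ consumes its argument via config.pop(), so the dict is left
stripped of the recognised keys; reusing the same dict for a second
instantiation would silently fall back to the defaults.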