Commit e192c7d9 authored by Laura Christine Kühle

Replaced all 'ThreeLayerNet' classes with 'ThreeLayerReLu'.

parent 777db854
# -*- coding: utf-8 -*-
"""
@author: Laura C. Kühle, Soraya Terrab (sorayaterrab)
TODO: Combine all ThreeLayerNet classes in one class -> Done
INFO: /home/laura/anaconda3/lib/python3.7/site-packages/torch/nn/modules
"""
import torch
# Define Neural Network
# Model: Linear -> ReLU -> Linear -> ReLU -> Linear -> Softmax
class ThreeLayerNet(torch.nn.Module):
    def __init__(self, d_in, h, d_out):
        super(ThreeLayerNet, self).__init__()
        self.input_linear = torch.nn.Linear(d_in, h)
        self.middle_linear = torch.nn.Linear(h, h)
        self.output_linear = torch.nn.Linear(h, d_out)
        self.output_softmax = torch.nn.Softmax(dim=1)

    def forward(self, x_in):
        # clamp(min=0) is equivalent to applying ReLU
        h_relu1 = self.input_linear(x_in).clamp(min=0)
        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
        y_pred = self.output_linear(h_relu2)
        y_pred = self.output_softmax(y_pred)
        return y_pred
# Model: Linear -> ReLU -> Linear -> ReLU -> Linear -> Softmax (separate hidden sizes)
class ThreeLayerNetDifferentNeuronsSoftMax(torch.nn.Module):
    def __init__(self, d_in, h1, h2, d_out):
        super(ThreeLayerNetDifferentNeuronsSoftMax, self).__init__()
        self.input_linear = torch.nn.Linear(d_in, h1)
        self.middle_linear = torch.nn.Linear(h1, h2)
        self.output_linear = torch.nn.Linear(h2, d_out)
        self.output_softmax = torch.nn.Softmax(dim=1)

    def forward(self, x_in):
        h_relu1 = self.input_linear(x_in).clamp(min=0)
        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
        y_pred = self.output_linear(h_relu2)
        y_pred = self.output_softmax(y_pred)
        return y_pred
# Model: Linear -> ReLU -> Linear -> ReLU -> Linear -> Sigmoid (separate hidden sizes)
class ThreeLayerNetDifferentNeuronsSigmoid(torch.nn.Module):
    def __init__(self, d_in, h1, h2, d_out):
        super(ThreeLayerNetDifferentNeuronsSigmoid, self).__init__()
        self.input_linear = torch.nn.Linear(d_in, h1)
        self.middle_linear = torch.nn.Linear(h1, h2)
        self.output_linear = torch.nn.Linear(h2, d_out)
        # The output activation here is a Sigmoid, not a Softmax
        self.output_sigmoid = torch.nn.Sigmoid()

    def forward(self, x_in):
        h_relu1 = self.input_linear(x_in).clamp(min=0)
        h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
        y_pred = self.output_linear(h_relu2)
        y_pred = self.output_sigmoid(y_pred)
        return y_pred
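
# As the commit message states, the three classes above are superseded by the
# single ThreeLayerReLu class below. For illustration (a sketch, not part of
# the original file), the Sigmoid variant with sizes 5, 8, 4, 2 maps onto the
# consolidated class as:
#
#     # ThreeLayerNetDifferentNeuronsSigmoid(5, 8, 4, 2) becomes:
#     model = ThreeLayerReLu({'input_size': 5, 'first_hidden_size': 8,
#                             'second_hidden_size': 4, 'output_size': 2,
#                             'activation_function': 'Sigmoid'})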
# Model with Linear -> ReLu -> Linear -> ReLu -> Linear -> any activation function
class ThreeLayerReLu(torch.nn.Module):
    def __init__(self, config):
        super().__init__()
        input_size = config.pop('input_size', 5)
        first_hidden_size = config.pop('first_hidden_size', 8)
        second_hidden_size = config.pop('second_hidden_size', 4)
        output_size = config.pop('output_size', 2)
        activation_function = config.pop('activation_function', 'Sigmoid')
        activation_config = config.pop('activation_config', {})

        if not hasattr(torch.nn.modules.activation, activation_function):
            raise ValueError('Invalid activation function: "%s"' % activation_function)

        self._name = self.__class__.__name__ + '_' + str(first_hidden_size) + '_' + str(second_hidden_size) \
            + '_' + activation_function

        self._input_layer = torch.nn.Linear(input_size, first_hidden_size)
        self._first_hidden_layer = torch.nn.Linear(first_hidden_size, second_hidden_size)
        self._second_hidden_layer = torch.nn.Linear(second_hidden_size, output_size)
        self._output_layer = getattr(torch.nn.modules.activation, activation_function)(**activation_config)

    def forward(self, input_data):
        prediction = self._input_layer(input_data).clamp(min=0)
        prediction = self._first_hidden_layer(prediction).clamp(min=0)
        prediction = self._second_hidden_layer(prediction)
        prediction = self._output_layer(prediction)
        return prediction

    def get_name(self):
        return self._name
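
# A minimal usage sketch of the consolidated class (the config keys and
# defaults are taken from the constructor above; the batch shape is an
# assumption for illustration):
if __name__ == '__main__':
    model = ThreeLayerReLu({'input_size': 5, 'first_hidden_size': 8,
                            'second_hidden_size': 4, 'output_size': 2,
                            'activation_function': 'Softmax',
                            'activation_config': {'dim': 1}})
    batch = torch.rand(10, 5)       # 10 samples with 5 features each
    probabilities = model(batch)    # shape (10, 2); rows sum to 1 under Softmax
    print(model.get_name())         # prints 'ThreeLayerReLu_8_4_Softmax'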
@@ -30,7 +30,7 @@ class ModelTrainer(object):
         self.batch_size = config.pop('batch_size', 500)
         self._num_epochs = config.pop('num_epochs', 1000)
         self._threshold = config.pop('threshold', 1e-5)
-        self._model = config.pop('model', 'ThreeLayerNet')
+        self._model = config.pop('model', 'ThreeLayerReLu')
         self._model_config = config.pop('model_config', {})
         self._loss_function = config.pop('loss_function', 'BCELoss')
         self._loss_config = config.pop('loss_config', {})
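
# Based on the defaults visible in this hunk, a trainer config selecting the
# consolidated model might look like this (a sketch using only the keys shown
# above; the rest of ModelTrainer's interface is not part of this diff):
#
#     trainer_config = {
#         'model': 'ThreeLayerReLu',
#         'model_config': {'input_size': 5, 'first_hidden_size': 8,
#                          'second_hidden_size': 4, 'output_size': 2,
#                          'activation_function': 'Softmax',
#                          'activation_config': {'dim': 1}},
#         'loss_function': 'BCELoss',
#     }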
@@ -169,8 +169,9 @@
     pass
-    # d_in_1, h1_1, h2_1, d_out_1 = 5, 8, 4, 2
-    # model = ANN_Model.ThreeLayerNetDifferentNeuronsSoftMax(d_in_1, h1_1, h2_1, d_out_1)
+    # model_config = {'input_size': 5, 'first_hidden_size': 8, 'second_hidden_size': 4, 'output_size': 2,
+    #                 'activation_function': 'Softmax', 'activation_config': {'dim': 1}}
+    # model = ANN_Model.ThreeLayerReLu(model_config)
     # learning_rate = 1e-10
     # config = {'lr': learning_rate}
     # # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
@@ -198,8 +198,11 @@ class ArtificialNeuralNetwork(TroubledCellDetector):
         super()._reset(config)
         self._stencil_len = config.pop('stencil_len', 3)
-        self._model = config.pop('model', 'ThreeLayerNetDifferentNeuronsSoftMax')
-        self._model_config = config.pop('model_config', {'d_in': self._stencil_len+2, 'h1': 8, 'h2': 4, 'd_out': 2})
+        self._model = config.pop('model', 'ThreeLayerReLu')
+        self._model_config = config.pop('model_config', {'input_size': self._stencil_len+2, 'first_hidden_size': 8,
+                                                         'second_hidden_size': 4, 'output_size': 2,
+                                                         'activation_function': 'Softmax',
+                                                         'activation_config': {'dim': 1}})
         self._model_state = config.pop('model_state', 'Train24k24k_Valid8k8k_Norm12ReLU8+4nodesSM1Adamlr1e-2MSE.pt')
         if not hasattr(ANN_Model, self._model):
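
# To make the new default concrete: with the default stencil_len of 3, the
# detector's model_config yields input_size = 3 + 2 = 5, matching the
# ThreeLayerReLu defaults. A hypothetical reconstruction of the model lookup
# that the hasattr guard above protects (the exact instantiation line is not
# shown in this diff):
#
#     model_class = getattr(ANN_Model, self._model)   # e.g. ThreeLayerReLu
#     model = model_class(self._model_config)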