Commit 87920cbb authored by Laura Christine Kühle

Removed unnecessary comments.

parent 1c790d43
@@ -5,9 +5,10 @@
 TODO: Improve 'epoch_training()'
 TODO: Add ANN testing from Soraya
 TODO: Add ANN classification from Soraya
-TODO: Improve naming of training data (maybe different folders?)
+TODO: Improve naming of training data/model (maybe different folders?)
 TODO: Adjust input file naming to fit training data -> Done
 TODO: Change code to add model directory if not existing -> Done
+TODO: Remove unnecessary comments -> Done
 """
 import numpy as np
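
For context on the naming TODO above: the commented-out code deleted in the next hunk built dataset file names from the sample counts and a normalization flag. A minimal sketch of that scheme, reconstructed from those removed comments (the helper name and example values are illustrative, not from the repository):

    def training_file_names(n_smooth, n_troubled, normalize):
        # Encode the sample counts in thousands, e.g. 24000 -> '24k'.
        sizes = str(n_smooth // 1000) + 'k' + str(n_troubled // 1000) + 'k'
        # Inputs carry the normalization flag; outputs (labels) always use 0.
        train_input = 'TrainInput' + sizes + 'Normalized' + str(normalize) + '.npy'
        train_output = 'TrainOutput' + sizes + 'Normalized0.npy'
        return train_input, train_output

    # training_file_names(24000, 24000, 1)
    # -> ('TrainInput24k24kNormalized1.npy', 'TrainOutput24k24kNormalized0.npy')

Keeping everything in one flat directory is what the TODO's "maybe different folders?" aims to fix.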
@@ -67,72 +68,23 @@ class ModelTrainer(object):
     @staticmethod
     def _read_data(input_file, output_file):
         return map(torch.tensor, (np.load(input_file), np.load(output_file)))
-        # return DataLoader(TensorDataset(input_data, output_data), batch_size=self._batch_size, shuffle=True)
 
     def epoch_training(self):
         # Get Training/validation Datasets from Saved Files and Map to Torch Tensor and batched-datasets
-        # nt_smooth = 24000
-        # nt_troubled = 24000
-        # normalize = 1
-        # batch = 500
-        # train_input = "TrainInput"+str(int(nt_smooth / 1000))+"k"+str(int(nt_troubled / 1000))+"k"+"Normalized"+str(
-        #     normalize)+".npy"
-        # train_output = "TrainOutput"+str(int(nt_smooth / 1000))+"k"+str(int(nt_troubled / 1000))+"k"+"Normalized"+str(
-        #     0)+".npy"
-        # x_train = np.load(train_input)
-        # y_train = np.load(train_output)
-        [x_train, y_train] = self._training_data['train']
+        x_train, y_train = self._training_data['train']
         train_ds = TensorDataset(x_train, y_train)
         train_dl = DataLoader(train_ds, batch_size=self._batch_size, shuffle=True)
-        #
-        # nv_smooth = 8000
-        # nv_troubled = 8000
-        #
-        # valid_input = "ValidInput"+str(int(nv_smooth / 1000))+"k"+str(int(nv_troubled / 1000))+"k"+"Normalized"+str(
-        #     normalize)+".npy"
-        # valid_output = "ValidOutput"+str(int(nv_smooth / 1000))+"k"+str(int(nv_troubled / 1000))+"k"+"Normalized"+str(
-        #     0)+".npy"
-        # x_valid = np.load(valid_input)
-        # y_valid = np.load(valid_output)
-        [x_valid, y_valid] = self._training_data['validation']
+        x_valid, y_valid = self._training_data['validation']
         valid_ds = TensorDataset(x_valid, y_valid)
         valid_dl = DataLoader(valid_ds, batch_size=self._batch_size * 2)
-        # Model using torch.nn.Sequential, another format
-        # model = torch.nn.Sequential(
-        #     torch.nn.Linear(d_in, h),
-        #     torch.nn.ReLU(),
-        #     torch.nn.Linear(h, d_out),
-        # )
-        # d_in is input dimension; h is hidden dimension; d_out is output dimension.
-        # d_in, h, d_out = 5, 10, 2
-        # d_in is input dimension; h1 is first hidden dimension;
-        # h2 is second hidden dimension; d_out is output dimension.
-        # d_in_1, h1_1, h2_1, d_out_1 = 5, 8, 4, 2
-        # Defining Model
-        # model = ThreeLayerNetDifferentNeuronsSoftMax(d_in_1, h1_1, h2_1, d_out_1)
-        # Define Loss Function and Optimization method
-        # loss_fn = torch.nn.MSELoss(reduction='sum')
-        # loss_fn = torch.nn.CrossEntropyLoss()
-        # loss_fn = torch.nn.BCELoss()
-        # loss_fn = torch.nn.BCEWithLogitsLoss()
-        # learning_rate = 1e-2
-        # optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)
-        # optimizer = torch.optim.SGD(self._model.parameters(), lr=learning_rate, momentum=0.5)
         # Training with Validation
-        # validation_loss = torch.zeros(10)
         for epoch in range(self._num_epochs):
             self._model.train()
             for x_batch, y_batch in train_dl:
                 pred = self._model(x_batch.float())
                 loss = self._loss_function(pred, y_batch.float()).mean()
-                # if Loss_fn = torch.nn.CrossEntropyLoss() use below:
-                # loss = loss_fn(pred, y_batch.long()[:,0]).mean()
                 # Run back propagation, update the weights, and zero gradients for next epoch
                 loss.backward()
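
The refactor above leans on self._training_data, which the retained _read_data() helper presumably fills. Since _read_data() returns a lazy map iterator, one plausible wiring (the file names and the dict construction are assumptions; the commit only shows the unpacking) materializes it as a tuple so each entry can be unpacked more than once:

    # Hypothetical setup; the .npy file names are placeholders, not from the commit.
    self._training_data = {
        'train': tuple(self._read_data('train_input.npy', 'train_output.npy')),
        'validation': tuple(self._read_data('valid_input.npy', 'valid_output.npy')),
    }
    # Matches the unpacking in epoch_training():
    x_train, y_train = self._training_data['train']

Storing the raw map object would work exactly once: a second unpacking would find an exhausted iterator and fail.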
@@ -145,10 +97,6 @@ class ModelTrainer(object):
                 self._loss_function(self._model(x_batch_valid.float()), y_batch_valid.float())
                 for x_batch_valid, y_batch_valid in valid_dl)
-            # if Loss_fn = torch.nn.CrossEntropyLoss() use below:
-            # valid_loss = sum(loss_fn(model(x_batch_valid.float()), y_batch_valid.long()[:,
-            #     0]) for x_batch_valid, y_batch_valid in valid_dl)
             if epoch % 100 == 99:
                 self._validation_loss[int(epoch / 99)-1] = valid_loss / len(valid_dl)
                 print(epoch, valid_loss / len(valid_dl))
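
The surviving buffer indexing is easy to misread: on every epoch where epoch % 100 == 99 holds, int(epoch / 99) - 1 happens to equal the plainer epoch // 100 for runs of up to 1000 epochs, the size the removed torch.zeros(10) buffer implies. A quick standalone check (not part of the commit):

    # Both expressions select slots 0..9 for epochs 99, 199, ..., 999.
    for epoch in range(1000):
        if epoch % 100 == 99:
            assert int(epoch / 99) - 1 == epoch // 100

The two expressions first diverge at epoch 9999, so epoch // 100 would be the safer and clearer choice if num_epochs ever grows.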
@@ -174,18 +122,8 @@ class ModelTrainer(object):
     pass
 
-# model_config = {'input_size': 5, 'first_hidden_size': 8, 'second_hidden_size': 4, 'output_size': 2,
-#                 'activation_function': 'Softmax', 'activation_config': {'dim': 1}}
-# model = ANN_Model.ThreeLayerReLu(model_config)
-# learning_rate = 1e-10
-# config = {'lr': learning_rate}
-# # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
-# optimizer = torch.optim.Adam(model.parameters(), **config)
-# print(optimizer.__dict__)
-# optimizer = torch.optim.SGD(model.parameters(), **config)
-# print(optimizer.__class__)
-# print(optimizer.__dict__)
-# loss_fn = torch.nn.BCEWithLogitsLoss()
-# print(loss_fn.__class__)
-# trainer = ModelTrainer({})
-# trainer.save_model()
+# Loss Functions: BCELoss, BCEWithLogitsLoss, CrossEntropyLoss (not working), MSELoss (with reduction='sum')
+# Optimizer: Adam, SGD
+trainer = ModelTrainer({'loss_function': 'MSELoss', 'loss_config': {'reduction': 'sum'}})
+trainer.epoch_training()
+trainer.save_model()
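
The new driver passes the loss purely as configuration, which suggests ModelTrainer resolves the string to a torch.nn class internally. The commit does not show __init__, so the following resolution is a guess at the pattern, not the project's actual code:

    import torch

    config = {'loss_function': 'MSELoss', 'loss_config': {'reduction': 'sum'}}
    # Look the class up by name on torch.nn and instantiate it with its kwargs.
    loss_function = getattr(torch.nn, config['loss_function'])(**config['loss_config'])
    # -> equivalent to the removed hand-written torch.nn.MSELoss(reduction='sum')

The same lookup would cover the other losses listed in the comment above, and getattr(torch.optim, ...) the optimizers.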