Skip to content
Snippets Groups Projects
Select Git revision
  • 1993c4ab6c60cd08c11336835f62b154d9195d36
  • master default protected
2 results

Artificial_Neural_Network.py

Blame
  • Code owners
    Assign users and groups as approvers for specific file changes. Learn more.
    Artificial_Neural_Network.py 6.85 KiB
    # -*- coding: utf-8 -*-
    """
    @author: Laura C. Kühle, Soraya Terrab (sorayaterrab)
    
    TODO: Adapt '_train()' to fit style
    TODO: Add ANN testing from Soraya
    TODO: Add ANN classification from Soraya
    
    """
    import numpy as np
    import torch
    from torch.utils.data import TensorDataset, DataLoader
    
    
    class ArtificialNeuralNetwork(object):
        def _train(self):
            # Get Training/validation Datasets from Saved Files and Map to Torch Tensor and batched-datasets
            nt_smooth = 24000
            nt_troubled = 24000
            normalize = 1
            batch = 500
            train_input = "TrainInput"+str(int(nt_smooth / 1000))+"k"+str(int(nt_troubled / 1000))+"k"+"Normalized"+str(
                normalize)+".npy"
            train_output = "TrainOutput"+str(int(nt_smooth / 1000))+"k"+str(int(nt_troubled / 1000))+"k"+"Normalized"+str(
                0)+".npy"
            x_train = np.load(train_input)
            y_train = np.load(train_output)
            x_train, y_train = map(torch.tensor, (x_train, y_train))
            train_ds = TensorDataset(x_train, y_train)
            train_dl = DataLoader(train_ds, batch_size=batch, shuffle=True)
    
            nv_smooth = 8000
            nv_troubled = 8000
    
            valid_input = "ValidInput"+str(int(nv_smooth / 1000))+"k"+str(int(nv_troubled / 1000))+"k"+"Normalized"+str(
                normalize)+".npy"
            valid_output = "ValidOutput"+str(int(nv_smooth / 1000))+"k"+str(int(nv_troubled / 1000))+"k"+"Normalized"+str(
                0)+".npy"
            x_valid = np.load(valid_input)
            y_valid = np.load(valid_output)
            x_valid, y_valid = map(torch.tensor, (x_valid, y_valid))
            valid_ds = TensorDataset(x_valid, y_valid)
            valid_dl = DataLoader(valid_ds, batch_size=batch * 2)
    
            # Define Neural Network
    
            # Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
            class ThreeLayerNet(torch.nn.Module):
                def __init__(self, d_in, h, d_out):
                    super(ThreeLayerNet, self).__init__()
                    self.input_linear = torch.nn.Linear(d_in, h)
                    self.middle_linear = torch.nn.Linear(h, h)
                    self.output_linear = torch.nn.Linear(h, d_out)
                    self.output_softmax = torch.nn.Softmax(dim=1)
    
                def forward(self, x_in):
                    h_relu1 = self.input_linear(x_in).clamp(min=0)
                    h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
                    y_pred = self.output_linear(h_relu2)
                    y_pred = self.output_softmax(y_pred)
                    return y_pred
    
            # Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
            class ThreeLayerNetDifferentNeuronsSoftMax(torch.nn.Module):
                def __init__(self, d_in, h1, h2, d_out):
                    super(ThreeLayerNetDifferentNeuronsSoftMax, self).__init__()
                    self.input_linear = torch.nn.Linear(d_in, h1)
                    self.middle_linear = torch.nn.Linear(h1, h2)
                    self.output_linear = torch.nn.Linear(h2, d_out)
                    self.output_softmax = torch.nn.Softmax(dim=1)
    
                def forward(self, x_in):
                    h_relu1 = self.input_linear(x_in).clamp(min=0)
                    h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
                    y_pred = self.output_linear(h_relu2)
                    y_pred = self.output_softmax(y_pred)
                    return y_pred
    
            # Model with Linear -> 2 ReLU layers -> Linear -> SoftMax
            class ThreeLayerNetDifferentNeuronsSigmoid(torch.nn.Module):
                def __init__(self, d_in, h1, h2, d_out):
                    super(ThreeLayerNetDifferentNeuronsSigmoid, self).__init__()
                    self.input_linear = torch.nn.Linear(d_in, h1)
                    self.middle_linear = torch.nn.Linear(h1, h2)
                    self.output_linear = torch.nn.Linear(h2, d_out)
                    self.output_softmax = torch.nn.Sigmoid()
    
                def forward(self, x_in):
                    h_relu1 = self.input_linear(x_in).clamp(min=0)
                    h_relu2 = self.middle_linear(h_relu1).clamp(min=0)
                    y_pred = self.output_linear(h_relu2)
                    y_pred = self.output_softmax(y_pred)
                    return y_pred
    
            # Model using torch.nn.Sequential, another format
            # model = torch.nn.Sequential(
            #    torch.nn.Linear(d_in, h),
            #    torch.nn.ReLU(),
            #    torch.nn.Linear(h, d_out),
            # )
    
            # d_in is input dimension; h is hidden dimension; d_out is output dimension.
            # d_in, h, d_out = 5, 10, 2
    
            # d_in is input dimension; h1 is first hidden dimension;
            # h2 is second hidden dimension; d_out is output dimension.
            d_in_1, h1_1, h2_1, d_out_1 = 5, 8, 4, 2
    
            # Defining Model
            model = ThreeLayerNetDifferentNeuronsSoftMax(d_in_1, h1_1, h2_1, d_out_1)
    
            # Define Loss Function and Optimization method
            # loss_fn = torch.nn.MSELoss(reduction='sum')
            # loss_fn = torch.nn.CrossEntropyLoss()
            loss_fn = torch.nn.BCELoss()
            # loss_fn = torch.nn.BCEWithLogitsLoss()
            learning_rate = 1e-2
            optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
            # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
    
            # Training with Validation
            epochs = 1000
            threshold = 1e-5
            validation_loss = torch.zeros(10)
            for epoch in range(epochs):
                model.train()
                for x_batch, y_batch in train_dl:
                    pred = model(x_batch.float())
                    loss = loss_fn(pred, y_batch.float()).mean()
                    # if Loss_fn = torch.nn.CrossEntropyLoss() use below:
                    # loss = loss_fn(pred, y_batch.long()[:,0]).mean()
    
                    # Run back propagation, update the weights, and zero gradients for next epoch
                    loss.backward()
                    optimizer.step()
                    optimizer.zero_grad()
    
                model.eval()
                with torch.no_grad():
                    valid_loss = sum(
                        loss_fn(model(x_batch_valid.float()), y_batch_valid.float()) for x_batch_valid, y_batch_valid in
                        valid_dl)
    
                    # if Loss_fn = torch.nn.CrossEntropyLoss() use below:
                    # valid_loss = sum(loss_fn(model(x_batch_valid.float()), y_batch_valid.long()[:,
                    # 0]) for x_batch_valid, y_batch_valid in valid_dl)
    
                    if epoch % 100 == 99:
                        validation_loss[int(epoch / 99)-1] = valid_loss / len(valid_dl)
                        print(epoch, valid_loss / len(valid_dl))
    
                    if valid_loss / len(valid_dl) < threshold:
                        break
    
            # Saving Model
            norm = "1"
            nn = "2ReLU8+4nodesSM1"  # "2ReLU10nodesSM1"
            opt = "Adamlr1e-2"
            loss = "BCE"
            path = "Train24k24k_Valid8k8k_Norm"+norm+nn+opt+loss+".pt"
            torch.save(model.state_dict(), path)
    
            loss_file = "Loss"+path
            torch.save(validation_loss, loss_file)
            pass
    
        def _classify(self):
            pass