diff --git a/ANN_Data_Generator.py b/ANN_Data_Generator.py
index 81461f99e435d03ce1a2b653027d2e9820f89d8f..76bd497c8ee6add879b6db6be933947401c686e2 100644
--- a/ANN_Data_Generator.py
+++ b/ANN_Data_Generator.py
@@ -30,8 +30,8 @@ class TrainingDataGenerator(object):
         Builds random training data.
 
     """
-    def __init__(self, initial_conditions, left_bound=-1, right_bound=1, balance=0.5,
-                 stencil_length=3, directory='test_data'):
+    def __init__(self, initial_conditions, left_bound=-1, right_bound=1,
+                 balance=0.5, stencil_length=3, directory='test_data'):
         """Initializes TrainingDataGenerator.
 
         Parameters
@@ -47,7 +47,8 @@ class TrainingDataGenerator(object):
         stencil_length : int, optional
             Size of training data array. Default: 3.
         directory : str, optional
-            Path to directory in which training data is saved. Default: 'test_data'.
+            Path to directory in which training data is saved.
+            Default: 'test_data'.
 
         """
         self._balance = balance
@@ -56,7 +57,8 @@ class TrainingDataGenerator(object):
 
         # Set stencil length
         if stencil_length % 2 == 0:
-            raise ValueError('Invalid stencil length (even value): "%d"' % stencil_length)
+            raise ValueError('Invalid stencil length (even value): "%d"'
+                             % stencil_length)
         self._stencil_length = stencil_length
 
         # Separate smooth and discontinuous initial conditions
@@ -86,7 +88,8 @@ class TrainingDataGenerator(object):
         Returns
         -------
         data_dict : dict
-            Dictionary containing input (normalized and non-normalized) and output data.
+            Dictionary containing input (normalized and non-normalized) and
+            output data.
 
         """
         tic = time.perf_counter()
@@ -102,8 +105,8 @@ class TrainingDataGenerator(object):
     def _calculate_data_set(self, num_samples):
         """Calculates random training data of given stencil length.
 
-        Creates training data with a given ratio between smooth and discontinuous samples and
-        fixed stencil length.
+        Creates training data with a given ratio between smooth and
+        discontinuous samples and fixed stencil length.
 
         Parameters
         ----------
@@ -113,23 +116,26 @@ class TrainingDataGenerator(object):
         Returns
         -------
         dict
-            Dictionary containing input (normalized and non-normalized) and output data.
+            Dictionary containing input (normalized and non-normalized) and
+            output data.
 
         """
         num_smooth_samples = round(num_samples * self._balance)
-        smooth_input, smooth_output = self._generate_cell_data(num_smooth_samples,
-                                                               self._smooth_functions, True)
+        smooth_input, smooth_output = self._generate_cell_data(
+            num_smooth_samples, self._smooth_functions, True)
 
         num_troubled_samples = num_samples - num_smooth_samples
-        troubled_input, troubled_output = self._generate_cell_data(num_troubled_samples,
-                                                                   self._troubled_functions, False)
+        troubled_input, troubled_output = self._generate_cell_data(
+            num_troubled_samples, self._troubled_functions, False)
 
         # Merge Data
         input_matrix = np.concatenate((smooth_input, troubled_input), axis=0)
-        output_matrix = np.concatenate((smooth_output, troubled_output), axis=0)
+        output_matrix = np.concatenate((smooth_output, troubled_output),
+                                       axis=0)
 
         # Shuffle data while keeping correct input/output matches
-        order = np.random.permutation(num_smooth_samples + num_troubled_samples)
+        order = np.random.permutation(
+            num_smooth_samples + num_troubled_samples)
         input_matrix = input_matrix[order]
         output_matrix = output_matrix[order]
 
@@ -142,8 +148,9 @@ class TrainingDataGenerator(object):
     def _generate_cell_data(self, num_samples, initial_conditions, is_smooth):
         """Generates random training input and output.
 
-        Generates random training input and output for either smooth or discontinuous
-        initial conditions. For each input the output has the shape [is_smooth, is_troubled]
+        Generates random training input and output for either smooth or
+        discontinuous initial conditions. For each input the output has the
+        shape [is_smooth, is_troubled].
 
         Parameters
         ----------
@@ -174,7 +181,8 @@ class TrainingDataGenerator(object):
             # Select and initialize initial condition
             function_id = i % num_init_cond
             initial_condition = initial_conditions[function_id]['function']
-            initial_condition.randomize(initial_conditions[function_id]['config'])
+            initial_condition.randomize(
+                initial_conditions[function_id]['config'])
 
             # Build random stencil of given length
             interval, centers, spacing = self._build_stencil()
@@ -182,24 +190,27 @@ class TrainingDataGenerator(object):
             centers = [center[0] for center in centers]
 
             # Induce adjustment to capture troubled cells
-            adjustment = 0 if initial_condition.is_smooth else centers[self._stencil_length//2]
+            adjustment = 0 if initial_condition.is_smooth() \
+                else centers[self._stencil_length//2]
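+            # For discontinuous functions the jump is shifted into the
+            # center cell of the stencil; smooth samples need no shift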
             initial_condition.induce_adjustment(-spacing[0]/3)
 
             # Calculate basis coefficients for stencil
             polynomial_degree = np.random.randint(1, high=5)
             dg_scheme = DG_Approximation.DGScheme(
                 'NoDetection', polynomial_degree=polynomial_degree,
-                num_grid_cells=self._stencil_length, left_bound=left_bound, right_bound=right_bound,
-                quadrature='Gauss', quadrature_config={'num_eval_points': polynomial_degree+1})
-            input_data[i] = dg_scheme.build_training_data(adjustment, self._stencil_length,
-                                                          initial_condition)
+                num_grid_cells=self._stencil_length, left_bound=left_bound,
+                right_bound=right_bound, quadrature='Gauss',
+                quadrature_config={'num_eval_points': polynomial_degree+1})
+            input_data[i] = dg_scheme.build_training_data(
+                adjustment, self._stencil_length, initial_condition)
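+            # Each input row holds cell averages and reconstructions of
+            # the initial projection on the stencil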
 
             count += 1
             if count % 1000 == 0:
                 print(str(count) + ' samples completed.')
 
         toc = time.perf_counter()
-        print('Finished calculating data ' + troubled_indicator + ' troubled cells!')
+        print('Finished calculating data ' + troubled_indicator +
+              ' troubled cells!')
         print(f'Calculation time: {toc - tic:0.4f}s\n')
 
         # Set output data
@@ -211,7 +222,8 @@ class TrainingDataGenerator(object):
     def _build_stencil(self):
         """Builds random stencil.
 
-        Calculates fixed number of cell centers around a random point in a given 1D domain.
+        Calculates fixed number of cell centers around a random point in a
+        given 1D domain.
 
         Returns
         -------
@@ -231,8 +243,9 @@ class TrainingDataGenerator(object):
 
         # Adjust grid spacing if necessary for stencil creation
         while point - self._stencil_length/2 * grid_spacing < self._left_bound\
-                or point + self._stencil_length/2 * grid_spacing > self._right_bound:
-            grid_spacing = grid_spacing / 2
+                or point + self._stencil_length/2 * grid_spacing \
+                > self._right_bound:
+            grid_spacing /= 2
 
         # Build x-point stencil
         interval = np.array([point - self._stencil_length/2 * grid_spacing,
diff --git a/ANN_Model.py b/ANN_Model.py
index e0140b520eee4fd365b7a61798ce16ccfe1845ba..aa90f01af740b16e28204acd0158c413b7e5e510 100644
--- a/ANN_Model.py
+++ b/ANN_Model.py
@@ -55,16 +55,18 @@ class ThreeLayerReLu(torch.nn.Module):
         activation_config = config.pop('activation_config', {})
 
         if not hasattr(torch.nn.modules.activation, activation_function):
-            raise ValueError('Invalid activation function: "%s"' % activation_function)
+            raise ValueError('Invalid activation function: "%s"'
+                             % activation_function)
 
-        self._name = self.__class__.__name__ + '_' + str(first_hidden_size) + '_' \
-            + str(second_hidden_size) + '_' + activation_function
+        self._name = self.__class__.__name__ + '_' + str(first_hidden_size) + \
+            '_' + str(second_hidden_size) + '_' + activation_function
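+        # e.g. 'ThreeLayerReLu_8_4_Sigmoid' for (hypothetical) hidden
+        # sizes 8 and 4 with a Sigmoid output activation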
 
         self.input_linear = torch.nn.Linear(input_size, first_hidden_size)
-        self.middle_linear = torch.nn.Linear(first_hidden_size, second_hidden_size)
+        self.middle_linear = torch.nn.Linear(first_hidden_size,
+                                             second_hidden_size)
         self.output_linear = torch.nn.Linear(second_hidden_size, output_size)
-        self._output_layer = getattr(torch.nn.modules.activation, activation_function)(
-            **activation_config)
+        self._output_layer = getattr(torch.nn.modules.activation,
+                                     activation_function)(**activation_config)
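+        # Look up the activation class by name in torch.nn and instantiate
+        # it with any user-supplied configuration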
 
     def forward(self, input_data):
         """Executes forward propagation.
diff --git a/ANN_Training.py b/ANN_Training.py
index e7628a204253885df68c743e6a19eb52431227ec..ab52a28745a42e3706db4e4f1b17225792ca0ce8 100644
--- a/ANN_Training.py
+++ b/ANN_Training.py
@@ -13,7 +13,7 @@ TODO: Put legend outside plot (bbox_to_anchor)
 TODO: Put plotting into separate function
 TODO: Reduce number of testing epochs to 50
 TODO: Adapt docstring to uniform standard -> Done
-TODO: Change maximal line length to 79 (as advised by PEP8)
+TODO: Change maximal line length to 79 (as advised by PEP8) -> Done
 
 """
 import numpy as np
@@ -25,7 +25,8 @@ import torch
 import json
 from torch.utils.data import TensorDataset, DataLoader, random_split
 from sklearn.model_selection import KFold
-from sklearn.metrics import accuracy_score, precision_recall_fscore_support, roc_auc_score
+from sklearn.metrics import accuracy_score, precision_recall_fscore_support, \
+    roc_auc_score
 
 import ANN_Model
 from Plotting import plot_classification_accuracy, plot_boxplot
@@ -107,22 +108,23 @@ class ModelTrainer(object):
         self._optimizer = getattr(torch.optim, optimizer)(
             self._model.parameters(), **optimizer_config)
         self._validation_loss = torch.zeros(self._num_epochs//10)
-        print(type(self._model), type(self._loss_function), type(self._optimizer),
-              type(self._validation_loss))
+        print(type(self._model), type(self._loss_function),
+              type(self._optimizer), type(self._validation_loss))
 
     def epoch_training(self, dataset: torch.utils.data.dataset.TensorDataset,
                        num_epochs: int = None, verbose: bool = True) -> None:
         """Trains model for a given number of epochs.
 
-        Trains model and saves the validation loss. The training stops after the given number of
-        epochs or if the threshold is reached.
+        Trains model and saves the validation loss. The training stops after
+        the given number of epochs or if the threshold is reached.
 
         Parameters
         ----------
         dataset : torch.utils.data.dataset.TensorDataset
             Training dataset.
         num_epochs : int, optional
-            Number of epochs for training. Default: None (i.e. instance variable).
+            Number of epochs for training.
+            Default: None (i.e. instance variable).
         verbose : bool, optional
-            Flag whether commentary in console is wanted. Default: False.
+            Flag whether commentary in console is wanted. Default: True.
 
@@ -136,10 +138,12 @@ class ModelTrainer(object):
         num_samples = len(dataset)
         if verbose:
             print('Splitting data randomly into training and validation set.')
-        train_ds, valid_ds = random_split(dataset, [round(num_samples*0.8), round(num_samples*0.2)])
+        train_ds, valid_ds = random_split(dataset, [round(num_samples*0.8),
+                                                    round(num_samples*0.2)])
 
         # Load sets
-        train_dl = DataLoader(train_ds, batch_size=self._batch_size, shuffle=True)
+        train_dl = DataLoader(train_ds, batch_size=self._batch_size,
+                              shuffle=True)
         valid_dl = DataLoader(valid_ds, batch_size=self._batch_size * 2)
 
         # Training with Validation
@@ -153,7 +157,8 @@ class ModelTrainer(object):
                 pred = self._model(x_batch.float())
                 loss = self._loss_function(pred, y_batch.float()).mean()
 
-                # Run back propagation, update the weights, and zero gradients for next epoch
+                # Run back propagation, update the weights,
+                # and zero gradients for next epoch
                 loss.backward()
                 self._optimizer.step()
                 self._optimizer.zero_grad()
@@ -161,13 +166,16 @@ class ModelTrainer(object):
             self._model.eval()
             with torch.no_grad():
                 valid_loss = sum(
-                    self._loss_function(self._model(x_batch_valid.float()), y_batch_valid.float())
+                    self._loss_function(self._model(x_batch_valid.float()),
+                                        y_batch_valid.float())
                     for x_batch_valid, y_batch_valid in valid_dl)
 
                 if (epoch+1) % 100 == 0:
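+                    # Record the average validation loss once every
+                    # 100 epochs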
-                    self._validation_loss[int((epoch+1) / 100)-1] = valid_loss / len(valid_dl)
+                    self._validation_loss[int((epoch+1) / 100)-1] \
+                        = valid_loss / len(valid_dl)
                     if verbose:
-                        print(epoch+1, 'epochs completed. Loss:', valid_loss / len(valid_dl))
+                        print(epoch+1, 'epochs completed. Loss:',
+                              valid_loss / len(valid_dl))
 
                 if valid_loss / len(valid_dl) < self._threshold:
                     break
@@ -183,8 +191,9 @@ class ModelTrainer(object):
                    test_set: torch.utils.data.dataset.TensorDataset) -> dict:
         """Evaluates predictions of a model.
 
-        Trains a model and compares the predicted and true results by evaluating precision, recall,
-        and f-score for both classes, as well as accuracy and AUROC score.
+        Trains a model and compares the predicted and true results by
+        evaluating precision, recall, and f-score for both classes,
+        as well as accuracy and AUROC score.
 
         Parameters
         ----------
@@ -209,16 +218,21 @@ class ModelTrainer(object):
         y_true = y_test.detach().numpy()[:, 1]
         y_pred = model_output.detach().numpy()
         accuracy = accuracy_score(y_true, y_pred)
-        precision, recall, f_score, support = precision_recall_fscore_support(y_true, y_pred,
-                                                                              zero_division=0)
+        precision, recall, f_score, support = precision_recall_fscore_support(
+            y_true, y_pred, zero_division=0)
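+        # precision, recall, and f_score are arrays indexed by class:
+        # 0 = smooth, 1 = troubled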
         auroc = roc_auc_score(y_true, y_pred)
 
-        return {'Precision_Smooth': precision[0], 'Precision_Troubled': precision[1],
-                'Recall_Smooth': recall[0], 'Recall_Troubled': recall[1],
-                'F-Score_Smooth': f_score[0], 'F-Score_Troubled': f_score[1],
-                'Accuracy': accuracy, 'AUROC': auroc}
-
-    def save_model(self, directory: str, model_name: str = 'test_model') -> None:
+        return {'Precision_Smooth': precision[0],
+                'Precision_Troubled': precision[1],
+                'Recall_Smooth': recall[0],
+                'Recall_Troubled': recall[1],
+                'F-Score_Smooth': f_score[0],
+                'F-Score_Troubled': f_score[1],
+                'Accuracy': accuracy,
+                'AUROC': auroc}
+
+    def save_model(self, directory: str,
+                   model_name: str = 'test_model') -> None:
         """Saves state and validation loss of a model.
 
         Parameters
@@ -235,12 +249,14 @@ class ModelTrainer(object):
             os.makedirs(model_dir)
 
         # Save model and loss
-        torch.save(self._model.state_dict(), model_dir + '/model__' + model_name + '.pt')
-        torch.save(self._validation_loss, model_dir + '/loss__' + model_name + '.pt')
+        torch.save(self._model.state_dict(), model_dir + '/model__' +
+                   model_name + '.pt')
+        torch.save(self._validation_loss, model_dir + '/loss__' +
+                   model_name + '.pt')
 
 
-def read_training_data(directory: str,
-                       normalized: bool = True) -> torch.utils.data.dataset.TensorDataset:
+def read_training_data(directory: str, normalized: bool = True) -> \
+        torch.utils.data.dataset.TensorDataset:
     """Reads training data from directory.
 
     Parameters
@@ -257,12 +273,15 @@ def read_training_data(directory: str,
 
     """
     # Get training dataset from saved file and map to Torch tensor and dataset
-    input_file = directory + ('/normalized_input_data.npy' if normalized else '/input_data.npy')
+    input_file = directory + ('/normalized_input_data.npy'
+                              if normalized else '/input_data.npy')
     output_file = directory + '/output_data.npy'
-    return TensorDataset(*map(torch.tensor, (np.load(input_file), np.load(output_file))))
+    return TensorDataset(*map(torch.tensor, (np.load(input_file),
+                                             np.load(output_file))))
 
 
-def evaluate_models(models: dict, directory: str, num_iterations: int = 100, colors: dict = None,
+def evaluate_models(models: dict, directory: str, num_iterations: int = 100,
+                    colors: dict = None,
                     compare_normalization: bool = False) -> None:
     """Evaluates the classification of a given set of models.
 
@@ -275,9 +294,11 @@ def evaluate_models(models: dict, directory: str, num_iterations: int = 100, col
     num_iterations : int, optional
         Number of iterations for evaluation. Default: 100.
     colors : dict, optional
-        Dictionary containing plotting colors. If None, set to default colors. Default: None.
+        Dictionary containing plotting colors. If None, set to default colors.
+        Default: None.
     compare_normalization : bool, optional
-        Flag whether both normalized and raw data should be evaluated. Default: False.
+        Flag whether both normalized and raw data should be evaluated.
+        Default: False.
 
     """
     tic = time.perf_counter()
@@ -292,8 +313,10 @@ def evaluate_models(models: dict, directory: str, num_iterations: int = 100, col
     if compare_normalization:
         print('Read raw, non-normalized training data.')
         datasets['raw'] = read_training_data(directory, False)
-    classification_stats = {measure: {model + ' (' + dataset + ')': [] for model in models
-                                      for dataset in datasets} for measure in colors}
+    classification_stats = {measure: {model + ' (' + dataset + ')': []
+                                      for model in models
+                                      for dataset in datasets}
+                            for measure in colors}
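+    # Each measure maps 'model (dataset)' keys to a list collecting one
+    # result per cross-validation iteration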
 
     print('\nTraining models with 5-fold cross validation...')
     print('Number of iterations:', num_iterations)
@@ -308,24 +331,29 @@ def evaluate_models(models: dict, directory: str, num_iterations: int = 100, col
                 for model in models:
                     result = models[model].test_model(training_set, test_set)
                     for measure in colors:
-                        classification_stats[measure][model + ' (' + dataset + ')'].append(
+                        classification_stats[measure][
+                            model + ' (' + dataset + ')'].append(
                             result[measure])
-        if iteration+1%max(10, 10*(num_iterations//100)):
+        if (iteration+1) % max(10, 10*(num_iterations//100)) == 0:
             print(iteration+1, 'iterations completed.')
     toc_train = time.perf_counter()
     print('Finished training models with 5-fold cross validation!')
     print(f'Training time: {toc_train - tic_train:0.4f}s\n')
 
-    with open(directory + '/' + '_'.join(models.keys()) + '.json', 'w') as json_file:
+    with open(directory + '/' + '_'.join(models.keys()) + '.json', 'w') \
+            as json_file:
         json_file.write(json.dumps(classification_stats))
-    with open(directory + '/' + '_'.join(models.keys()) + '.json') as json_file:
+    with open(directory + '/' + '_'.join(models.keys()) + '.json') \
+            as json_file:
         classification_stats = json.load(json_file)
 
     print('Plotting evaluation of trained models.')
     plot_boxplot(classification_stats, colors)
     classification_stats = {measure: {model + ' (' + dataset + ')': np.array(
-        classification_stats[measure][model + ' (' + dataset + ')']).mean() for model in models
-                                      for dataset in datasets} for measure in colors}
+        classification_stats[measure][model + ' (' + dataset + ')']).mean()
+                                      for model in models
+                                      for dataset in datasets}
+                            for measure in colors}
     plot_classification_accuracy(classification_stats, colors)
 
     # Set paths for plot files if not existing already
@@ -341,6 +369,7 @@ def evaluate_models(models: dict, directory: str, num_iterations: int = 100, col
             os.makedirs(plot_dir + '/' + identifier)
 
         plt.figure(identifier)
-        plt.savefig(plot_dir + '/' + identifier + '/' + '_'.join(models.keys()) + '.pdf')
+        plt.savefig(plot_dir + '/' + identifier + '/' +
+                    '_'.join(models.keys()) + '.pdf')
     toc = time.perf_counter()
     print(f'Total runtime: {toc - tic:0.4f}s')
diff --git a/Basis_Function.py b/Basis_Function.py
index 197be81bc29ea0ff6561635cd5770718f92537eb..96d0b4ebff7304d4b5199357b2caebb266428f2b 100644
--- a/Basis_Function.py
+++ b/Basis_Function.py
@@ -2,7 +2,8 @@
 """
 @author: Laura C. Kühle
 
-TODO: Contemplate whether calculating projections during initialization can save time
+TODO: Contemplate whether calculating projections during initialization can
+    save time
 
 """
 import numpy as np
@@ -125,7 +126,8 @@ class Legendre(Vector):
         Returns
         -------
         ndarray
-            Vector containing Legendre polynomial evaluated at evaluation point.
+            Vector containing Legendre polynomial evaluated at evaluation
+            point.
 
         """
         vector = []
@@ -194,30 +196,44 @@ class OrthonormalLegendre(Legendre):
         if degree == 0:
             return [np.sqrt(0.5) + eval_point*0]
         if degree == 1:
-            return [np.sqrt(1.5) * (-1 + 2*eval_point), np.sqrt(0.5) * (-2 + 3*eval_point)]
+            return [np.sqrt(1.5) * (-1 + 2*eval_point),
+                    np.sqrt(0.5) * (-2 + 3*eval_point)]
         if degree == 2:
-            return [1/3 * np.sqrt(0.5) * (1 - 24*eval_point + 30*(eval_point**2)),
-                    1/2 * np.sqrt(1.5) * (3 - 16*eval_point + 15*(eval_point**2)),
-                    1/3 * np.sqrt(2.5) * (4 - 15*eval_point + 12*(eval_point**2))]
+            return [1/3 * np.sqrt(0.5) *
+                    (1 - 24*eval_point + 30*(eval_point**2)),
+                    1/2 * np.sqrt(1.5) *
+                    (3 - 16*eval_point + 15*(eval_point**2)),
+                    1/3 * np.sqrt(2.5) *
+                    (4 - 15*eval_point + 12*(eval_point**2))]
         if degree == 3:
-            return [np.sqrt(15/34) * (1 + 4*eval_point - 30*(eval_point**2) + 28*(eval_point**3)),
-                    np.sqrt(1/42) * (-4 + 105*eval_point - 300*(eval_point**2)
-                                     + 210*(eval_point**3)),
-                    1/2 * np.sqrt(35/34) * (-5 + 48*eval_point - 105*(eval_point**2)
-                                            + 64*(eval_point**3)),
-                    1/2 * np.sqrt(5/42) * (-16 + 105*eval_point - 192*(eval_point**2)
-                                           + 105*(eval_point**3))]
+            return [np.sqrt(15/34) *
+                    (1 + 4*eval_point - 30*(eval_point**2)
+                     + 28*(eval_point**3)),
+                    np.sqrt(1/42) *
+                    (-4 + 105*eval_point - 300*(eval_point**2)
+                     + 210*(eval_point**3)),
+                    1/2 * np.sqrt(35/34) *
+                    (-5 + 48*eval_point - 105*(eval_point**2)
+                     + 64*(eval_point**3)),
+                    1/2 * np.sqrt(5/42) *
+                    (-16 + 105*eval_point - 192*(eval_point**2)
+                     + 105*(eval_point**3))]
         if degree == 4:
-            return [np.sqrt(1/186) * (1 + 30*eval_point + 210*(eval_point**2)
-                                      - 840*(eval_point**3) + 630*(eval_point**4)),
-                    0.5 * np.sqrt(1/38) * (-5 - 144*eval_point + 1155*(eval_point**2)
-                                           - 2240*(eval_point**3) + 1260*(eval_point**4)),
-                    np.sqrt(35/14694) * (22 - 735*eval_point + 3504*(eval_point**2)
-                                         - 5460*(eval_point**3) + 2700*(eval_point**4)),
-                    1/8 * np.sqrt(21/38) * (35 - 512*eval_point + 1890*(eval_point**2)
-                                            - 2560*(eval_point**3) + 1155*(eval_point**4)),
-                    0.5 * np.sqrt(7/158) * (32 - 315*eval_point + 960*(eval_point**2)
-                                            - 1155*(eval_point**3) + 480*(eval_point**4))]
+            return [np.sqrt(1/186) *
+                    (1 + 30*eval_point + 210*(eval_point**2)
+                     - 840*(eval_point**3) + 630*(eval_point**4)),
+                    0.5 * np.sqrt(1/38) *
+                    (-5 - 144*eval_point + 1155*(eval_point**2)
+                     - 2240*(eval_point**3) + 1260*(eval_point**4)),
+                    np.sqrt(35/14694) *
+                    (22 - 735*eval_point + 3504*(eval_point**2)
+                     - 5460*(eval_point**3) + 2700*(eval_point**4)),
+                    1/8 * np.sqrt(21/38) *
+                    (35 - 512*eval_point + 1890*(eval_point**2)
+                     - 2560*(eval_point**3) + 1155*(eval_point**4)),
+                    0.5 * np.sqrt(7/158) *
+                    (32 - 315*eval_point + 960*(eval_point**2)
+                     - 1155*(eval_point**3) + 480*(eval_point**4))]
 
         raise ValueError('Invalid value: Alpert\'s wavelet is only available \
                          up to degree 4 for this application')
@@ -228,8 +244,8 @@ class OrthonormalLegendre(Legendre):
         Returns
         -------
         ndarray
-            Array containing the basis projection based on the integrals of the product
-            of two basis vectors for each degree combination.
+            Array containing the basis projection based on the integrals of
+            the product of two basis vectors for each degree combination.
 
         """
         basis_projection_left = self._build_basis_matrix(z, 0.5 * (z - 1))
@@ -269,15 +285,19 @@ class OrthonormalLegendre(Legendre):
         Returns
         -------
         ndarray
-            Array containing the multiwavelet projection based on the integrals of the product
-            of a basis vector and a wavelet vector for each degree combination.
+            Array containing the multiwavelet projection based on the integrals
+            of the product of a basis vector and a wavelet vector for each
+            degree combination.
 
         """
-        wavelet_projection_left = self._build_multiwavelet_matrix(z, -0.5*(z-1), True)
-        wavelet_projection_right = self._build_multiwavelet_matrix(z, 0.5*(z+1), False)
+        wavelet_projection_left = self._build_multiwavelet_matrix(
+            z, -0.5*(z-1), True)
+        wavelet_projection_right = self._build_multiwavelet_matrix(
+            z, 0.5*(z+1), False)
         return wavelet_projection_left, wavelet_projection_right
 
-    def _build_multiwavelet_matrix(self, first_param, second_param, is_left_matrix):
+    def _build_multiwavelet_matrix(self, first_param, second_param,
+                                   is_left_matrix):
         """Constructs a multiwavelet matrix.
 
         Parameters
@@ -292,7 +312,8 @@ class OrthonormalLegendre(Legendre):
         Returns
         -------
         ndarray
-            Matrix containing the integral of products of a basis and a wavelet vector.
+            Matrix containing the integral of products of a basis and a wavelet
+            vector.
 
         """
         matrix = []
diff --git a/DG_Approximation.py b/DG_Approximation.py
index 3d24bd0caec0733849d3c9780c92a3c9b4a69d99..a4d675efab14ba6f3df5231564729b9f29bc049b 100644
--- a/DG_Approximation.py
+++ b/DG_Approximation.py
@@ -12,13 +12,15 @@ TODO: Check whether all types in doc are correct
 TODO: Check whether 'projection' is always a np.array()
 TODO: Check whether all instance variables sensible
 TODO: Use cfl_number for updating, not just time
-TODO: Adjust code to allow classes for all equations (Burger, linear advection, 1D Euler)
+TODO: Adjust code to allow classes for all equations
+    (Burger, linear advection, 1D Euler)
 TODO: Add documentation to ANN files
-TODO: Limit line to 80 characters
+TODO: Limit line to 80 characters -> Done
 TODO: Remove object inheritance from classes
 TODO: Rename files according to standard
 TODO: Add verbose output
-TODO: Adapt TCD from Soraya (Dropbox->...->TEST_troubled-cell-detector->Troubled_Cell_Detector)
+TODO: Adapt TCD from Soraya
+    (Dropbox->...->TEST_troubled-cell-detector->Troubled_Cell_Detector)
 TODO: Improve file naming (e.g. use '.' instead of '__')
 TODO: Add way of saving data (np.savez('data/' + name,
     coefficients=projection, troubled_cells=troubled_cells) )
@@ -49,8 +51,8 @@ x = Symbol('x')
 class DGScheme(object):
     """Class for Discontinuous Galerkin Method.
 
-    Approximates linear advection equation using Discontinuous Galerkin Method with
-    troubled-cell-based limiting.
+    Approximates linear advection equation using Discontinuous Galerkin Method
+    with troubled-cell-based limiting.
 
     Attributes
     ----------
@@ -104,7 +106,8 @@ class DGScheme(object):
         plot_dir : str, optional
             Path to directory in which plots are saved. Default: 'test'.
         history_threshold : float, optional
-            Threshold when history will be recorded. Default: math.ceil(0.2/cfl_number).
+            Threshold when history will be recorded.
+            Default: math.ceil(0.2/cfl_number).
         detector_config : dict, optional
             Additional parameters for detector object. Default: {}.
         init_cond : str, optional
@@ -133,7 +136,8 @@ class DGScheme(object):
         self._right_bound = kwargs.pop('right_bound', 1)
         self._verbose = kwargs.pop('verbose', False)
         self._plot_dir = kwargs.pop('plot_dir', 'testing')
-        self._history_threshold = kwargs.pop('history_threshold', math.ceil(0.2/self._cfl_number))
+        self._history_threshold = kwargs.pop('history_threshold',
+                                             math.ceil(0.2/self._cfl_number))
         self._detector = detector
         self._detector_config = kwargs.pop('detector_config', {})
         self._init_cond = kwargs.pop('init_cond', 'Sine')
@@ -153,13 +157,15 @@ class DGScheme(object):
         if not hasattr(Troubled_Cell_Detector, self._detector):
             raise ValueError('Invalid detector: "%s"' % self._detector)
         if not hasattr(Initial_Condition, self._init_cond):
-            raise ValueError('Invalid initial condition: "%s"' % self._init_cond)
+            raise ValueError('Invalid initial condition: "%s"'
+                             % self._init_cond)
         if not hasattr(Limiter, self._limiter):
             raise ValueError('Invalid limiter: "%s"' % self._limiter)
         if not hasattr(Quadrature, self._quadrature):
             raise ValueError('Invalid quadrature: "%s"' % self._quadrature)
         if not hasattr(Update_Scheme, self._update_scheme):
-            raise ValueError('Invalid update scheme: "%s"' % self._update_scheme)
+            raise ValueError('Invalid update scheme: "%s"'
+                             % self._update_scheme)
 
         self._reset()
 
@@ -168,21 +174,26 @@ class DGScheme(object):
         self._init_cond = getattr(Initial_Condition, self._init_cond)(
             self._left_bound, self._right_bound, self._init_config)
         self._limiter = getattr(Limiter, self._limiter)(self._limiter_config)
-        self._quadrature = getattr(Quadrature, self._quadrature)(self._quadrature_config)
+        self._quadrature = getattr(Quadrature, self._quadrature)(
+            self._quadrature_config)
         self._detector = getattr(Troubled_Cell_Detector, self._detector)(
-            self._detector_config, self._mesh, self._wave_speed, self._polynomial_degree,
-            self._num_grid_cells, self._final_time, self._left_bound, self._right_bound,
-            self._basis, self._init_cond, self._quadrature)
+            self._detector_config, self._mesh, self._wave_speed,
+            self._polynomial_degree, self._num_grid_cells, self._final_time,
+            self._left_bound, self._right_bound, self._basis,
+            self._init_cond, self._quadrature)
         self._update_scheme = getattr(Update_Scheme, self._update_scheme)(
-            self._polynomial_degree, self._num_grid_cells, self._detector, self._limiter)
+            self._polynomial_degree, self._num_grid_cells, self._detector,
+            self._limiter)
 
     def approximate(self):
         """Approximates projection.
 
-        Initializes projection and evolves it in time. Each time step consists of three parts:
-        A projection update, a troubled-cell detection, and limiting based on the detected cells.
-        At final time, result and error plots are generated and, if verbose flag is set, also
-        displayed.
+        Initializes projection and evolves it in time. Each time step
+        consists of three parts: a projection update, a troubled-cell
+        detection, and limiting based on the detected cells.
+
+        At final time, result and error plots are generated and, if the
+        verbose flag is set, also displayed.
 
         """
         projection = self._do_initial_projection(self._init_cond)
@@ -201,7 +212,8 @@ class DGScheme(object):
                 cfl_number = self._wave_speed * time_step / self._cell_len
 
             # Update projection
-            projection, troubled_cells = self._update_scheme.step(projection, cfl_number)
+            projection, troubled_cells = self._update_scheme.step(projection,
+                                                                  cfl_number)
             iteration += 1
 
             if (iteration % self._history_threshold) == 0:
@@ -210,14 +222,16 @@ class DGScheme(object):
 
             current_time += time_step
 
-        # Plot exact/approximate results, errors, shock tubes and any detector-dependant plots
-        self._detector.plot_results(projection, troubled_cell_history, time_history)
+        # Plot exact/approximate results, errors, shock tubes,
+        # and any detector-dependant plots
+        self._detector.plot_results(projection, troubled_cell_history,
+                                    time_history)
 
     def save_plots(self, plot_name):
         """Saves plotted results.
 
-        Sets plot directory, if not already existing, and saves plots generated during the last
-        approximation.
+        Sets plot directory, if not already existing, and saves plots
+        generated during the last approximation.
 
         Parameters
         ----------
@@ -236,7 +250,8 @@ class DGScheme(object):
                 os.makedirs(self._plot_dir + '/' + identifier)
 
             plt.figure(identifier)
-            plt.savefig(self._plot_dir + '/' + identifier + '/' + plot_name + '.pdf')
+            plt.savefig(self._plot_dir + '/' + identifier + '/' +
+                        plot_name + '.pdf')
 
     def _reset(self):
         """Resets instance variables."""
@@ -268,20 +283,22 @@ class DGScheme(object):
     def _do_initial_projection(self, initial_condition, adjustment=0):
         """Calculates initial projection.
 
-        Calculates a projection at time step 0 and adds ghost cells on both sides of the array.
+        Calculates a projection at time step 0 and adds ghost cells on both
+        sides of the array.
 
         Parameters
         ----------
         initial_condition : InitialCondition object
-            Initial condition used for calculation. May differ from instance variable.
+            Initial condition used for calculation. May differ from instance
+            variable.
         adjustment: float
             Extent of adjustment of each evaluation point in x-direction.
 
         Returns
         -------
         ndarray
-            Matrix containing projection of size (N+2, p+1) with N being the number of grid cells
-            and p being the polynomial degree.
+            Matrix containing projection of size (N+2, p+1) with N being the
+            number of grid cells and p being the polynomial degree.
 
         """
         # Initialize matrix and set first entry to accommodate for ghost cell
@@ -294,11 +311,12 @@ class DGScheme(object):
 
             for degree in range(self._polynomial_degree + 1):
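+                # Approximate the inner product of the initial condition
+                # with each basis function via Gauss quadrature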
                 new_entry = sum(
-                    initial_condition.calculate(eval_point
-                                                + self._cell_len/2
-                                                * self._quadrature.get_eval_points()[point]
-                                                - adjustment)
-                    * basis_vector[degree].subs(x, self._quadrature.get_eval_points()[point])
+                    initial_condition.calculate(
+                        eval_point + self._cell_len/2
+                        * self._quadrature.get_eval_points()[point]
+                        - adjustment)
+                    * basis_vector[degree].subs(
+                        x, self._quadrature.get_eval_points()[point])
                     * self._quadrature.get_weights()[point]
                     for point in range(self._quadrature.get_num_points()))
                 new_row.append(np.float64(new_entry))
@@ -313,10 +331,12 @@ class DGScheme(object):
         # print(np.array(output_matrix).shape)
         return np.transpose(np.array(output_matrix))
 
-    def build_training_data(self, adjustment, stencil_length, initial_condition=None):
+    def build_training_data(self, adjustment, stencil_length,
+                            initial_condition=None):
         """Builds training data set.
 
-        Initializes projection and calculates cell averages and reconstructions for it.
+        Initializes projection and calculates cell averages and
+        reconstructions for it.
 
         Parameters
         ----------
@@ -325,17 +345,19 @@ class DGScheme(object):
         stencil_length : int
             Size of training data array.
         initial_condition : InitialCondition object, optional
-            Initial condition used for calculation. Default: None (i.e. instance variable).
+            Initial condition used for calculation.
+            Default: None (i.e. instance variable).
 
         Returns
         -------
         ndarray
-            Matrix containing cell averages and reconstructions for initial projection.
+            Matrix containing cell averages and reconstructions for initial
+            projection.
 
         """
         if initial_condition is None:
             initial_condition = self._init_cond
         projection = self._do_initial_projection(initial_condition, adjustment)
 
-        return self._detector.calculate_cell_average_and_reconstructions(projection[:, 1:-1],
-                                                                         stencil_length)
+        return self._detector.calculate_cell_average_and_reconstructions(
+            projection[:, 1:-1], stencil_length)
diff --git a/Initial_Condition.py b/Initial_Condition.py
index 625164fd265c8f41d556fbbcc4812a804a940a00..74b8efe304b59e715046fdbabfa5937e8b0244f8 100644
--- a/Initial_Condition.py
+++ b/Initial_Condition.py
@@ -23,7 +23,7 @@ class InitialCondition(object):
     induce_adjustment(value)
         Adjusts x-value of function.
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
     calculate(x)
         Evaluates function at given x-value.
 
@@ -39,7 +39,7 @@ class InitialCondition(object):
             Right boundary of interval.
         config : dict
             Additional parameters for initial condition.
-        
+
         """
         self._left_bound = left_bound
         self._right_bound = right_bound
@@ -77,7 +77,7 @@ class InitialCondition(object):
         pass
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -90,7 +90,8 @@ class InitialCondition(object):
     def calculate(self, x):
         """Evaluates function at given x-value.
 
-        Projects x-value into interval of the periodic function and evaluates the function.
+        Projects x-value into interval of the periodic function and evaluates
+        the function.
 
         Parameters
         ----------
@@ -104,9 +105,9 @@ class InitialCondition(object):
 
         """
         while x < self._left_bound:
-            x = x + self._interval_len
+            x += self._interval_len
         while x > self._right_bound:
-            x = x - self._interval_len
+            x -= self._interval_len
         return self._get_point(x)
 
     def _get_point(self, x):
@@ -132,12 +133,13 @@ class Sine(InitialCondition):
     Attributes
     ----------
     factor : float
-        Factor by which is the evaluation point is multiplied before applying sine.
+        Factor by which the evaluation point is multiplied before applying
+        sine.
 
     Methods
     -------
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -155,7 +157,7 @@ class Sine(InitialCondition):
         self._factor = config.pop('factor', 2)
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -212,9 +214,9 @@ class Box(InitialCondition):
 
         """
         if x < -1:
-            x = x + 2
+            x += 2
         if x > 1:
-            x = x - 2
+            x -= 2
         if (x >= -0.5) & (x <= 0.5):
             return 1
         else:
@@ -224,13 +226,13 @@ class Box(InitialCondition):
 class FourPeakWave(InitialCondition):
     """Class for a function with four peaks.
 
-    The function is defined piece-by-piece and consists of a Gaussian function, a box function,
-    a symmetric absolute function, and an elliptic function.
+    The function is defined piecewise and consists of a Gaussian function,
+    a box function, a symmetric absolute function, and an elliptic function.
 
     Attributes
     ----------
     alpha : float
-        Factor used for the elliptic function..
+        Factor used for the elliptic function.
     delta : float
         Value used to adjust z for the Gaussian function.
     beta : float
@@ -344,7 +346,7 @@ class Linear(InitialCondition):
     Methods
     -------
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -362,7 +364,7 @@ class Linear(InitialCondition):
         self._factor = config.pop('factor', 1)
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -404,7 +406,7 @@ class LinearAbsolut(InitialCondition):
     is_smooth()
         Returns flag whether function is smooth.
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -426,7 +428,7 @@ class LinearAbsolut(InitialCondition):
         return False
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -507,7 +509,8 @@ class DiscontinuousConstant(InitialCondition):
             Value of function evaluates at x-value.
 
         """
-        return self._left_factor * (x <= self._x0) + self._right_factor * (x > self._x0)
+        return self._left_factor * (x <= self._x0) \
+            + self._right_factor * (x > self._x0)
 
 
 class Polynomial(InitialCondition):
@@ -523,7 +526,7 @@ class Polynomial(InitialCondition):
     Methods
     -------
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -542,7 +545,7 @@ class Polynomial(InitialCondition):
         self._exponential = config.pop('exponential', 2)
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -583,7 +586,7 @@ class Continuous(InitialCondition):
     Methods
     -------
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -601,7 +604,7 @@ class Continuous(InitialCondition):
         self._factor = config.pop('factor', 1)
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -643,7 +646,7 @@ class HeavisideOneSided(InitialCondition):
     is_smooth()
         Returns flag whether function is smooth.
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -665,7 +668,7 @@ class HeavisideOneSided(InitialCondition):
         return False
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -713,7 +716,7 @@ class HeavisideTwoSided(InitialCondition):
     induce_adjustment(value)
         Adjusts x-value of function.
     randomize(config)
-        Sets all instance variables to random value if not determined otherwise.
+        Sets all undetermined instance variables to random values.
 
     """
     def _reset(self, config):
@@ -748,7 +751,7 @@ class HeavisideTwoSided(InitialCondition):
         self._adjustment = value
 
     def randomize(self, config):
-        """Sets all instance variables to random value if not determined otherwise.
+        """Sets all non-determined instance variables to random value.
 
         Parameters
         ----------
@@ -758,7 +761,8 @@ class HeavisideTwoSided(InitialCondition):
         """
         left_factor = config.pop('left_factor', np.random.choice([-1, 1]))
         right_factor = config.pop('right_factor', np.random.choice([-1, 1]))
-        adjustment = config.pop('adjustment', np.random.uniform(low=-1, high=1))
+        adjustment = config.pop('adjustment',
+                                np.random.uniform(low=-1, high=1))
         config = {'left_factor': left_factor, 'right_factor': right_factor,
                   'adjustment': adjustment}
         self._reset(config)
@@ -777,6 +781,6 @@ class HeavisideTwoSided(InitialCondition):
             Value of function evaluates at x-value.
 
         """
-        return self._left_factor\
-               - self._left_factor * np.heaviside(x - self._adjustment, 0)\
-               - self._right_factor * np.heaviside(x + self._adjustment, 0)
+        return self._left_factor \
+            - self._left_factor * np.heaviside(x - self._adjustment, 0) \
+            - self._right_factor * np.heaviside(x + self._adjustment, 0)
diff --git a/Limiter.py b/Limiter.py
index 567b491491b4c972dc03907eb2997d994622d54c..e5c1f9381f83caa04653470c799945893473178d 100644
--- a/Limiter.py
+++ b/Limiter.py
@@ -80,13 +80,14 @@ class NoLimiter(Limiter):
 class MinMod(Limiter):
     """Class for minmod limiting function.
 
-    Sets projection for higher degrees to zero when forward backward, and cell slope values have
-    not the same sign.
+    Sets projection for higher degrees to zero when forward, backward, and
+    cell slope values do not have the same sign.
 
     Attributes
     ----------
     erase_degree : int
-        Polynomial degree up to which projection is not set to zero during limiting.
+        Polynomial degree up to which projection is not set to zero during
+        limiting.
 
     Methods
     -------
@@ -109,7 +110,7 @@ class MinMod(Limiter):
         self._erase_degree = config.pop('erase_degree', 0)
 
     def get_name(self):
-        """Returns string of class name concatenated with the erase degree."""
+        """Returns string of class name concatenated with the erase-degree."""
         return self.__class__.__name__ + str(self._erase_degree)
 
     def apply(self, projection, cell):
@@ -129,7 +130,8 @@ class MinMod(Limiter):
 
         """
         cell_slope = self._set_cell_slope(projection, cell)
-        is_good_cell = self._determine_modification(projection, cell, cell_slope)
+        is_good_cell = self._determine_modification(projection, cell,
+                                                    cell_slope)
 
         if is_good_cell:
             return projection[:, cell]
@@ -158,10 +160,13 @@ class MinMod(Limiter):
             Flag whether cell should be adjusted.
 
         """
-        forward_slope = (projection[0][cell+1] - projection[0][cell]) * (0.5**0.5)
-        backward_slope = (projection[0][cell] - projection[0][cell-1]) * (0.5**0.5)
+        forward_slope = (projection[0][cell+1]
+                         - projection[0][cell]) * (0.5**0.5)
+        backward_slope = (projection[0][cell]
+                          - projection[0][cell-1]) * (0.5**0.5)
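+        # The cell needs no limiting when the forward, backward, and cell
+        # slopes all share the same sign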
 
-        return (forward_slope <= 0) & (backward_slope <= 0) & (cell_slope <= 0) \
+        return (forward_slope <= 0) & (backward_slope <= 0) \
+            & (cell_slope <= 0) \
             | (forward_slope >= 0) & (backward_slope >= 0) & (cell_slope >= 0)
 
     @staticmethod
@@ -183,8 +188,9 @@ class MinMod(Limiter):
         """
         slope = []
         for current_cell in range(len(projection[0])):
-            new_entry = sum(projection[degree][current_cell] * (degree+0.5)**0.5
-                            for degree in range(1, len(projection)))
+            new_entry = sum(
+                projection[degree][current_cell] * (degree+0.5)**0.5
+                for degree in range(1, len(projection)))
             slope.append(new_entry)
         return slope[cell]
 
@@ -192,15 +198,16 @@ class MinMod(Limiter):
 class ModifiedMinMod(MinMod):
     """Class for modified minmod limiting function.
 
-    Sets projection for higher degrees to zero when forward backward, and cell slope values have
-    not the same sign and cell slope is significantly high.
+    Sets projection for higher degrees to zero when forward, backward, and
+    cell slope values do not have the same sign and the cell slope is
+    significantly high.
 
     Attributes
     ----------
     cell_len : float
         Length of a cell in mesh.
     mod_factor : float
-        Factor determining whether cell slope is significantly high enough for modification.
+        Factor determining whether cell slope is significantly high enough for
+        modification.
 
     Methods
     -------
@@ -229,7 +236,7 @@ class ModifiedMinMod(MinMod):
         self._mod_factor = config.pop('mod_factor', 0)
 
     def get_name(self):
-        """Returns string of class name concatenated with the erase degree."""
+        """Returns string of class name concatenated with the erase-degree."""
         return self.__class__.__name__ + str(self._erase_degree)
 
     def _determine_modification(self, projection, cell, cell_slope):
diff --git a/Plotting.py b/Plotting.py
index 0406f13f081a3d1fe22eabba5dd45614c2be8f0a..debe9258acd2b89b1830edf9daf58fbb4c450df6 100644
--- a/Plotting.py
+++ b/Plotting.py
@@ -61,7 +61,8 @@ def plot_semilog_error(grid: ndarray, pointwise_error: ndarray) -> None:
     grid : ndarray
         List of mesh evaluation points.
     pointwise_error : ndarray
-        Array containing pointwise difference between exact and approximate solution.
+        Array containing pointwise difference between exact and approximate
+        solution.
 
     """
     plt.figure('semilog_error')
@@ -91,10 +92,12 @@ def plot_error(grid: ndarray, exact: ndarray, approx: ndarray) -> None:
     plt.title('Errors')
 
 
-def plot_shock_tube(num_grid_cells: int, troubled_cell_history: list, time_history: list) -> None:
+def plot_shock_tube(num_grid_cells: int, troubled_cell_history: list,
+                    time_history: list) -> None:
     """Plots shock tube.
 
-    Plots detected troubled cells over time to depict the evolution of shocks as shock tubes.
+    Plots detected troubled cells over time to depict the evolution of shocks
+    as shock tubes.
 
     Parameters
     ----------
@@ -117,9 +120,10 @@ def plot_shock_tube(num_grid_cells: int, troubled_cell_history: list, time_histo
     plt.title('Shock Tubes')
 
 
-def plot_details(fine_projection: ndarray, fine_mesh: ndarray, coarse_projection: ndarray,
-                 basis: ndarray, wavelet: ndarray, multiwavelet_coeffs: ndarray,
-                 num_coarse_grid_cells: int, polynomial_degree: int) -> None:
+def plot_details(fine_projection: ndarray, fine_mesh: ndarray,
+                 coarse_projection: ndarray, basis: ndarray, wavelet: ndarray,
+                 multiwavelet_coeffs: ndarray, num_coarse_grid_cells: int,
+                 polynomial_degree: int) -> None:
     """Plots details of projection to coarser mesh.
 
     Parameters
@@ -141,19 +145,23 @@ def plot_details(fine_projection: ndarray, fine_mesh: ndarray, coarse_projection
         Polynomial degree.
 
     """
-    averaged_projection = [[coarse_projection[degree][cell] * basis[degree].subs(x, value)
+    averaged_projection = [[coarse_projection[degree][cell]
+                            * basis[degree].subs(x, value)
                             for cell in range(num_coarse_grid_cells)
                             for value in [-0.5, 0.5]]
                            for degree in range(polynomial_degree + 1)]
 
-    wavelet_projection = [[multiwavelet_coeffs[degree][cell] * wavelet[degree].subs(z, 0.5) * value
+    wavelet_projection = [[multiwavelet_coeffs[degree][cell]
+                           * wavelet[degree].subs(z, 0.5) * value
                            for cell in range(num_coarse_grid_cells)
-                           for value in [(-1) ** (polynomial_degree + degree + 1), 1]]
+                           for value in [(-1) ** (polynomial_degree
+                                                  + degree + 1), 1]]
                           for degree in range(polynomial_degree + 1)]
 
     projected_coarse = np.sum(averaged_projection, axis=0)
     projected_fine = np.sum([fine_projection[degree] * basis[degree].subs(x, 0)
-                             for degree in range(polynomial_degree + 1)], axis=0)
+                             for degree in range(polynomial_degree + 1)],
+                            axis=0)
     projected_wavelet_coeffs = np.sum(wavelet_projection, axis=0)
 
     plt.figure('coeff_details')
@@ -165,8 +173,9 @@ def plot_details(fine_projection: ndarray, fine_mesh: ndarray, coarse_projection
     plt.title('Wavelet Coefficients')
 
 
-def calculate_approximate_solution(projection: ndarray, points: ndarray, polynomial_degree: int,
-                                   basis: ndarray) -> ndarray:
+def calculate_approximate_solution(
+        projection: ndarray, points: ndarray, polynomial_degree: int,
+        basis: ndarray) -> ndarray:
     """Calculates approximate solution.
 
     Parameters
@@ -188,7 +197,8 @@ def calculate_approximate_solution(projection: ndarray, points: ndarray, polynom
     """
     num_points = len(points)
 
-    basis_matrix = [[basis[degree].subs(x, points[point]) for point in range(num_points)]
+    basis_matrix = [[basis[degree].subs(x, points[point])
+                     for point in range(num_points)]
                     for degree in range(polynomial_degree+1)]
 
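+    # Pre-evaluating every basis polynomial at every point keeps the cell
+    # loop below to plain scalar arithmetic.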
     approx = [[sum(projection[degree][cell] * basis_matrix[degree][point]
@@ -199,9 +209,10 @@ def calculate_approximate_solution(projection: ndarray, points: ndarray, polynom
     return np.reshape(np.array(approx), (1, len(approx) * num_points))
 
 
-def calculate_exact_solution(mesh: ndarray, cell_len: float, wave_speed: float, final_time: float,
-                             interval_len: float, quadrature: Quadrature,
-                             init_cond: InitialCondition) -> Tuple[ndarray, ndarray]:
+def calculate_exact_solution(
+        mesh: ndarray, cell_len: float, wave_speed: float,
+        final_time: float, interval_len: float, quadrature: Quadrature,
+        init_cond: InitialCondition) -> Tuple[ndarray, ndarray]:
     """Calculates exact solution.
 
     Parameters
@@ -238,7 +249,8 @@ def calculate_exact_solution(mesh: ndarray, cell_len: float, wave_speed: float,
 
         eval_values = []
         for point in range(len(eval_points)):
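+            # Trace the point back along the characteristic and shift it by
+            # whole periods to keep it inside the domain.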
-            new_entry = init_cond.calculate(eval_points[point] - wave_speed * final_time
+            new_entry = init_cond.calculate(eval_points[point]
+                                            - wave_speed * final_time
                                             + num_periods * interval_len)
             eval_values.append(new_entry)
 
@@ -273,11 +285,14 @@ def plot_classification_accuracy(evaluation_dict: dict, colors: dict) -> None:
     step_len = 1
     adjustment = -(len(model_names)//2)*step_len
     for measure in evaluation_dict:
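+        # Shift each measure's group of bars sideways so the groups sit
+        # side by side around the shared x position.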
-        model_eval = [evaluation_dict[measure][model] for model in evaluation_dict[measure]]
-        ax.bar(pos + adjustment*width, model_eval, width, label=measure, color=colors[measure])
+        model_eval = [evaluation_dict[measure][model]
+                      for model in evaluation_dict[measure]]
+        ax.bar(pos + adjustment*width, model_eval, width, label=measure,
+               color=colors[measure])
         adjustment += step_len
     ax.set_xticks(pos)
-    ax.set_xticklabels(model_names, rotation=50, ha='right', fontsize=font_size)
+    ax.set_xticklabels(model_names, rotation=50, ha='right',
+                       fontsize=font_size)
     ax.set_ylabel('Classification (%)')
     ax.set_ylim(bottom=-0.02)
     ax.set_ylim(top=1.02)
@@ -308,18 +323,22 @@ def plot_boxplot(evaluation_dict: dict, colors: dict) -> None:
     pos = np.arange(len(model_names))
     width = 1/(5*len(model_names))
     for measure in evaluation_dict:
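+        # One boxplot group per measure, offset sideways like the bar chart
+        # above.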
-        model_eval = [evaluation_dict[measure][model] for model in evaluation_dict[measure]]
-        boxplot = ax.boxplot(model_eval, positions=pos + adjustment*width, widths=width,
-                             meanline=True, showmeans=True, patch_artist=True)
+        model_eval = [evaluation_dict[measure][model]
+                      for model in evaluation_dict[measure]]
+        boxplot = ax.boxplot(model_eval, positions=pos + adjustment*width,
+                             widths=width, meanline=True, showmeans=True,
+                             patch_artist=True)
         for patch in boxplot['boxes']:
             patch.set(facecolor=colors[measure])
         boxplots.append(boxplot)
         adjustment += step_len
 
     ax.set_xticks(pos)
-    ax.set_xticklabels(model_names, rotation=50, ha='right', fontsize=font_size)
+    ax.set_xticklabels(model_names, rotation=50, ha='right',
+                       fontsize=font_size)
     ax.set_ylim(bottom=-0.02)
     ax.set_ylim(top=1.02)
     ax.set_ylabel('Classification (%)')
     ax.set_title('Classification Evaluation (Boxplot)')
-    ax.legend([bp["boxes"][0] for bp in boxplots], evaluation_dict.keys(), loc='upper right')
+    ax.legend([bp["boxes"][0] for bp in boxplots], evaluation_dict.keys(),
+              loc='upper right')
diff --git a/Quadrature.py b/Quadrature.py
index c9920a461c2e9c43535d2d76acf653e0102267b1..23fba30932c47f3ac1c7bce9c94af16a31de2f8d 100644
--- a/Quadrature.py
+++ b/Quadrature.py
@@ -9,7 +9,8 @@ import numpy.polynomial.legendre as leg
 class Quadrature(object):
     """Class for quadrature.
 
-    A quadrature is used to determine the approximation of a definite integral of a function.
+    A quadrature is used to determine the approximation of a definite integral
+    of a function.
 
     Attributes
     ----------
@@ -108,5 +109,5 @@ class Gauss(Quadrature):
         self._eval_points, self._weights = leg.leggauss(self._num_eval_points)
 
     def get_name(self):
-        """Returns string of class name concatenated with the number of evaluation points."""
+        """Returns string of class name."""
         return self.__class__.__name__ + str(self._num_eval_points)
diff --git a/Snakefile b/Snakefile
index 70aa49e274085528d6a667e62c6d2284f4d7a81b..06d6f8c54e8db875c6760e052cf66aeab42a614f 100644
--- a/Snakefile
+++ b/Snakefile
@@ -14,13 +14,15 @@ use rule * from ann_data as ANN_*
 
 module ann_training:
     snakefile: DIR + '/ANN_training.smk'
-    config: {**config['ANN_Training'], 'data_directory': config['data_directory']}
+    config: {**config['ANN_Training'],
+             'data_directory': config['data_directory']}
 
 use rule * from ann_training as ANN_*
 
 module approximation:
     snakefile: DIR + '/approximation.smk'
-    config: {**config['Approximation'], 'data_directory': config['data_directory']}
+    config: {**config['Approximation'],
+             'data_directory': config['data_directory']}
 
 use rule * from approximation as DG_*
 
diff --git a/Troubled_Cell_Detector.py b/Troubled_Cell_Detector.py
index e73d6d5d9629002dce37c7a357618bfea3bb88bd..ca66c5cdb445ade49c2751e473a3b95d3f9be913 100644
--- a/Troubled_Cell_Detector.py
+++ b/Troubled_Cell_Detector.py
@@ -2,7 +2,8 @@
 """
 @author: Laura C. Kühle, Soraya Terrab (sorayaterrab)
 
-TODO: Adjust TCs for wavelet detectors (sliding window over all cells instead of every second)
+TODO: Adjust TCs for wavelet detectors (sliding window over all cells instead
+    of every second)
 TODO: Adjust Boxplot approach (adjacent cells, outer fence, etc.)
 TODO: Give detailed description of wavelet detection
 
@@ -15,8 +16,9 @@ import torch
 from sympy import Symbol
 
 import ANN_Model
-from Plotting import plot_solution_and_approx, plot_semilog_error, plot_error, plot_shock_tube, \
-    plot_details, calculate_approximate_solution, calculate_exact_solution
+from Plotting import plot_solution_and_approx, plot_semilog_error, \
+    plot_error, plot_shock_tube, plot_details, \
+    calculate_approximate_solution, calculate_exact_solution
 
 matplotlib.use('Agg')
 x = Symbol('x')
@@ -44,11 +46,12 @@ class TroubledCellDetector(object):
     calculate_cell_average_and_reconstructions(projection, stencil_length)
         Calculates cell averages and reconstructions for a given projection.
     plot_results(projection, troubled_cell_history, time_history)
-        Plots results and troubled cells of a projection given its evaluation history.
+        Plots results and troubled cells of a projection.
 
     """
-    def __init__(self, config, mesh, wave_speed, polynomial_degree, num_grid_cells, final_time,
-                 left_bound, right_bound, basis, init_cond, quadrature):
+    def __init__(self, config, mesh, wave_speed, polynomial_degree,
+                 num_grid_cells, final_time, left_bound, right_bound, basis,
+                 init_cond, quadrature):
         """Initializes TroubledCellDetector.
 
         Parameters
@@ -129,11 +132,13 @@ class TroubledCellDetector(object):
         """
         pass
 
-    def calculate_cell_average_and_reconstructions(self, projection, stencil_length):
+    def calculate_cell_average_and_reconstructions(self, projection,
+                                                   stencil_length):
         """Calculates cell averages and reconstructions for a given projection.
 
-        Calculate the cell averages of all cells in a projection. Reconstructions are only
-        calculated for the middle cell and added left and right to it, respectively.
+        Calculate the cell averages of all cells in a projection.
+        Reconstructions are only calculated for the middle cell and added
+        left and right to it, respectively.
 
         Parameters
         ----------
@@ -145,22 +150,31 @@ class TroubledCellDetector(object):
         Returns
         -------
         ndarray
-            Matrix containing cell averages and reconstructions for initial projection.
+            Matrix containing cell averages and reconstructions for initial
+            projection.
 
         """
         cell_averages = calculate_approximate_solution(
             projection, [0], 0, self._basis.get_basis_vector())
         left_reconstructions = calculate_approximate_solution(
-            projection, [-1], self._polynomial_degree, self._basis.get_basis_vector())
+            projection, [-1], self._polynomial_degree,
+            self._basis.get_basis_vector())
         right_reconstructions = calculate_approximate_solution(
-            projection, [1], self._polynomial_degree, self._basis.get_basis_vector())
+            projection, [1], self._polynomial_degree,
+            self._basis.get_basis_vector())
         middle_idx = stencil_length//2
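+        # Row layout: averages of the cells left of the middle, then the
+        # middle cell's left reconstruction, average, and right
+        # reconstruction, then the averages of the cells right of it.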
-        return np.array(list(map(np.float64, zip(cell_averages[:, :middle_idx],
-                        left_reconstructions[:, middle_idx], cell_averages[:, middle_idx],
-                        right_reconstructions[:, middle_idx], cell_averages[:, middle_idx+1:]))))
+        return np.array(
+            list(map(np.float64, zip(cell_averages[:, :middle_idx],
+                                     left_reconstructions[:, middle_idx],
+                                     cell_averages[:, middle_idx],
+                                     right_reconstructions[:, middle_idx],
+                                     cell_averages[:, middle_idx+1:]))))
 
     def plot_results(self, projection, troubled_cell_history, time_history):
-        """Plots results and troubled cells of a projection given its evaluation history.
+        """Plots results and troubled cells of a projection.
+
+        Plots results and troubled cells of a projection given its evaluation
+        history.
 
         Parameters
         ----------
@@ -172,7 +186,8 @@ class TroubledCellDetector(object):
             List of value of each time step.
 
         """
-        plot_shock_tube(self._num_grid_cells, troubled_cell_history, time_history)
+        plot_shock_tube(self._num_grid_cells, troubled_cell_history,
+                        time_history)
         max_error = self._plot_mesh(projection)
 
         print('p =', self._polynomial_degree)
@@ -194,16 +209,18 @@ class TroubledCellDetector(object):
 
         """
         grid, exact = calculate_exact_solution(
-            self._mesh[2:-2], self._cell_len, self._wave_speed, self._final_time,
-            self._interval_len, self._quadrature, self._init_cond)
+            self._mesh[2:-2], self._cell_len, self._wave_speed,
+            self._final_time, self._interval_len, self._quadrature,
+            self._init_cond)
         approx = calculate_approximate_solution(
-            projection[:, 1:-1], self._quadrature.get_eval_points(), self._polynomial_degree,
-            self._basis.get_basis_vector())
+            projection[:, 1:-1], self._quadrature.get_eval_points(),
+            self._polynomial_degree, self._basis.get_basis_vector())
 
         pointwise_error = np.abs(exact-approx)
         max_error = np.max(pointwise_error)
 
-        plot_solution_and_approx(grid, exact, approx, self._colors['exact'], self._colors['approx'])
+        plot_solution_and_approx(grid, exact, approx, self._colors['exact'],
+                                 self._colors['approx'])
         plt.legend(['Exact', 'Approx'])
         plot_semilog_error(grid, pointwise_error)
         plot_error(grid, exact, approx)
@@ -268,9 +285,11 @@ class ArtificialNeuralNetwork(TroubledCellDetector):
         self._stencil_len = config.pop('stencil_len', 3)
         self._model = config.pop('model', 'ThreeLayerReLu')
         self._model_config = config.pop('model_config', {
-            'input_size': self._stencil_len+2, 'first_hidden_size': 8, 'second_hidden_size': 4,
-            'output_size': 2, 'activation_function': 'Softmax', 'activation_config': {'dim': 1}})
-        model_state = config.pop('model_state', 'Snakemake-Test/trained models/model__Adam.pt')
+            'input_size': self._stencil_len+2, 'first_hidden_size': 8,
+            'second_hidden_size': 4, 'output_size': 2,
+            'activation_function': 'Softmax', 'activation_config': {'dim': 1}})
+        model_state = config.pop('model_state', 'Snakemake-Test/trained '
+                                                'models/model__Adam.pt')
 
         if not hasattr(ANN_Model, self._model):
             raise ValueError('Invalid model: "%s"' % self._model)
@@ -297,14 +316,17 @@ class ArtificialNeuralNetwork(TroubledCellDetector):
         # Reset ghost cells to adjust for stencil length
         num_ghost_cells = self._stencil_len//2
         projection = projection[:, 1:-1]
-        projection = np.concatenate((projection[:, -num_ghost_cells:], projection,
-                                     projection[:, :num_ghost_cells]),
-                                    axis=1)
+        projection = np.concatenate((projection[:, -num_ghost_cells:],
+                                     projection,
+                                     projection[:, :num_ghost_cells]), axis=1)
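+        # The wrap-around padding realises periodic boundaries for stencils
+        # at the domain edges.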
 
         # Calculate input data depending on stencil length
-        input_data = torch.from_numpy(np.vstack([self.calculate_cell_average_and_reconstructions(
-            projection[:, cell-num_ghost_cells:cell+num_ghost_cells+1], self._stencil_len)
-            for cell in range(num_ghost_cells, len(projection[0])-num_ghost_cells)]))
+        input_data = torch.from_numpy(
+            np.vstack([self.calculate_cell_average_and_reconstructions(
+                projection[:, cell-num_ghost_cells:cell+num_ghost_cells+1],
+                self._stencil_len)
+                for cell in range(num_ghost_cells,
+                                  len(projection[0])-num_ghost_cells)]))
 
         # Determine troubled cells
         model_output = torch.argmax(self._model(input_data.float()), dim=1)
@@ -359,7 +381,8 @@ class WaveletDetector(TroubledCellDetector):
             List of indices for all detected troubled cells.
 
         """
-        multiwavelet_coeffs = self._calculate_wavelet_coeffs(projection[:, 1: -1])
+        multiwavelet_coeffs = self._calculate_wavelet_coeffs(
+            projection[:, 1:-1])
         return self._get_cells(multiwavelet_coeffs, projection)
 
     def _calculate_wavelet_coeffs(self, projection):
@@ -378,8 +401,9 @@ class WaveletDetector(TroubledCellDetector):
         """
         output_matrix = []
         for i in range(self._num_coarse_grid_cells):
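+            # Each coarse coefficient combines the two fine cells it covers,
+            # projected onto the left and right halves of the wavelet.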
-            new_entry = 0.5*(projection[:, 2*i] @ self._wavelet_projection_left
-                             + projection[:, 2*i+1] @ self._wavelet_projection_right)
+            new_entry = 0.5*(
+                    projection[:, 2*i] @ self._wavelet_projection_left
+                    + projection[:, 2*i+1] @ self._wavelet_projection_right)
             output_matrix.append(new_entry)
         return np.transpose(np.array(output_matrix))
 
@@ -402,9 +426,10 @@ class WaveletDetector(TroubledCellDetector):
         return []
 
     def plot_results(self, projection, troubled_cell_history, time_history):
-        """Plots results and troubled cells of a projection given its evaluation history.
+        """Plots results and troubled cells of a projection.
 
-        Plots results on coarse and fine grid, errors, troubled cells, and coefficient details.
+        Plots results on coarse and fine grid, errors, troubled cells,
+        and coefficient details given the projection's evaluation history.
 
         Parameters
         ----------
@@ -419,8 +444,9 @@ class WaveletDetector(TroubledCellDetector):
         multiwavelet_coeffs = self._calculate_wavelet_coeffs(projection)
         coarse_projection = self._calculate_coarse_projection(projection)
         plot_details(projection[:, 1:-1], self._mesh[2:-2], coarse_projection,
-                     self._basis.get_basis_vector(), self._basis.get_wavelet_vector(),
-                     multiwavelet_coeffs, self._num_coarse_grid_cells,
+                     self._basis.get_basis_vector(),
+                     self._basis.get_wavelet_vector(), multiwavelet_coeffs,
+                     self._num_coarse_grid_cells,
                      self._polynomial_degree)
         super().plot_results(projection, troubled_cell_history, time_history)
 
@@ -438,7 +464,8 @@ class WaveletDetector(TroubledCellDetector):
             Matrix of projection on coarse grid for each polynomial degree.
 
         """
-        basis_projection_left, basis_projection_right = self._basis.get_basis_projections()
+        basis_projection_left, basis_projection_right = \
+            self._basis.get_basis_projections()
 
         # Remove ghost cells
         projection = projection[:, 1:-1]
@@ -446,8 +473,9 @@ class WaveletDetector(TroubledCellDetector):
         # Calculate projection on coarse mesh
         output_matrix = []
         for i in range(self._num_coarse_grid_cells):
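+            # Average the two fine cells underlying coarse cell i via the
+            # left and right basis projection matrices.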
-            new_entry = 0.5 * (projection[:, 2 * i] @ basis_projection_left
-                               + projection[:, 2 * i + 1] @ basis_projection_right)
+            new_entry = 0.5 * (
+                    projection[:, 2 * i] @ basis_projection_left
+                    + projection[:, 2 * i + 1] @ basis_projection_right)
             output_matrix.append(new_entry)
         coarse_projection = np.transpose(np.array(output_matrix))
 
@@ -469,26 +497,30 @@ class WaveletDetector(TroubledCellDetector):
         """
 
         grid, exact = calculate_exact_solution(
-            self._mesh[2:-2], self._cell_len, self._wave_speed, self._final_time,
-            self._interval_len, self._quadrature, self._init_cond)
+            self._mesh[2:-2], self._cell_len, self._wave_speed,
+            self._final_time, self._interval_len, self._quadrature,
+            self._init_cond)
         approx = calculate_approximate_solution(
-            projection[:, 1:-1], self._quadrature.get_eval_points(), self._polynomial_degree,
-            self._basis.get_basis_vector())
+            projection[:, 1:-1], self._quadrature.get_eval_points(),
+            self._polynomial_degree, self._basis.get_basis_vector())
 
         pointwise_error = np.abs(exact-approx)
         max_error = np.max(pointwise_error)
 
         self._plot_coarse_mesh(projection)
-        plot_solution_and_approx(grid, exact, approx, self._colors['fine_exact'],
+        plot_solution_and_approx(grid, exact, approx,
+                                 self._colors['fine_exact'],
                                  self._colors['fine_approx'])
-        plt.legend(['Exact (Coarse)', 'Approx (Coarse)', 'Exact (Fine)', 'Approx (Fine)'])
+        plt.legend(['Exact (Coarse)', 'Approx (Coarse)', 'Exact (Fine)',
+                    'Approx (Fine)'])
         plot_semilog_error(grid, pointwise_error)
         plot_error(grid, exact, approx)
 
         return max_error
 
     def _plot_coarse_mesh(self, projection):
-        """Plots exact and approximate solution as well as errors for a coarse projection.
+        """Plots exact and approximate solution as well as errors for a coarse
+        projection.
 
         Parameters
         ----------
@@ -505,13 +537,15 @@ class WaveletDetector(TroubledCellDetector):
 
         # Plot exact and approximate solutions for coarse mesh
         grid, exact = calculate_exact_solution(
-            coarse_mesh[1:-1], coarse_cell_len, self._wave_speed, self._final_time,
-            self._interval_len, self._quadrature, self._init_cond)
+            coarse_mesh[1:-1], coarse_cell_len, self._wave_speed,
+            self._final_time, self._interval_len, self._quadrature,
+            self._init_cond)
         approx = calculate_approximate_solution(
-            coarse_projection, self._quadrature.get_eval_points(), self._polynomial_degree,
-            self._basis.get_basis_vector())
+            coarse_projection, self._quadrature.get_eval_points(),
+            self._polynomial_degree, self._basis.get_basis_vector())
         plot_solution_and_approx(
-            grid, exact, approx, self._colors['coarse_exact'], self._colors['coarse_approx'])
+            grid, exact, approx, self._colors['coarse_exact'],
+            self._colors['coarse_approx'])
 
 
 class Boxplot(WaveletDetector):
@@ -556,7 +590,8 @@ class Boxplot(WaveletDetector):
             List of indices for all detected troubled cells.
 
         """
-        indexed_coeffs = [[multiwavelet_coeffs[0, i], i]for i in range(self._num_coarse_grid_cells)]
+        indexed_coeffs = [[multiwavelet_coeffs[0, i], i]
+                          for i in range(self._num_coarse_grid_cells)]
 
         if self._num_coarse_grid_cells < self._fold_len:
             self._fold_len = self._num_coarse_grid_cells
@@ -565,18 +600,23 @@ class Boxplot(WaveletDetector):
         troubled_cells = []
 
         for fold in range(num_folds):
-            sorted_fold = sorted(indexed_coeffs[fold * self._fold_len:(fold+1) * self._fold_len])
+            sorted_fold = sorted(indexed_coeffs[fold * self._fold_len:
+                                                (fold+1) * self._fold_len])
 
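+            # The quartile positions generally fall between two sorted
+            # coefficients; split them into an integer index and a fractional
+            # weight for linear interpolation.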
             boundary_index = self._fold_len//4
             balance_factor = self._fold_len/4.0 - boundary_index
 
-            first_quartile = (1-balance_factor) * sorted_fold[boundary_index-1][0] \
+            first_quartile = (1-balance_factor) \
+                * sorted_fold[boundary_index-1][0] \
                 + balance_factor * sorted_fold[boundary_index][0]
-            third_quartile = (1-balance_factor) * sorted_fold[3*boundary_index-1][0]\
+            third_quartile = (1-balance_factor) \
+                * sorted_fold[3*boundary_index-1][0] \
                 + balance_factor * sorted_fold[3*boundary_index][0]
 
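+            # Tukey-style fences: points more than whisker_len interquartile
+            # ranges beyond the quartiles count as outliers.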
-            lower_bound = first_quartile - self._whisker_len * (third_quartile-first_quartile)
-            upper_bound = third_quartile + self._whisker_len * (third_quartile-first_quartile)
+            lower_bound = first_quartile \
+                - self._whisker_len * (third_quartile-first_quartile)
+            upper_bound = third_quartile \
+                + self._whisker_len * (third_quartile-first_quartile)
 
             # Check for lower extreme outliers and add respective cells
             for cell in sorted_fold:
@@ -596,7 +636,8 @@ class Boxplot(WaveletDetector):
 
 
 class Theoretical(WaveletDetector):
-    """"Class for troubled-cell detection based on the projection averages and a cutoff factor.
+    """Class for troubled-cell detection based on the projection averages and
+    a cutoff factor.
 
     Attributes
     ----------
@@ -616,7 +657,8 @@ class Theoretical(WaveletDetector):
         super()._reset(config)
 
         # Unpack necessary configurations
-        self._cutoff_factor = config.pop('cutoff_factor', np.sqrt(2) * self._cell_len)
+        self._cutoff_factor = config.pop('cutoff_factor',
+                                         np.sqrt(2) * self._cell_len)
-        # comment to line above: or 2 or 3
+        # Alternative cutoff factors: 2 or 3 instead of sqrt(2) * cell_len.
 
     def _get_cells(self, multiwavelet_coeffs, projection):
@@ -636,8 +678,9 @@ class Theoretical(WaveletDetector):
 
         """
         troubled_cells = []
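+        # Normalise by the largest cell average (at least 1) so the
+        # detection threshold does not depend on the solution's scale.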
-        max_avg = np.sqrt(0.5) * max(1, max(abs(projection[0][cell+1])
-                                            for cell in range(self._num_coarse_grid_cells)))
+        max_avg = np.sqrt(0.5) \
+            * max(1, max(abs(projection[0][cell+1])
+                         for cell in range(self._num_coarse_grid_cells)))
 
         for cell in range(self._num_coarse_grid_cells):
             if self._is_troubled_cell(multiwavelet_coeffs, cell, max_avg):
@@ -665,6 +708,7 @@ class Theoretical(WaveletDetector):
         """
         max_value = max(abs(multiwavelet_coeffs[degree][cell])
                         for degree in range(self._polynomial_degree+1))/max_avg
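+        # The denominator equals the full interval length, since each coarse
+        # cell spans two fine cells of width cell_len.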
-        eps = self._cutoff_factor / (self._cell_len*self._num_coarse_grid_cells*2)
+        eps = self._cutoff_factor \
+            / (self._cell_len*self._num_coarse_grid_cells*2)
 
         return max_value > eps
diff --git a/Update_Scheme.py b/Update_Scheme.py
index 8200949a898d702d668abba2a9cce4e1e8b2608b..aaa080b7c7d7921f281263daba33813ce2cd27fb 100644
--- a/Update_Scheme.py
+++ b/Update_Scheme.py
@@ -2,8 +2,8 @@
 """
 @author: Laura C. Kühle
 
-TODO: Discuss descriptions (matrices, cfl number, right-hand side, limiting slope, basis, wavelet,
-    etc.)
+TODO: Discuss descriptions (matrices, cfl number, right-hand side,
+    limiting slope, basis, wavelet, etc.)
 TODO: Discuss referencing info on SSPRK3
 
 """
@@ -64,7 +64,8 @@ class UpdateScheme(object):
                     new_entry = 1.0
                 new_row.append(new_entry*np.sqrt((i+0.5) * (j+0.5)))
             matrix.append(new_row)
-        self._stiffness_matrix = np.array(matrix)  # former: inv_mass @ np.array(matrix)
+        self._stiffness_matrix = np.array(matrix)
+        # former: inv_mass @ np.array(matrix)
 
         # Set boundary matrix
         matrix = []
@@ -74,7 +75,8 @@ class UpdateScheme(object):
                 new_entry = np.sqrt((i+0.5) * (j+0.5)) * (-1.0)**i
                 new_row.append(new_entry)
             matrix.append(new_row)
-        self._boundary_matrix = np.array(matrix)  # former: inv_mass @ np.array(matrix)
+        self._boundary_matrix = np.array(matrix)
+        # former: inv_mass @ np.array(matrix)
 
     def get_name(self):
         """Returns string of class name."""
@@ -93,12 +95,14 @@ class UpdateScheme(object):
         Returns
         -------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
         troubled_cells : list
             List of indices for all detected troubled cells.
 
         """
-        current_projection, troubled_cells = self._apply_stability_method(projection, cfl_number)
+        current_projection, troubled_cells = self._apply_stability_method(
+            projection, cfl_number)
 
         return current_projection, troubled_cells
 
@@ -115,7 +119,8 @@ class UpdateScheme(object):
         Returns
         -------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
         troubled_cells : list
             List of indices for all detected troubled cells.
 
@@ -128,7 +133,8 @@ class UpdateScheme(object):
         Parameters
         ----------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
 
         Returns
         -------
@@ -142,7 +148,8 @@ class UpdateScheme(object):
 
         new_projection = current_projection.copy()
         for cell in troubled_cells:
-            new_projection[:,  cell] = self._limiter.apply(current_projection, cell)
+            new_projection[:, cell] = self._limiter.apply(current_projection,
+                                                          cell)
 
         return new_projection, troubled_cells
 
@@ -154,16 +161,19 @@ class UpdateScheme(object):
         Parameters
         ----------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
 
         Returns
         -------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
 
         """
         current_projection[:, 0] = current_projection[:, self._num_grid_cells]
-        current_projection[:, self._num_grid_cells+1] = current_projection[:, 1]
+        current_projection[:, self._num_grid_cells+1] \
+            = current_projection[:, 1]
         return current_projection
 
 
@@ -189,26 +199,34 @@ class SSPRK3(UpdateScheme):
         Returns
         -------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
         troubled_cells : list
             List of indices for all detected troubled cells.
 
         """
         original_projection = projection
 
-        current_projection = self._apply_first_step(original_projection, cfl_number)
+        current_projection = self._apply_first_step(original_projection,
+                                                    cfl_number)
         current_projection, __ = self._apply_limiter(current_projection)
-        current_projection = self._enforce_boundary_condition(current_projection)
+        current_projection = self._enforce_boundary_condition(
+            current_projection)
 
-        current_projection = self._apply_second_step(original_projection, current_projection,
+        current_projection = self._apply_second_step(original_projection,
+                                                     current_projection,
                                                      cfl_number)
         current_projection, __ = self._apply_limiter(current_projection)
-        current_projection = self._enforce_boundary_condition(current_projection)
+        current_projection = self._enforce_boundary_condition(
+            current_projection)
 
-        current_projection = self._apply_third_step(original_projection, current_projection,
+        current_projection = self._apply_third_step(original_projection,
+                                                    current_projection,
                                                     cfl_number)
-        current_projection, troubled_cells = self._apply_limiter(current_projection)
-        current_projection = self._enforce_boundary_condition(current_projection)
+        current_projection, troubled_cells = self._apply_limiter(
+            current_projection)
+        current_projection = self._enforce_boundary_condition(
+            current_projection)
 
         return current_projection, troubled_cells
 
@@ -231,7 +249,8 @@ class SSPRK3(UpdateScheme):
         right_hand_side = self._update_right_hand_side(original_projection)
         return original_projection + (cfl_number*right_hand_side)
 
-    def _apply_second_step(self, original_projection, current_projection, cfl_number):
+    def _apply_second_step(self, original_projection, current_projection,
+                           cfl_number):
         """Applies second step of SSPRK3.
 
         Parameters
@@ -239,7 +258,8 @@ class SSPRK3(UpdateScheme):
         original_projection : ndarray
             Matrix of original projection for each polynomial degree.
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
         cfl_number : float
             CFL number to ensure stability.
 
@@ -250,9 +270,11 @@ class SSPRK3(UpdateScheme):
 
         """
         right_hand_side = self._update_right_hand_side(current_projection)
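+        # SSPRK3 stage 2: convex combination (3/4, 1/4) of the original
+        # projection and an Euler step from stage 1.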
-        return 1/4 * (3*original_projection + (current_projection + cfl_number*right_hand_side))
+        return 1/4 * (3*original_projection
+                      + (current_projection + cfl_number*right_hand_side))
 
-    def _apply_third_step(self, original_projection, current_projection, cfl_number):
+    def _apply_third_step(self, original_projection, current_projection,
+                          cfl_number):
         """Applies third step of SSPRK3.
 
-        Parameter
+        Parameters
@@ -260,7 +282,8 @@ class SSPRK3(UpdateScheme):
         original_projection : ndarray
             Matrix of original projection for each polynomial degree.
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
         cfl_number : float
             CFL number to ensure stability.
 
@@ -271,7 +294,8 @@ class SSPRK3(UpdateScheme):
 
         """
         right_hand_side = self._update_right_hand_side(current_projection)
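+        # SSPRK3 stage 3: weights 1/3 and 2/3 complete the update.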
-        return 1/3 * (original_projection + 2*(current_projection + cfl_number*right_hand_side))
+        return 1/3 * (original_projection
+                      + 2*(current_projection + cfl_number*right_hand_side))
 
     def _update_right_hand_side(self, current_projection):
         """Updates right-hand side.
@@ -279,7 +303,8 @@ class SSPRK3(UpdateScheme):
-        Parameter
-        ---------
+        Parameters
+        ----------
         current_projection : ndarray
-            Matrix of projection of current update step for each polynomial degree.
+            Matrix of projection of current update step for each polynomial
+            degree.
 
         Returns
         -------
@@ -291,8 +316,10 @@ class SSPRK3(UpdateScheme):
         right_hand_side = [0]
 
         for j in range(self._num_grid_cells):
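+            # Each cell couples to itself through the stiffness matrix and
+            # to its left neighbour through the boundary matrix (upwind
+            # coupling for a right-moving wave).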
-            right_hand_side.append(2*(self._stiffness_matrix @ current_projection[:, j+1]
-                                      + self._boundary_matrix @ current_projection[:, j]))
+            right_hand_side.append(2*(self._stiffness_matrix
+                                      @ current_projection[:, j+1]
+                                      + self._boundary_matrix
+                                      @ current_projection[:, j]))
 
         # Set ghost cells to respective value
         right_hand_side[0] = right_hand_side[self._num_grid_cells]
diff --git a/workflows/ANN_data.smk b/workflows/ANN_data.smk
index b6785904ec64dd2158ddb3f85ebdcf9684b82551..520dd6530ccc87ad1a7fcd4c1bf021843e62fd39 100644
--- a/workflows/ANN_data.smk
+++ b/workflows/ANN_data.smk
@@ -30,12 +30,16 @@ rule generate_data:
             initial_conditions.append({
                 'function': getattr(Initial_Condition, function)(
                     params.left_bound, params.right_bound, {}),
-                'config': {} if function in config['functions'] else config['functions'][function]
+                'config': config['functions'][function]
+                if function in config['functions'] else {}
             })
 
         with open(str(log), 'w') as logfile:
             sys.stdout = logfile
-            generator = ANN_Data_Generator.TrainingDataGenerator(initial_conditions=initial_conditions,
-                left_bound=params.left_bound, right_bound=params.right_bound, balance=params.balance,
+            generator = ANN_Data_Generator.TrainingDataGenerator(
+                initial_conditions=initial_conditions,
+                left_bound=params.left_bound, right_bound=params.right_bound,
+                balance=params.balance,
                 stencil_length=params.stencil_length, directory=DIR)
-            data = generator.build_training_data(num_samples=params.sample_number)
\ No newline at end of file
+            data = generator.build_training_data(
+                num_samples=params.sample_number)
\ No newline at end of file
diff --git a/workflows/ANN_training.smk b/workflows/ANN_training.smk
index 42efa8290026342c556dd923e637a519587ab761..9ca50dbbd0664b0ec4586f12410c881b60701c96 100644
--- a/workflows/ANN_training.smk
+++ b/workflows/ANN_training.smk
@@ -13,7 +13,8 @@ MODELS = config['models']
 rule all:
     input:
         expand(DIR+'/trained models/model__{model}.pt', model=MODELS),
-        DIR+'/model evaluation/classification_accuracy/' + '_'.join(MODELS.keys()) + '.pdf'
+        DIR+'/model evaluation/classification_accuracy/'
+        + '_'.join(MODELS.keys()) + '.pdf'
     default_target: True
 
 rule test_model:
@@ -22,7 +23,8 @@ rule test_model:
         DIR+'/normalized_input_data.npy',
         DIR+'/output_data.npy'
     output:
-        DIR+'/model evaluation/classification_accuracy/' + '_'.join(MODELS.keys()) + '.pdf'
+        DIR+'/model evaluation/classification_accuracy/'
+        + '_'.join(MODELS.keys()) + '.pdf'
     params:
         num_iterations = config['num_iterations'],
         colors = config['classification_colors'],
@@ -32,14 +34,16 @@ rule test_model:
     run:
         models = {}
         with open(str(log), 'w') as logfile:
-            # sys.stdout = logfile
-            # sys.stderr = logfile
+            sys.stdout = logfile
+            sys.stderr = logfile
             for model in MODELS:
-                trainer= ANN_Training.ModelTrainer({'model_name': model, 'dir': DIR,
-                                                'model_dir': DIR, **MODELS[model]})
+                trainer = ANN_Training.ModelTrainer(
+                    {'model_name': model, 'dir': DIR, 'model_dir': DIR,
+                     **MODELS[model]})
                 models[model] = trainer
-            evaluate_models(models=models, directory=DIR, num_iterations=params.num_iterations,
-                colors=params.colors, compare_normalization=params.compare_normalization)
+            evaluate_models(models=models, directory=DIR,
+                num_iterations=params.num_iterations, colors=params.colors,
+                compare_normalization=params.compare_normalization)
 
 rule train_model:
     input:
@@ -55,6 +59,7 @@ rule train_model:
         with open(str(log), 'w') as logfile:
             sys.stdout = logfile
             training_data = read_training_data(DIR)
-            trainer= ANN_Training.ModelTrainer(config={**MODELS[wildcards.model]})
+            trainer = ANN_Training.ModelTrainer(
+                config={**MODELS[wildcards.model]})
             trainer.epoch_training(dataset=training_data)
             trainer.save_model(directory=DIR, model_name=wildcards.model)
diff --git a/workflows/approximation.smk b/workflows/approximation.smk
index 8519a59711361f3f590847289f4670f6cba3e416..9af2c7cf97acbd68e0ef28f8f25ef25180492037 100644
--- a/workflows/approximation.smk
+++ b/workflows/approximation.smk
@@ -13,13 +13,15 @@ SCHEMES = config['schemes']
 
 rule all:
     input:
-        expand(config['plot_dir'] + '/{plot}/{scheme}.pdf', plot=PLOTS, scheme=SCHEMES)
+        expand(config['plot_dir'] + '/{plot}/{scheme}.pdf', plot=PLOTS,
+            scheme=SCHEMES)
     default_target: True
 
 def get_ANN_model(wildcards):
-    if config['schemes'][wildcards.scheme]['detector'] == 'ArtificialNeuralNetwork':
-        return DIR + '/trained models/' \
-               + config['schemes'][wildcards.scheme]['detector_config']['model_state']
+    if config['schemes'][wildcards.scheme]['detector'] == \
+            'ArtificialNeuralNetwork':
+        return DIR + '/trained models/' + config['schemes'][
+            wildcards.scheme]['detector_config']['model_state']
     return []
 
 rule approximate_solution: