From 217d399e8a47c381f5af83d31b6e7458e4a33c1b Mon Sep 17 00:00:00 2001
From: Joshua Schmidt <joshua.schmidt@uni-duesseldorf.de>
Date: Wed, 3 Nov 2021 12:42:24 +0100
Subject: [PATCH] Refactor pupil detection into reusable functions; add CSV export and plotting

---
 data_classification.py | 353 ++++++++++++++++++++++++++++++-----------
 1 file changed, 258 insertions(+), 95 deletions(-)

diff --git a/data_classification.py b/data_classification.py
index fa285bc..274ded9 100644
--- a/data_classification.py
+++ b/data_classification.py
@@ -1,5 +1,6 @@
 import cv2
 import json
+import csv
 import numpy as np
 import tensorflow as tf
 import os
@@ -7,6 +8,7 @@ import time
 from tensorflow.keras.preprocessing.image import img_to_array
 from tensorflow.keras.preprocessing import image
 from PIL import ImageOps
+import matplotlib.pyplot as plt
 
 class FramePupilPositions:
     def __init__(self, left_pupil_position, right_pupil_position):
@@ -59,102 +61,263 @@ def get_biggest_contour(contours):
         x,y,w,h = cv2.boundingRect(c[0])
         x2,y2,w2,h2 = cv2.boundingRect(c[1])
 
-    return {"1": {"x": x, "y": y, "width": w, "height": h}, "2": {"x": x2, "y": y2, "width": w2, "height": h2}}
+    return {"1": {"x": int(x+w*0.15), "y": int(y+h*0.15), "width": int(w-2*(w*0.1)), "height": int(h-2*(h*0.15))}, "2": {"x": int(x2+w2*0.15), "y": int(y2+h2*0.15), "width": int(w2-2*(w2*0.1)), "height": int(h2-2*(h2*0.15))}}
 
-def downscale_if_necessary(eye_img):
+def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
+    # https://stackoverflow.com/questions/44650888/resize-an-image-without-distortion-opencv
+    (h, w) = image.shape[:2]
+    if width is None and height is None:
+        return image
+    if width is None:
+        r = height / float(h)
+        dim = (int(w * r), height)
+    else:
+        r = width / float(w)
+        dim = (width, int(h * r))
+    resized = cv2.resize(image, dim, interpolation = inter)
+    return resized
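+# e.g. image_resize(img, height=128) scales img so its height becomes 128 px
+# while preserving the aspect ratio; passing width instead fixes the width.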
+
+def downscale_if_necessary(width, height, eye_img):
     shape = eye_img.shape
-    if (shape[0] > 128 or shape[1] > 256):
-        return cv2.resize(eye_img, dsize=(256, 128), interpolation=cv2.INTER_CUBIC)
+    if (shape[0] > height):
+        return downscale_if_necessary(width, height, image_resize(eye_img, None, height, inter = cv2.INTER_CUBIC))
+    if (shape[1] > width):
+        return downscale_if_necessary(width, height, image_resize(eye_img, width, None, inter = cv2.INTER_CUBIC))
     return eye_img
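+# e.g. downscale_if_necessary(256, 128, eye_img) shrinks an oversized eye crop
+# until it fits within 256x128 while preserving the aspect ratio; padding up to
+# the full model input size is done later with cv2.copyMakeBorder.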
 
-cap = cv2.VideoCapture('VertigoVideosIII/data/sophia_reinhardt/Test_selfie_leeraufnahme_sophia_2021-09-28-13-12-39.mp4')
-eye_frames, pupil_positions = read_json_file('VertigoVideosIII/data/sophia_reinhardt/Test_selfie_leeraufnahme_sophia_2021-09-28-13-12-39_fixed.json')
-
-if (cap.isOpened()== False):
-  print("Error opening video stream or file")
-
-model_path = os.path.abspath('best_models_BO_32_fixed_11_0')
-loaded_model = tf.keras.models.load_model(model_path, compile=True)
-
-current_frame = 0
-json_data = []
-while(cap.isOpened()):
-  ret, frame = cap.read()
-  if ret == True:
-    current_eye_frame = eye_frames[current_frame]
-    left_eye_frame = current_eye_frame.left_eye_frame
-    right_eye_frame = current_eye_frame.right_eye_frame
-    #frame = cv2.rectangle(frame, (left_eye_frame.x, left_eye_frame.y), (left_eye_frame.x+left_eye_frame.width, left_eye_frame.y+left_eye_frame.height), (0, 255, 0), 2)
-    #frame = cv2.rectangle(frame, (right_eye_frame.x, right_eye_frame.y), (right_eye_frame.x+right_eye_frame.width, right_eye_frame.y+right_eye_frame.height), (255, 0, 0), 2)
-    # fix
-    #frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    #ret, frame_thresh = cv2.threshold(frame_gray, 10, 255, cv2.THRESH_BINARY)
-    #contours, hierarchy = cv2.findContours(image=frame_thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
-    #biggest_contour = get_biggest_contour(contours)
-    #frame = cv2.rectangle(frame, (biggest_contour["1"]["x"], biggest_contour["1"]["y"]), (biggest_contour["1"]["x"]+biggest_contour["1"]["width"], biggest_contour["1"]["y"]+biggest_contour["1"]["height"]), (255, 0, 0), 2)
-    #frame = cv2.rectangle(frame, (biggest_contour["2"]["x"], biggest_contour["2"]["y"]), (biggest_contour["2"]["x"]+biggest_contour["2"]["width"], biggest_contour["2"]["y"]+biggest_contour["2"]["height"]), (255, 0, 0), 2)
-    #if (biggest_contour["1"]["x"] < biggest_contour["2"]["x"]):
-    #    left_contour = biggest_contour["1"]
-    #    right_contour = biggest_contour["2"]
-    #else:
-    #    left_contour = biggest_contour["2"]
-    #    right_contour = biggest_contour["1"]
-    #json_data.append({"frame": current_frame, "left_MBR_x": left_contour["x"], "left_MBR_y": left_contour["y"], "left_MBR_width": left_contour["width"], "left_MBR_height": left_contour["height"], "right_MBR_x": right_contour["x"], "right_MBR_y": right_contour["y"], "right_MBR_width": right_contour["width"], "right_MBR_height": right_contour["height"]})
-
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    offset = 2
-    left_eye = gray[left_eye_frame.y+offset:left_eye_frame.y+left_eye_frame.height-offset*2,left_eye_frame.x+offset:left_eye_frame.x+left_eye_frame.width-offset*2]
-    left_eye = downscale_if_necessary(left_eye)
-    #cv2.imshow('Frame', frame)
-    #cv2.imshow('left eye', left_eye)
-    right_eye = gray[right_eye_frame.y+offset:right_eye_frame.y+right_eye_frame.height-offset*2,right_eye_frame.x+offset:right_eye_frame.x+right_eye_frame.width-offset*2]
-    right_eye = downscale_if_necessary(right_eye)
-    #cv2.imshow('right eye', right_eye)
-    #cv2.waitKey(0)
-    left_eye_bordered = cv2.copyMakeBorder(left_eye, 0, 128-left_eye.shape[0], 0, 256-left_eye.shape[1], cv2.BORDER_CONSTANT, None, value=0)
-    left_eye_bordered = left_eye_bordered.astype('float64')
-    left_eye_bordered *= 255.0/left_eye_bordered.max()
-    left_eye_bordered = img_to_array(left_eye_bordered)
-    left_eye_bordered = np.expand_dims(left_eye_bordered, axis=0)
-    right_eye_bordered = cv2.copyMakeBorder(right_eye, 0, 128-right_eye.shape[0], 0, 256-right_eye.shape[1], cv2.BORDER_CONSTANT, None, value=0)
-    right_eye_bordered = right_eye_bordered.astype('float64')
-    right_eye_bordered *= 255.0/right_eye_bordered.max()
-    right_eye_bordered = img_to_array(right_eye_bordered)
-    right_eye_bordered = np.expand_dims(right_eye_bordered, axis=0)
-
-    #left_start = time.time()
-    left_prediction = loaded_model.predict(left_eye_bordered)
-    left_prediction = left_prediction[0]
-    left_prediction_x = left_eye_frame.x + offset + left_prediction[0]*256
-    left_prediction_y = left_eye_frame.y + offset + left_prediction[1]*128
-    print(left_prediction_x)
-    print(left_prediction_y)
-    frame = cv2.circle(frame, (int(left_prediction_x),int(left_prediction_y)), 3, (255,0,0), 1)
-    #left_end = time.time()
-    #right_start = time.time()
-    right_prediction = loaded_model.predict(right_eye_bordered)
-    right_prediction = right_prediction[0]
-    right_prediction_x = right_eye_frame.x + offset + right_prediction[0]*256
-    right_prediction_y = right_eye_frame.y + offset + right_prediction[1]*128
-    frame = cv2.circle(frame, (int(right_prediction_x),int(right_prediction_y)), 3, (255,0,0), 1)
-    #right_end = time.time()
-    #print(left_prediction)
-    #print(right_prediction)
-    #print("Left time: ")
-    #print(left_end-left_start)
-    #print("\n")
-    #print("Right time: ")
-    #print(right_end-right_start)
-    #print("\n")
-    cv2.imshow('Frame', frame)
-    cv2.waitKey(0)
-
-    current_frame += 1
-    if cv2.waitKey(25) & 0xFF == ord('q'):
-      break
-  else:
-    break
-#with open('vertigo_data/VertigoVideosIII/data/sophia_reinhardt/Test_selfie_leeraufnahme_sophia_2021-09-28-13-12-39_fixed.json', 'w') as outfile:
-#    json.dump(json_data, outfile)
-cap.release()
-cv2.destroyAllWindows()
+def save_pupil_positions(filepath, x, y):
+    json_data = []
+    for i in range(len(x)):
+        json_data.append({"frame": i, "pupil-x": x[i], "pupil-y": y[i]})
+    with open(filepath, 'w') as outfile:
+        json.dump(json_data, outfile)
+
+def save_pupil_positions_csv(filepath, x, y):
+    # newline='' stops the csv module from writing blank rows on Windows
+    with open(filepath, 'w', newline='') as f:
+        writer = csv.writer(f)
+        writer.writerow(["frame", "pupil-x", "pupil-y"])
+        for i in range(len(x)):
+            writer.writerow([i, x[i], y[i]])
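+# e.g. save_pupil_positions_csv('positions.csv', xs, ys) writes one row per
+# frame: frame index, then horizontal and vertical pupil position in pixels.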
+
+def get_plot_pupil_positions(title, one_dim_positions):
+    time_steps = list(range(0, len(one_dim_positions)))
+    plt.plot(time_steps, one_dim_positions)
+    plt.xlabel('Time Steps')
+    plt.ylabel('Pupil Position')
+    plt.title(title)
+    return plt
+
+def plot_pupil_positions(title, one_dim_positions):
+    plt = get_plot_pupil_positions(title, one_dim_positions)
+    plt.show()
+
+def save_plot_from_csv(path):
+    x_pos = []
+    y_pos = []
+    with open(path, newline='') as csvfile:
+        reader = csv.reader(csvfile)
+        next(reader)  # skip the header row
+        for row in reader:
+            x_pos.append(float(row[1]))
+            y_pos.append(float(row[2]))
+    plt = get_plot_pupil_positions(path, x_pos)
+    figure = plt.gcf()
+    figure.set_size_inches(30, 20)
+    plt_path = ".".join(path.split(".")[:-1]) + ".png"
+    plt.savefig(plt_path)
+    plt.show()
+
+def read_pupil_positions_from_json(json_path):
+    pupil_positions_x = []
+    pupil_positions_y = []
+    with open(json_path, 'r') as f:
+        data = json.load(f)
+        for entry in data:
+            pupil_positions_x.append(entry["pupil-x"])
+            pupil_positions_y.append(entry["pupil-y"])
+    return pupil_positions_x, pupil_positions_y
+
+def classify_pupil_positions(json_path):
+    pupil_positions_x, _ = read_pupil_positions_from_json(json_path)
+    print(pupil_positions_x)
+    # in c++ ..
+
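+# A minimal, hedged sketch of one possible classification cue, since the actual
+# routine appears to live in C++ (see classify_pupil_positions above): count
+# direction reversals of the one-dimensional pupil trace, ignoring small jitter.
+# min_delta is an assumed, hypothetical threshold, not a value from this project.
+def count_direction_reversals(positions, min_delta=1.0):
+    reversals = 0
+    last_sign = 0
+    for prev, curr in zip(positions, positions[1:]):
+        delta = curr - prev
+        if abs(delta) < min_delta:
+            # ignore jitter below the (assumed) threshold
+            continue
+        sign = 1 if delta > 0 else -1
+        if last_sign != 0 and sign != last_sign:
+            reversals += 1
+        last_sign = sign
+    return reversals
+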
+def generate_fixed_json_data(filepath, filename_no_ext):
+    cap = cv2.VideoCapture(filepath + filename_no_ext + '.mp4')
+
+    if not cap.isOpened():
+        print("Error opening video stream or file")
+
+    current_frame = 0
+    json_data = []
+    print(filename_no_ext)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        _, frame_thresh = cv2.threshold(frame_gray, 10, 255, cv2.THRESH_BINARY)
+        contours, hierarchy = cv2.findContours(image=frame_thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
+        biggest_contour = get_biggest_contour(contours)
+        frame = cv2.rectangle(frame, (biggest_contour["1"]["x"], biggest_contour["1"]["y"]), (biggest_contour["1"]["x"]+biggest_contour["1"]["width"], biggest_contour["1"]["y"]+biggest_contour["1"]["height"]), (255, 0, 0), 2)
+        frame = cv2.rectangle(frame, (biggest_contour["2"]["x"], biggest_contour["2"]["y"]), (biggest_contour["2"]["x"]+biggest_contour["2"]["width"], biggest_contour["2"]["y"]+biggest_contour["2"]["height"]), (255, 0, 0), 2)
+        if (biggest_contour["1"]["x"] < biggest_contour["2"]["x"]):
+            left_contour = biggest_contour["1"]
+            right_contour = biggest_contour["2"]
+        else:
+            left_contour = biggest_contour["2"]
+            right_contour = biggest_contour["1"]
+        json_data.append({"frame": current_frame, "left_MBR_x": left_contour["x"], "left_MBR_y": left_contour["y"], "left_MBR_width": left_contour["width"], "left_MBR_height": left_contour["height"], "right_MBR_x": right_contour["x"], "right_MBR_y": right_contour["y"], "right_MBR_width": right_contour["width"], "right_MBR_height": right_contour["height"]})
+        current_frame += 1
+        if cv2.waitKey(25) & 0xFF == ord('q'):
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    with open(filepath + filename_no_ext + ".json", 'w') as outfile:
+        json.dump(json_data, outfile)
+
+def detect_pupil_generate_data(filepath, filename_no_ext):
+    cap = cv2.VideoCapture(filepath + filename_no_ext + '.mp4')
+    eye_frames, pupil_positions = read_json_file(filepath + filename_no_ext + '.json')
+
+    if not cap.isOpened():
+        print("Error opening video stream or file")
+
+    model_img_width = 256
+    model_img_height = 128
+
+    model_path = os.path.abspath('best_models_BO_32_fixed_11_0')
+    loaded_model = tf.keras.models.load_model(model_path, compile=True)
+
+    #model_path = os.path.abspath('first_model.h5')
+    #loaded_model = tf.keras.models.load_model(model_path)
+
+    current_frame = 0
+    left_pupil_positions_x = []
+    left_pupil_positions_y = []
+    right_pupil_positions_x = []
+    right_pupil_positions_y = []
+    print(filename_no_ext)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        current_eye_frame = eye_frames[current_frame]
+        left_eye_frame = current_eye_frame.left_eye_frame
+        right_eye_frame = current_eye_frame.right_eye_frame
+        #frame = cv2.rectangle(frame, (left_eye_frame.x, left_eye_frame.y), (left_eye_frame.x+left_eye_frame.width, left_eye_frame.y+left_eye_frame.height), (0, 255, 0), 2)
+        #frame = cv2.rectangle(frame, (right_eye_frame.x, right_eye_frame.y), (right_eye_frame.x+right_eye_frame.width, right_eye_frame.y+right_eye_frame.height), (255, 0, 0), 2)
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        offset = 2
+        left_eye = gray[left_eye_frame.y+offset:left_eye_frame.y+left_eye_frame.height-offset*2,left_eye_frame.x+offset:left_eye_frame.x+left_eye_frame.width-offset*2]
+        left_shape_original = left_eye.shape
+        left_eye = downscale_if_necessary(model_img_width, model_img_height, left_eye)
+        left_shape_scaled = left_eye.shape
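+        # the ratio maps model-space predictions back to the original crop size;
+        # image_resize preserves the aspect ratio, so the width ratio suffices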
+        if left_shape_scaled[1] == 0:
+            left_ratio = 1
+        else:
+            left_ratio = left_shape_original[1]/left_shape_scaled[1]
+        right_eye = gray[right_eye_frame.y+offset:right_eye_frame.y+right_eye_frame.height-offset*2,right_eye_frame.x+offset:right_eye_frame.x+right_eye_frame.width-offset*2]
+        right_shape_original = right_eye.shape
+        right_eye = downscale_if_necessary(model_img_width, model_img_height, right_eye)
+        right_shape_scaled = right_eye.shape
+        if right_shape_scaled[1] == 0:
+            right_ratio = 1
+        else:
+            right_ratio = right_shape_original[1]/right_shape_scaled[1]
+        left_eye_bordered = cv2.copyMakeBorder(left_eye, 0, model_img_height-left_eye.shape[0], 0, model_img_width-left_eye.shape[1], cv2.BORDER_CONSTANT, None, value=0)
+        left_eye_bordered_original = left_eye_bordered
+        left_eye_bordered = left_eye_bordered.astype('float64')
+        #left_eye_bordered *= 255.0/left_eye_bordered.max()
+        #cv2.imshow("left", left_eye_bordered)
+        left_eye_bordered = img_to_array(left_eye_bordered)
+        left_eye_bordered = np.expand_dims(left_eye_bordered, axis=0)
+        right_eye_bordered = cv2.copyMakeBorder(right_eye, 0, model_img_height-right_eye.shape[0], 0, model_img_width-right_eye.shape[1], cv2.BORDER_CONSTANT, None, value=0)
+        #cv2.imshow("right", right_eye_bordered)
+        right_eye_bordered_original = right_eye_bordered
+        right_eye_bordered = right_eye_bordered.astype('float64')
+        #right_eye_bordered *= 255.0/right_eye_bordered.max()
+        right_eye_bordered = img_to_array(right_eye_bordered)
+        right_eye_bordered = np.expand_dims(right_eye_bordered, axis=0)
+        #cv2.waitKey(0)
+
+        #left_start = time.time()
+        print("Predict left..")
+        left_prediction = loaded_model.predict(left_eye_bordered)
+        print("Done.")
+        left_prediction = left_prediction[0]
+        #left_prediction_x = left_eye_frame.x + offset + left_prediction[0] + (left_eye_frame.width - left_eye.shape[1])
+        #left_prediction_y = left_eye_frame.y + offset + left_prediction[1] + (left_eye_frame.height - left_eye.shape[0])
+        #print(left_prediction_x)
+        #print(left_prediction_y)
+        left_eye_bordered_original = cv2.cvtColor(left_eye_bordered_original,cv2.COLOR_GRAY2BGR)
+        left_eye_bordered_original = cv2.circle(left_eye_bordered_original, (int(left_prediction[0]*model_img_width),int(left_prediction[1]*model_img_height)), 3, (0,255,0), 1)
+        #left_end = time.time()
+        left_pupil_positions_x.append(left_eye_frame.x+offset+(left_prediction[0]*model_img_width*left_ratio))
+        left_pupil_positions_y.append(left_eye_frame.y+offset+(left_prediction[1]*model_img_height*left_ratio))
+        #right_start = time.time()
+        print("Predict right..")
+        right_prediction = loaded_model.predict(right_eye_bordered)
+        right_prediction = right_prediction[0]
+        print("Done.")
+        #right_prediction_x = right_eye_frame.x + offset + right_prediction[0] + (right_eye_frame.width - right_eye.shape[1])
+        #right_prediction_y = right_eye_frame.y + offset + right_prediction[1] + (right_eye_frame.height - right_eye.shape[0])
+        right_eye_bordered_original = cv2.cvtColor(right_eye_bordered_original,cv2.COLOR_GRAY2BGR)
+        right_eye_bordered_original = cv2.circle(right_eye_bordered_original, (int(right_prediction[0]*model_img_width),int(right_prediction[1]*model_img_height)), 3, (0,255,0), 1)
+        right_pupil_positions_x.append(right_eye_frame.x+offset+(right_prediction[0]*model_img_width*right_ratio))
+        right_pupil_positions_y.append(right_eye_frame.y+offset+(right_prediction[1]*model_img_height*right_ratio))
+        #cv2.imshow("left prediction", left_eye_bordered_original)
+        #cv2.imshow("right prediction", right_eye_bordered_original)
+        #cv2.waitKey(0)
+        #right_end = time.time()
+        #print(left_prediction)
+        #print(right_prediction)
+        #print("Left time: ")
+        #print(left_end-left_start)
+        #print("\n")
+        #print("Right time: ")
+        #print(right_end-right_start)
+        #print("\n")
+        #cv2.imshow('Frame', frame)
+        #cv2.waitKey(0)
+
+        current_frame += 1
+        if cv2.waitKey(25) & 0xFF == ord('q'):
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+
+    print("Generate data..")
+    save_pupil_positions_csv(filepath + "horizontal_pupil_positions_csv/" + filename_no_ext + "_left.csv", left_pupil_positions_x, left_pupil_positions_y)
+    save_pupil_positions_csv(filepath + "horizontal_pupil_positions_csv/" + filename_no_ext + "_right.csv", right_pupil_positions_x, right_pupil_positions_y)
+    print("Done.")
+    #plot_pupil_positions("left", left_pupil_positions_x)
+    #plot_pupil_positions("right", right_pupil_positions_x)
+#['test_Leer_Jonas_Schneider_selfie_2021-10-18-16-18-31','test_rück_jonas_rechts_2021-10-18-16-39-29','test_selfie_jonas_links_2021-10-18-16-23-14','test_selfie_jonas_rechts_2021-10-18-16-29-42','test_rück_jonas_links_2021-10-18-16-33-47','test_leer_jonas2_2021-10-21-13-29-43','test_rück_jonas_links_2021-10-21-13-15-43','test_rück_jonas_maske_rechts_2021-10-21-13-19-36','test_rück_maske_jonas_links_2021-10-21-13-25-51','test_selfie_maske_jonas_links_2021-10-21-13-44-49','test_selfie_maske_jonas_rechts_2021-10-21-13-39-09']
+#['test_rück_flo_links_2021-10-18-08-39-16','test_rück_flo_rechts_2021-10-18-08-41-52','test_selfie_Flo_links_2021-10-18-08-20-36','test_selfie_flo_rechts_2021-10-18-08-29-42','test_leer_flo_2021-10-20-08-20-22','test_maske_rück_flo_links_2021-10-20-08-37-21','test_maske_rück_flo_rechts_2021-10-20-08-33-04','test_maske_selfie_flo_links_2021-10-20-08-23-19','test_maske_selfie_flo_rechts_2021-10-20-08-27-00']
+#['test_maske_rück_christiane_links_2021-10-19-14-18-12','test_maske_rück_christiane_rechts_2021-10-19-14-23-15','test_maske_selfie_christiane_rechts_2021-10-19-14-09-08','test_selfie_christiane_rechts_2021-10-19-10-56-35','test_selfie_links_christiane_2021-10-13-11-46-58','test_leer_christiane2_2021-10-20-13-19-22','test_rück_christiane_links_2021-10-20-12-41-22','test_rück_christiane_rechts_2021-10-20-13-02-16']
+#['test_leeraufnahme_julia_2021-10-19-16-27-02','test_maske_rück_julia_rechts_2021-10-19-17-04-48','test_maske_rück_julua_links_2021-10-19-17-01-25','test_maske_selfie_julia_links_2021-10-19-16-51-20','test_maske_selfie_julia_rechts_2021-10-19-16-55-55','test_rück_julia_links_2021-10-19-16-41-25','test_rück_julia_rechts_2021-10-19-16-47-01','test_selfie_julia_links_2021-10-19-16-31-56','test_selfie_julia_rechts_2021-10-19-16-35-52']
+#['Selfie_Leeraufnahme_Miriam','Selfie_MiriamSimon_links','Selfie_MiriamSimon_rechts']
+#['Selfie_links_Sarah_Skafa','Test_leeraufnahme_sarak_skafa_2021-09-28-14-41-25','Test_rück_links_sarah_skafa_2021-09-28-14-30-57','Test_Rück_rechts_sarah_skafa_2021-09-28-14-38-43']
+#['Selfie_SophiaReinhardt_links','Selfie_SophiaReinhardt_rechts','Test_rück_links_sophia_2021-09-28-14-53-53','Test_selfie_leeraufnahme_sophia_2021-09-28-13-12-39','test_maske_rückkamera_sophia_rechts_2021-10-19-14-39-44','test_maske_rück_sophia_links_2021-10-19-14-45-03','test_maske_selfie_slphia_rechts_2021-10-19-14-32-40','test_maske_selfie_sophia_links_2021-10-19-14-26-40','test_selfie_sophia_links2_2021-10-18-08-35-28','test_selfie_sophia_rechts_2_2021-10-18-08-33-03']
\ No newline at end of file
-- 
GitLab