diff --git a/Bilder/battle.png b/Bilder/battle.png
index b52050afa73f7fd97fdfcee5712a92e79c7b46f9..92b5b4a83bafb033d5023d7e152fd887f423f2c9 100644
Binary files a/Bilder/battle.png and b/Bilder/battle.png differ
diff --git a/Bilder/bayes_dimensional_embedding.png b/Bilder/bayes_dimensional_embedding.png
new file mode 100644
index 0000000000000000000000000000000000000000..ebda88497c6fe83721d74ad10e34432fe2fac578
Binary files /dev/null and b/Bilder/bayes_dimensional_embedding.png differ
diff --git a/Bilder/bayes_sampling_steps.png b/Bilder/bayes_sampling_steps.png
new file mode 100644
index 0000000000000000000000000000000000000000..fd97bdf264b94ce686f904b232bed6c5c5d87b67
Binary files /dev/null and b/Bilder/bayes_sampling_steps.png differ
diff --git a/Bilder/corrected_results.png b/Bilder/corrected_results.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa922efea3123744c361cd50f32f2fea5194551d
Binary files /dev/null and b/Bilder/corrected_results.png differ
diff --git a/Bilder/reported_results.png b/Bilder/reported_results.png
index 8b51decdae394a6631bb6eddfec223b389b20d41..60fac5d1479900674c0a06c6e1d857e53a740e5c 100644
Binary files a/Bilder/reported_results.png and b/Bilder/reported_results.png differ
diff --git a/baselines.tex b/baselines.tex
index ad47585594e216cbd458d64b9b2edd49c15e7968..0407d8b31ef383b465030814298a39719fb45183 100644
--- a/baselines.tex
+++ b/baselines.tex
@@ -31,7 +31,7 @@ To prepare the two learning procedures they were initialized with a \textit{gaus
 
 For both approaches the number of \textit{sampling steps} was then set to \textit{128}. Since \textit{SGD} has two additional \textit{hyperparameters} $\lambda, \gamma$, these had to be determined as well. Overall, the \textit{MovieLens10M-dataset} was evaluated by \textit{10-fold cross-validation} over \textit{random, global} and \textit{non-overlapping 90:10 splits}: in each split, \textit{90\%} of the data was used for \textit{training} and the remaining \textit{10\%} for \textit{evaluation}. Within each split, \textit{95\%} of the \textit{training data} was used for \textit{training} and the remaining \textit{5\%} as a holdout to determine the \textit{hyperparameters}. The \textit{hyperparameter search} was performed as described in \textit{section} \ref{sec:sgd} using the \textit{grid} $(\lambda \in \{0.02, 0.03, 0.04, 0.05\}, \gamma \in \{0.001, 0.003\})$, which was inspired by findings during the \textit{Netflix-Prize} \citep{Kor08, Paterek07}. This search yielded $\lambda=0.04$ and $\gamma=0.003$. Afterwards both \textit{learning methods} and their settings were compared: the \textit{RMSE} was plotted against the used \textit{dimension} $f$ of $p_u, q_i \in \mathbb{R}^f$. \textit{Figure} \ref{fig:battle} shows the corresponding results.
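+For illustration, a minimal sketch of this \textit{grid search} could look as follows; \texttt{train\_sgd\_mf}, \texttt{rmse}, \texttt{train\_95} and \texttt{holdout\_5} are hypothetical stand-ins for the actual training and evaluation routines, not the authors' code:
+\begin{verbatim}
+# Hedged sketch: choose (lambda, gamma) by RMSE on the 5%
+# holdout carved out of each split's training data.
+best = None
+for lam in [0.02, 0.03, 0.04, 0.05]:
+    for gamma in [0.001, 0.003]:
+        model = train_sgd_mf(train_95, reg=lam, lr=gamma,
+                             steps=128)
+        score = rmse(model, holdout_5)
+        if best is None or score < best[0]:
+            best = (score, lam, gamma)
+_, lam_star, gamma_star = best   # reported: 0.04 and 0.003
+\end{verbatim}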
 \input{battle}
-
+\newpage
 As a \textit{first intermediate result} of the preparation, it can be stated that both \textit{SGD} and the \textit{gibbs-sampler} achieve better \textit{RMSE values} with increasing \textit{dimensional embedding}.
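+For reference, the \textit{RMSE} reported throughout is the standard root mean squared error over the evaluation set $T$, where for the plain \textit{matrix factorization} (ignoring any bias terms) the prediction is $\hat{r}_{ui} = p_u^\top q_i$:
+\begin{equation*}
+  \mathrm{RMSE} = \sqrt{\frac{1}{|T|} \sum_{(u,i) \in T} \left( r_{ui} - \hat{r}_{ui} \right)^2} .
+\end{equation*}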
 
 In addition, it can be stated that learning using the \textit{bayesian approach} is better than learning using \textit{SGD}. Even if more carefully tuned setups could change the results, it is still surprising that \textit{SGD} is worse than the \textit{bayesian approach}, since the \textit{exact opposite} was reported for \textit{MovieLens10M}. For example, \textit{figure} \ref{fig:reported_results} shows that the \textit{bayesian approach BPMF} achieved an \textit{RMSE} of \textit{0.8187} while the \textit{SGD approach Biased MF} performed better with \textit{0.803}. The fact that the \textit{bayesian approach} outperforms \textit{SGD} has already been reported and validated by \citet{Rendle13} and \citet{Rus08} for the \textit{Netflix-Prize-dataset}. Looking more closely at \textit{figures} \ref{fig:reported_results} and \ref{fig:battle}, the \textit{bayesian approach} scores better than the reported \textit{BPMF} and \textit{Biased MF} for every \textit{dimensional embedding}. Moreover, it even beats all reported baselines and newer methods. Building on this, the authors examined the methods and baselines in detail.
@@ -40,9 +40,15 @@ For the actual execution of the experiment, the \textit{authors} used the knowle
 As the \textit{Netflix-Prize} showed, the use of \textit{implicit data} such as \textit{time} or \textit{dependencies} between \textit{users} and \textit{items} can \textit{immensely improve existing models}. In addition to the two \textit{simple matrix factorizations}, \textit{table} \ref{table:models} shows the authors' \textit{extensions} of the \textit{bayesian approach}; how such \textit{implicit data} enters a model is sketched below the table.
 
 \input{model_table}
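+As an example, the well-known \textit{SVD++} predictor extends the plain \textit{matrix factorization} by the set $N(u)$ of items for which user $u$ has given implicit feedback \citep{Kor08}:
+\begin{equation*}
+  \hat{r}_{ui} = \mu + b_u + b_i + q_i^\top \Big( p_u + |N(u)|^{-\frac{1}{2}} \sum_{j \in N(u)} y_j \Big),
+\end{equation*}
+where $\mu$ denotes the global mean, $b_u, b_i$ the user and item biases, and each item $j \in N(u)$ contributes an implicit factor vector $y_j \in \mathbb{R}^f$.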
-As it turned out that the \textit{bayesian approach} gave more promising results, the given models were trained with it. For this purpose, the \textit{dimensional embedding} as well as the \textit{number of sampling steps} for the models were examined again. Again the \textit{gaussian normal distribution} was used for \textit{initialization} as indicated in \textit{section} \ref{sec:experiment_preparation} . \textit{Figure} XY shows the corresponding results.
+Since the \textit{bayesian approach} turned out to give more promising results, the given models were trained with it. For this purpose, the \textit{dimensional embedding} as well as the \textit{number of sampling steps} were examined again for these models. Again the \textit{gaussian normal distribution} was used for \textit{initialization}, as indicated in \textit{section} \ref{sec:experiment_preparation}. \textit{Figure} \ref{fig:bayes_evaluation} shows the corresponding results.
+\input{bayes_evaluation}
 
 \subsection{Observations}
+The first observation that emerges from \textit{figure} \ref{fig:bayes_sampling_steps} is that \textit{increasing} the number of \textit{sampling steps} at a \textit{fixed dimensional embedding} \textit{improves} the \textit{RMSE} for all models. \textit{Figure} \ref{fig:bayes_dimensional_embeddings} complements this: \textit{increasing} the \textit{dimensional embedding} at \textit{512 sampling steps} likewise \textit{improves} the \textit{RMSE} for all models. Thus, both the \textit{number of sampling steps} and the size of the \textit{dimensional embedding} determine the \textit{RMSE} of \textit{matrix-factorization models} when they are trained with the \textit{bayesian approach}.
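+This behaviour is plausible given how the \textit{bayesian approach} forms its predictions: it averages over all drawn \textit{gibbs samples}, so additional \textit{sampling steps} refine the estimate. A minimal sketch, assuming a hypothetical \texttt{gibbs\_step} posterior update in the spirit of \citet{Rus08}; \texttt{n\_users}, \texttt{n\_items}, \texttt{f}, \texttt{num\_steps} and \texttt{ratings} are likewise placeholders:
+\begin{verbatim}
+import numpy as np
+
+rng = np.random.default_rng(0)
+P = rng.normal(0.0, 0.1, size=(n_users, f))  # gaussian init
+Q = rng.normal(0.0, 0.1, size=(n_items, f))  # gaussian init
+pred = np.zeros((n_users, n_items))
+for s in range(1, num_steps + 1):      # e.g. 128 or 512 steps
+    P, Q = gibbs_step(P, Q, ratings)   # draw from the posterior
+    pred += (P @ Q.T - pred) / s       # running average of r_hat
+\end{verbatim}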
+
+As a second finding, the \textit{RMSE values} of the created models can be read off \textit{figure} \ref{fig:bayes_dimensional_embeddings}. Several points stand out. Firstly, including a \textit{single source} of \textit{implicit knowledge} such as \textit{time} or \textit{user behaviour} already leads to a significant \textit{improvement} in the \textit{RMSE}: \textit{bayesian timeSVD (0.7587)} and \textit{bayesian SVD++ (0.7563)} both beat the \textit{simple bayesian MF} with its \textit{RMSE} of \textit{0.7633}. Secondly, the \textit{combination} of \textit{implicit data} improves the \textit{RMSE} further: \textit{bayesian timeSVD++} achieves an \textit{RMSE} of \textit{0.7523}. Finally, \textit{bayesian timeSVD++ flipped} reaches an \textit{RMSE} of \textit{0.7485} by adding \textit{even more implicit data}.
+This leads to the third and most significant observation of the experiment. The \textit{simple bayesian MF} with an \textit{RMSE} of \textit{0.7633} already beats \textit{MRMA}, the best reported method, with its \textit{RMSE} of \textit{0.7634}. Furthermore, \textit{MRMA} is surpassed by \textit{bayesian timeSVD++ flipped} by \textit{0.0149} with respect to the \textit{RMSE}. Such a result is astonishing, as it took \textit{one year} during the \textit{Netflix-Prize} to reduce the leading \textit{RMSE} from \textit{0.8712 (progress award 2007)} to \textit{0.8616 (progress award 2008)}. It is also remarkable because it \textit{challenges} the \textit{last 5 years} of research on the \textit{MovieLens10M-dataset}. Based on these results, the \textit{authors} identify as a first problem that the \textit{results} achieved on the \textit{MovieLens10M-dataset} were \textit{compared against} too \textit{weak baselines}.
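+In numbers, a single set of stronger baselines thus yields a larger gain than a whole year of concentrated competition:
+\begin{equation*}
+  \underbrace{0.7634 - 0.7485}_{\text{MRMA vs. bayesian timeSVD++ flipped}} = 0.0149 \; > \; 0.0096 = \underbrace{0.8712 - 0.8616}_{\text{Netflix-Prize 2007 vs. 2008}} .
+\end{equation*}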
+
 \subsubsection{Stronger Baselines}
 \subsubsection{Reproducibility}
 \subsubsection{Inadequate Validations}
\ No newline at end of file
diff --git a/battle.tex b/battle.tex
index 6826865056f3626b99abb6e19c6fa81d0e67fdfc..fabf4e946d4fcc59be1c9c3f68741f92015fc190 100644
--- a/battle.tex
+++ b/battle.tex
@@ -1,6 +1,6 @@
 \begin{figure}[!ht]
   \centering
-    \includegraphics[scale=0.37]{Bilder/battle.png}
+    \includegraphics[scale=0.60]{Bilder/battle.png}
   \caption{Comparison of \textit{matrix-factorization} learned by \textit{gibbs-sampling (bayesian learning)} and \textit{stochastic gradient descent (SGD)} for an \textit{embedding dimension} from \textit{16} to \textit{512}.
 }
 \label{fig:battle}
diff --git a/bayes_evaluation.tex b/bayes_evaluation.tex
new file mode 100644
index 0000000000000000000000000000000000000000..2c5c235f030db2bb404707587e243d4d2eada8e4
--- /dev/null
+++ b/bayes_evaluation.tex
@@ -0,0 +1,15 @@
+\begin{figure}[!ht]
+  \centering
+  \begin{subfigure}[b]{0.45\linewidth}
+    \includegraphics[width=\linewidth]{Bilder/bayes_sampling_steps.png}
+    \caption{\textit{RMSE} vs. \textit{sampling steps}}
+    \label{fig:bayes_sampling_steps}
+  \end{subfigure}
+  \begin{subfigure}[b]{0.45\linewidth}
+    \includegraphics[width=\linewidth]{Bilder/bayes_dimensional_embedding.png}
+    \caption{\textit{RMSE} vs. \textit{dimensional embedding}}
+    \label{fig:bayes_dimensional_embeddings}
+  \end{subfigure}
+  \caption{Final evaluation of the \textit{number of sampling steps} and the \textit{dimensional embedding} for the designed models. \textit{Figure} \ref{fig:bayes_sampling_steps} plots the \textit{RMSE} against the \textit{number of sampling steps} at a \textit{dimensional embedding} of \textit{128}. \textit{Figure} \ref{fig:bayes_dimensional_embeddings} shows the \textit{RMSE} for \textit{512 sampling steps} with \textit{variable dimensional embedding}.}
+  \label{fig:bayes_evaluation}
+\end{figure}
\ No newline at end of file
diff --git a/reported_results.tex b/reported_results.tex
index 924967d7665a1802fd0408dadd35fdf68c5210f6..aed2f9c0c39928ee521bd3ea186a170fafd004b7 100644
--- a/reported_results.tex
+++ b/reported_results.tex
@@ -1,6 +1,6 @@
 \begin{figure}[!ht]
   \centering
-    \includegraphics[scale=0.25]{Bilder/reported_results.png}
+    \includegraphics[scale=0.60]{Bilder/reported_results.png}
   \caption{\textit{Results obtained} on the \textit{MovieLens10M-dataset} over the last \textit{5 years}. The \textit{y-axis} shows the corresponding \textit{RMSE} values and the \textit{x-axis} shows the \textit{year} in which the corresponding method was developed. \textit{Blue} marked points show \textit{newer methods} that have \textit{competed} against the points shown in \textit{black}. \citep{Rendle19}}
   \label{fig:reported_results}
 \end{figure}
\ No newline at end of file
diff --git a/submission.pdf b/submission.pdf
index f072dc6532dcb43d4107451fee2f931ef750412c..2e17f2aab795181730de774b9cc8077f0df3f68f 100644
Binary files a/submission.pdf and b/submission.pdf differ