diff --git a/master.tex b/master.tex
index eb558490fc2355a76e1f0acfa6dd985fe05e8e7f..2a4786fde21663b5f539724ed6f217660356f7ee 100755
--- a/master.tex
+++ b/master.tex
@@ -21,8 +21,7 @@
 \usepackage{mathabx}
 \usepackage[linesnumbered,algoruled,boxed,lined]{algorithm2e}
 \usepackage{amssymb}
-\usepackage[square,numbers]{natbib}
-
+\usepackage[longnamesfirst, authoryear]{natbib}
 \usepackage{textcomp}
 \usepackage{listings}
 \usepackage{color}
@@ -183,7 +182,7 @@ Department for Computer-Networks and Communication-Systems}
 
 % % % % % % % % % Ende der eingefügten LaTeX-Dateien % % % % % % % % %
 \begin{frame}[allowframebreaks]\frametitle{References}
-	\bibliographystyle{abbrvnat}
+	\bibliographystyle{plainnat}
 	\bibliography{references}
 	\nocite{*} 
 \end{frame}
diff --git a/slides/dataset.tex b/slides/dataset.tex
index 6ede38104f284e6aeea98f55304487d9e2ccfeaa..c3a10e655e2b3f3122ed2dab21e5490acafa5245 100644
--- a/slides/dataset.tex
+++ b/slides/dataset.tex
@@ -4,16 +4,10 @@
 \begin{frame}
 	\frametitle{Corpus}
 	\begin{itemize}
-		\item For our study, we used the Webis-ArgRank2017 dataset from Wachsmuth et
-		      al.\footnote[1]{H. Wachsmuth, B. Stein, and Y. Ajjour "PageRank" for Argument
-			      Relevance}
-		\item In this dataset Wachsmuth et al. constructed a ground-truth argument graph
-		      as well as benchmark for argument ranking
-		\item The data are originally collected from the Argument Web and stored in an
-		      argument graph
-		\item The Argument Web was the largest existing argument database with a
-		      structured argument corpora at that time
-
+		\item For our study, we used the Webis-ArgRank2017 dataset by \cite{wachsmuth:2017a}
+		\item Webis-ArgRank2017 contains a ground-truth argument graph
+		      as well as a benchmark for argument ranking
+		\item The data were originally collected from the Argument Web
 	\end{itemize}
 \end{frame}
 
@@ -22,17 +16,16 @@
 	\begin{itemize}
 		\item In the resulting argument graph G = (A, E)
 		      \begin{itemize}
-			      \item Each node represents $a_i \in A$ an argument consisting of a conclusion
-			            $c_i$ and a not-empty set of premises $P_i$ $\Rightarrow$ $a_i = \langle c_i, P_i \rangle$
-			      \item An edge $(a_j, a_i)$ is given if the conclusion $a_j$ is used as a premise
-			            of $a_i$
+			      \item Each node represents an argument $a_i \in A$ consisting of a conclusion
+			            $c_i$ and a non-empty set of premises $P_i$ $\Rightarrow$ $a_i = \langle c_i, P_i \rangle$
+			      \item An edge $(a_j, a_i)$ is given if the conclusion $c_j$ of $a_j$ is used as a premise
+			            of $a_i$ (see the set notation below)
 			      \item Consequently, $P_i = \{c_1,...,c_k\}, k \geq 1$
 		      \end{itemize}
 	\end{itemize}
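+	% Added note: a compact restatement of the edge criterion from the bullets above, using only symbols already defined on this slide ($c_j$ denotes the conclusion of $a_j$).
+	Equivalently, $E = \{(a_j, a_i) \mid c_j \in P_i\}$.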
-
 	\begin{figure}
-		\includegraphics[width=0.4\linewidth]{bilder/DatasetLocalView2.png}
-		\caption{Argument Graph from Argument Web}
+		\includegraphics[width=0.7\linewidth]{bilder/DatasetLocalView2.png}
+		\caption{Part of the Argument Web used to create Webis-ArgRank2017.}
 	\end{figure}
 \end{frame}
 
@@ -40,20 +33,16 @@
 \subsection{Benchmark Argument Ranking}
 \begin{frame}
 	\frametitle{Benchmark Argument Ranking}
+	To create the benchmark, \cite{wachsmuth:2017a} kept only those arguments fulfilling the following requirements:
 	\begin{itemize}
-		\item To create the benchmark dataset, Wachsmuth et al. only kept arguments
-		      from the graph that fulfill their requirements
-		      \begin{itemize}
-			      \item A conclusion must be part in more than one argument
-			      \item All nodes are removed that do not contain a
-			            real claim
-			      \item Additionally, an argument:
-			            \begin{itemize}
-				            \item has to be a valid-counter argument
-				            \item must be based on reasonable premises
-				            \item must  allow a logic interference to be drawn
-			            \end{itemize}
-		      \end{itemize}
+		\item The conclusion was part of more than one argument
+		\item The conclusion contained a real claim
+	\end{itemize}
+	Additionally, an argument had to:
+	\begin{itemize}
+		\item be a valid counterargument
+		\item be based on reasonable premises
+		\item allow a logical inference to be drawn
 	\end{itemize}
 \end{frame}
 
@@ -74,15 +63,7 @@
 \begin{frame}
 	\frametitle{Evaluation Method}
 	\begin{itemize}
-\item Wachsmuth et al. used Kendall's $\tau$ to evaluate the agreement between
-the experts and to ensure comparability
-		\item Kendall $\tau$ is a correlation coefficient that indicates the agreement
-		      between two quantities with respect to a property
-		      \begin{itemize}
-			      \item In this case, this means the agreement between two experts with respect to
-			            an argument
-			      \item -1 signifies a complete disagreement and +1 a complete agreement
-		      \end{itemize}
-		\item The mean over all experts for the evaluation of the benchmark is 0.36
+		\item \cite{wachsmuth:2017a} used Kendall's $\tau \in [-1, 1]$ (defined below) to evaluate the agreement between the expert rankings and to ensure comparability
+		\item The mean agreement over all experts on the benchmark is $\tau \approx 0.36$
 	\end{itemize}
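+	% Added for reference: the standard definition of Kendall's tau (the tau-a form is assumed here; the slides only state the value range and the result). $n_c$ and $n_d$ are the numbers of concordant and discordant pairs among $n$ ranked items.
+	\[ \tau = \frac{n_c - n_d}{n(n-1)/2} \]
+	where $\tau = -1$ signifies complete disagreement and $\tau = +1$ complete agreement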
 \end{frame}
\ No newline at end of file