diff --git a/slides/dataset.tex b/slides/dataset.tex
index fe32254d2ca77dc6365f411e017616d8e52b4759..395d9f5383864001e03be6a48f6db82fc59d86a6 100644
--- a/slides/dataset.tex
+++ b/slides/dataset.tex
@@ -17,15 +17,13 @@
 
 \begin{frame}
 	\frametitle{Corpus}
+	\textbf{The argument graph $G = (A, E)$ is a directed graph with:}
 	\begin{itemize}
-		\item In the resulting argument graph G = (A, E)
-		      \begin{itemize}
-			      \item Each node represents an argument $a_i \in A$ consisting of a conclusion
-			            $c_i$ and a not-empty set of premises $P_i$ $\Rightarrow$ $a_i = \langle c_i, P_i \rangle$
-			      \item An edge $(a_j, a_i)$ is given if the conclusion $a_j$ is used as a premise
-			            of $a_i$
-			      \item Consequently, $P_i = \{c_1,...,c_k\}, k \geq 1$
-		      \end{itemize}
+		\item Each argument node $a_i \in A$ consists of a conclusion $c_i$ and a non-empty set of premises $P_i$, i.e.\ $a_i = \langle c_i, P_i \rangle$
+		\item An edge $(a_j, a_i)$ exists if the conclusion $c_j$ of $a_j$ is used as a premise of $a_i$ (see the example below)
+		\item Consequently, $P_i = \{c_1, \dots, c_k\}$ with $k \geq 1$
 	\end{itemize}
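+	% Toy example (ours, for illustration; not an item from the corpus):
+	\textit{Example:} with $a_1 = \langle c_1, \{p_1, p_2\} \rangle$ and $a_2 = \langle c_2, \{c_1, p_3\} \rangle$, the conclusion $c_1$ serves as a premise of $a_2$, so $(a_1, a_2) \in E$.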
 	\begin{figure}
 		\includegraphics[width=0.7\linewidth]{bilder/DatasetLocalView2.png}
@@ -33,41 +31,27 @@
 	\end{figure}
 \end{frame}
 
-
 \subsection{Benchmark Argument Ranking}
 \begin{frame}
 	\frametitle{Benchmark Argument Ranking}
-	\cite{wachsmuth:2017a} kept those arguments fulfilling:
+	\textbf{\cite{wachsmuth:2017a} kept only those arguments fulfilling:}
 	\begin{itemize}
-		\item The conclusion was part in more than one argument
-		\item Real claim
+		\item The conclusion was part of more than one argument
+		\item The conclusion was a real claim
 	\end{itemize}
-	Additionally, an argument had to be:
+	\textbf{Additionally, an argument had to be:}
 	\begin{itemize}
 		\item a valid counterargument
 		\item based on reasonable premises
 		\item logically consistent
 	\end{itemize}
-\end{frame}
-
-\begin{frame}
-	\frametitle{Benchmark Argument Ranking}
-	\begin{itemize}
-		\item The resulting benchmark dataset consist of 32 conclusions which
-		      participated in 110 arguments
-		\item Those 110 arguments were ranked by seven experts from computational
-		      linguistics and information retrieval
-		\item Each argument was ranked by how much each of its premises contributes to
-		      the acceptance or rejection of the conclusion
-	\end{itemize}
-\end{frame}
-
+	\begin{block}{In total}
+		110 arguments over 32 conclusions were ranked by seven experts from computational linguistics and information retrieval.
+
+		Each expert ranked every argument by how much each of its premises contributes to the acceptance or rejection of the conclusion.
 
-\subsection{Evaluation Method}
-\begin{frame}
-	\frametitle{Evaluation Method}
-	\begin{itemize}
-		\item \cite{wachsmuth:2017a} used Kendall's $\tau \in [-1, 1]$ to evaluate the agreement between the experts and to ensure comparability
-		\item The mean over all experts for the evaluation of the benchmark is $\tau \approx 0.36$
-	\end{itemize}
+		Agreement between the experts was measured with Kendall's $\tau \in [-1, 1]$; the mean over all experts is $\tau \approx 0.36$ (worked example below).
+	\end{block}
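+	% Worked example (ours, for illustration) of Kendall's tau on two tie-free rankings:
+	\textit{Example:} for two rankings of $n = 4$ arguments with $5$ concordant and $1$ discordant of the $\binom{4}{2} = 6$ pairs, $\tau = \frac{5 - 1}{6} \approx 0.67$.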
 \end{frame}
\ No newline at end of file