diff --git a/slides/methods.tex b/slides/methods.tex
index 8294760da4b737e7bc823a5f92ae611ff9fbb547..cbbcf37e99994290e84f22b0ab9e496e1b5d8959 100644
--- a/slides/methods.tex
+++ b/slides/methods.tex
@@ -1,5 +1,47 @@
 \section{Methods}
-\subsection{Methods}
+\subsection{Baseline Methods}
 \begin{frame}
-	This is the third slide \cite{wachsmuth:2017a}.
+	\frametitle{Similarity}
+	\begin{itemize}
+		\item To rank arguments, we measured the semantic similarity between premise
+		      and conclusion
+		\item Each word is embedded in a vector space, and premise and conclusion are
+		      each represented by the average of their word vectors
+		\item The similarity of a premise and a conclusion is then the cosine of the
+		      angle between their averaged vectors (as formalized below)
+		\item In this experiment, we used three different embeddings
+		      \begin{itemize}
+			      \item BERT\footnote{J. Devlin, M. Chang, K. Lee, and K. Toutanova, “BERT: Pre-training of deep
+			            bidirectional transformers for language understanding”}
+			      \item ELMo\footnote{M. E. Peters, M. Neumann, M. Iyyer, M. Gardner, C. Clark, K. Lee, and
+			            L. Zettlemoyer, “Deep contextualized word representations”}
+			      \item GloVe\footnote{J. Pennington, R. Socher, and C. Manning, “GloVe: Global vectors for word representation”}
+		      \end{itemize}
+	\end{itemize}
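+
+	% Sketch of the similarity described above; \bar{p} and \bar{c} denote the averaged
+	% premise and conclusion embeddings and e(w) the embedding of word w. This notation
+	% is ours, not taken from the cited embedding papers.
+	\begin{equation*}
+		\operatorname{sim}(p, c) = \frac{\bar{p} \cdot \bar{c}}{\lVert \bar{p} \rVert \, \lVert \bar{c} \rVert},
+		\qquad \bar{p} = \frac{1}{|p|} \sum_{w \in p} e(w)
+	\end{equation*}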
+\end{frame}
+
+
+\begin{frame}
+	\frametitle{Sentiment}
+	\begin{itemize}
+		\item Another approach to ranking arguments is to measure how positive the
+		      tone of the premises is (scored as sketched below)
+		\item For this, we used a sentiment neural network based on FastText\footnote{A. Joulin, E. Grave, P. Bojanowski, and T. Mikolov, “Bag of tricks for efficient text classification”},
+		      which was trained on film reviews from IMDb
+	\end{itemize}
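+
+	% Sketch of the scoring referenced above; treating an argument's score as the mean
+	% positive-class probability over its premises P_a is our formalization and may
+	% differ from the exact implementation.
+	\begin{equation*}
+		\operatorname{score}(a) = \frac{1}{|P_a|} \sum_{p \in P_a} \Pr(\text{positive} \mid p)
+	\end{equation*}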
 \end{frame}
\ No newline at end of file