diff --git a/bilder/pngfuel-friends.png b/bilder/pngfuel-friends.png
new file mode 100644
index 0000000000000000000000000000000000000000..563466b31527ab765bac22cd92dd7a84d012da15
Binary files /dev/null and b/bilder/pngfuel-friends.png differ
diff --git a/references.bib b/references.bib
index ffcbcb7ed3d5ce1b93d09c05ba1d9785fadc2c5b..99cd50580b0ee4a621da3fd358622c2232336e8e 100755
--- a/references.bib
+++ b/references.bib
@@ -45,4 +45,45 @@
 	numpages = {6},
 	location = {Las Cruces, New Mexico},
 	series = {ACL ’94}
+}
+@inproceedings{pennington2014glove,
+	title = {{G}lo{V}e: Global Vectors for Word Representation},
+	author = {Pennington, Jeffrey and Socher, Richard and Manning, Christopher},
+	booktitle = {Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
+	month = {10},
+	year = {2014},
+	address = {Doha, Qatar},
+	publisher = {Association for Computational Linguistics},
+	url = {https://doi.org/10.3115/v1/D14-1162},
+	doi = {10.3115/v1/D14-1162},
+	pages = "1532--1543"
+}
+@article{Peters:2018ELMo,
+	author    = {Matthew E. Peters and Mark Neumann and Mohit Iyyer and Matt Gardner and Christopher Clark and Kenton Lee and Luke Zettlemoyer},
+	title     = {Deep contextualized word representations},
+	journal   = {CoRR},
+	year      = {2018},
+	url       = {http://arxiv.org/abs/1802.05365},
+	archivePrefix = {arXiv},
+	eprint    = {1802.05365}
+}
+@article{devlin2018bert,
+	author    = {Jacob Devlin and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova},
+	title     = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language Understanding},
+	journal   = {CoRR},
+	year      = {2018},
+	url       = {http://arxiv.org/abs/1810.04805},
+	archivePrefix = {arXiv},
+	eprint    = {1810.04805}
+}
+@inproceedings{Armand:2017FastText,
+	title = {Bag of Tricks for Efficient Text Classification},
+	author = {Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas},
+	booktitle = {Proceedings of the 15th Conference of the {E}uropean Chapter of the Association for Computational Linguistics},
+	month = {04},
+	year = {2017},
+	address = {Valencia, Spain},
+	publisher = {Association for Computational Linguistics},
+	url = {https://doi.org/10.18653/v1/E17-2068},
+	pages = {427--431}
 }
\ No newline at end of file
diff --git a/slides/methods.tex b/slides/methods.tex
index db6338a7198175fcbff54fa41701216b27eb25d5..8d30c13b46e0b6eab2f6bfede9ecaaa93d0e9778 100644
--- a/slides/methods.tex
+++ b/slides/methods.tex
@@ -51,23 +51,27 @@
 	\begin{itemize}
 		\item For ranking the arguments, we measured the semantic similarity
 		      between the premises and conclusions
-		\item Each argument was embedded word-wise in an averaged vector space
-		\item The resulting similarity was calculated by using $cos(c, p)$
-		\item In the course of this experiment, we used three different embeddings
-		      \begin{itemize}
-			      \item BERT\footnote{J. Devlin, M. Chang, K. Lee, and K. Toutanova, “BERT: pre-training of deep bidirectional transformers
-for language understanding,”}
-			      \item ELMo\footnote{M. E. Peters, M. Neumann, M. Iyyer, M. Gardner, C. Clark, K. Lee, and L. Zettlemoyer, “Deep
-contextualized word representations,”}
-			      \item GloVe\footnote{J. Pennington, R. Socher, and C. Manning, “Glove: Global vectors for word representation,”}
-		      \end{itemize}
+		\item Each argument was embedded word-wise and averaged into a single vector
+		\item The resulting similarity was calculated as $\cos(c, p)$
 	\end{itemize}
+	\begin{block}{Embeddings used}
+		BERT by \cite{devlin2018bert}
+
+		ELMo by \cite{Peters:2018ELMo}
+
+		GloVe by \cite{pennington2014glove}
+	\end{block}
 \end{frame}
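
The ranking step in the frame above (word-wise embedding, averaging, then cos(c, p) between conclusion and premise) is compact enough to sketch. This is a minimal illustration, not the authors' code: `glove` stands in for any word-to-vector lookup (GloVe here; ELMo and BERT produce contextual vectors and would replace `embed`), and `rank_arguments` is a hypothetical helper.

```python
import numpy as np

def embed(text, glove, dim=300):
    """Average the vectors of all in-vocabulary words in `text`."""
    vectors = [glove[w] for w in text.lower().split() if w in glove]
    return np.mean(vectors, axis=0) if vectors else np.zeros(dim)

def cos(c, p):
    """Cosine similarity cos(c, p); 0.0 if either vector is all zeros."""
    norm = np.linalg.norm(c) * np.linalg.norm(p)
    return float(c @ p / norm) if norm else 0.0

def rank_arguments(pairs, glove):
    """Rank (premise, conclusion) pairs by premise-conclusion similarity."""
    scored = [(cos(embed(c, glove), embed(p, glove)), p, c) for p, c in pairs]
    return sorted(scored, key=lambda t: t[0], reverse=True)
```
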
 \begin{frame}
 	\frametitle{Sentiment}
-	\begin{itemize}
-		\item As another approach we used to measure the positivity of the argument
-		\item Therefore, we used a sentiment neural network based on FastText\footnote{A. Joulin, E. Grave, P. Bojanowski, and T. Mikolov, “Bag of tricks for efficient text classification,”}, which was
-		      trained on film ratings of IMDb
-	\end{itemize}
+	\begin{columns}
+		\column{0.6\textwidth}
+		\begin{itemize}
+			\item As another approach, we measured the positivity of each argument
+			\item We used a neural network based on FastText by \cite{Armand:2017FastText}
+			\item The network was trained to classify the sentiment of IMDb film reviews
+		\end{itemize}
+		\column{0.4\textwidth}
+			\includegraphics[scale=0.1]{bilder/pngfuel-friends.png}
+	\end{columns}
 \end{frame}
\ No newline at end of file
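
The sentiment frame describes a FastText-based supervised classifier trained on labelled IMDb data. Below is a minimal sketch using the `fasttext` Python bindings; the training-file path, hyperparameters, and label names are assumptions, not the authors' actual setup.

```python
import fasttext

# fastText expects one example per line, label first, e.g.:
#   __label__pos I loved this film
# "imdb_train.txt" is a hypothetical path to such a file.
model = fasttext.train_supervised(
    input="imdb_train.txt",
    epoch=5,
    lr=0.5,
    wordNgrams=2,  # word bigrams, as recommended in the FastText paper
)

def positivity(argument: str) -> float:
    """Probability that `argument` is positive under the trained model."""
    # predict() rejects newlines, so flatten the text first
    labels, probs = model.predict(argument.replace("\n", " "))
    p = float(probs[0])
    return p if labels[0] == "__label__pos" else 1.0 - p
```
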