Commit e77b439a authored by Marc Feger

Refactor methods.tex

parent 3afad534
Pipeline #44717 passed with stage in 2 minutes and 27 seconds
@@ -45,4 +45,45 @@
numpages = {6},
location = {Las Cruces, New Mexico},
series = {ACL ’94}
}
@inproceedings{pennington2014glove,
title = {{G}lo{V}e: Global Vectors for Word Representation},
author = {Pennington, Jeffrey and Socher, Richard and Manning, Christopher},
booktitle = {Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
month = {10},
year = {2014},
address = {Doha, Qatar},
publisher = {Association for Computational Linguistics},
url = {https://doi.org/10.3115/v1/D14-1162},
doi = {10.3115/v1/D14-1162},
pages = "1532--1543"
}
@article{Peters:2018ELMo,
author = {Matthew E. Peters and Mark Neumann and Mohit Iyyer and Matt Gardner and Christopher Clark and Kenton Lee and Luke Zettlemoyer},
title = {Deep contextualized word representations},
journal = {CoRR},
year = {2018},
url = {http://arxiv.org/abs/1802.05365},
archivePrefix = {arXiv},
eprint = {1802.05365}
}
@article{devlin2018bert,
author = {Jacob Devlin and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova},
title = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language Understanding},
journal = {CoRR},
year = {2018},
url = {http://arxiv.org/abs/1810.04805},
archivePrefix = {arXiv},
eprint = {1810.04805}
}
@inproceedings{Armand:2017FastText,
title = {Bag of Tricks for Efficient Text Classification},
author = {Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas},
booktitle = {Proceedings of the 15th Conference of the {E}uropean Chapter of the Association for Computational Linguistics},
month = {04},
year = {2017},
address = {Valencia, Spain},
publisher = {Association for Computational Linguistics},
url = {https://doi.org/10.18653/v1/E17-2068},
doi = {10.18653/v1/E17-2068},
pages = {427--431}
}
\ No newline at end of file
@@ -51,23 +51,27 @@
\begin{itemize}
\item For ranking the arguments, we measured the semantic similarity
between the premises and conclusions
\item Each argument was embedded word-wise in an averaged vector space
\item The resulting similarity was calculated by using $\cos(c, p)$
\item In the course of this experiment, we used three different embeddings
\begin{itemize}
\item BERT\footnote{J. Devlin, M. Chang, K. Lee, and K. Toutanova, “BERT: pre-training of deep bidirectional transformers
for language understanding,”}
\item ELMo\footnote{M. E. Peters, M. Neumann, M. Iyyer, M. Gardner, C. Clark, K. Lee, and L. Zettlemoyer, “Deep
contextualized word representations,”}
\item GloVe\footnote{J. Pennington, R. Socher, and C. Manning, “Glove: Global vectors for word representation,”}
\end{itemize}
\item Arguments were embedded word-wise in an averaged vector space
\item The resulting similarity was calculated using $\cos(c, p)$ (see the sketch after this frame)
\end{itemize}
\begin{block}{Embeddings used}
BERT by \cite{devlin2018bert}\\
ELMo by \cite{Peters:2018ELMo}\\
GloVe by \cite{pennington2014glove}
\end{block}
\end{frame}
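A minimal sketch of this ranking step (not part of the slides; the helper names and toy vectors are illustrative). Each argument's conclusion $c$ and premise $p$ are embedded word-wise, the word vectors are averaged, and the two averages are compared with $\cos(c, p) = \frac{c \cdot p}{\lVert c \rVert \, \lVert p \rVert}$:

import numpy as np

# Toy lookup table standing in for pre-trained embeddings; GloVe, ELMo, or
# BERT would supply the real vectors (words and values here are made up).
vectors = {
    "taxes": np.array([0.2, 0.7, 0.1]),
    "fund":  np.array([0.3, 0.6, 0.2]),
    "roads": np.array([0.1, 0.8, 0.3]),
}

def embed(tokens, table, dim=3):
    # Embed the text word-wise, then average into a single vector.
    hits = [table[t] for t in tokens if t in table]
    return np.mean(hits, axis=0) if hits else np.zeros(dim)

def cos(c, p):
    # Cosine similarity between conclusion and premise vectors.
    return float(np.dot(c, p) / (np.linalg.norm(c) * np.linalg.norm(p)))

conclusion = embed("taxes fund roads".split(), vectors)
premise = embed("roads should be funded by taxes".split(), vectors)
print(cos(conclusion, premise))  # close to 1.0 for overlapping vocabularies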
\begin{frame}
\frametitle{Sentiment}
\begin{itemize}
\item As another approach, we measured the positivity of the argument
\item Therefore, we used a sentiment neural network based on FastText\footnote{A. Joulin, E. Grave, P. Bojanowski, and T. Mikolov, “Bag of tricks for efficient text classification,”}, which was trained on IMDb film ratings
\end{itemize}
\begin{columns}
\column{0.6\textwidth}
\begin{itemize}
\item As another approach, we measured the positivity of the argument
\item We used a neural network based on FastText by \cite{Armand:2017FastText}
\item The neural network was trained to indicate the sentiment of IMDb film ratings (see the training sketch after this frame)
\end{itemize}
\column{0.4\textwidth}
\includegraphics[scale=0.1]{bilder/pngfuel-friends.png}
\end{columns}
\end{frame}
\ No newline at end of file
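A minimal training sketch for the sentiment step, assuming the fasttext package and an IMDb training file in fastText's __label__<class> <text> format. The file name imdb.train and the hyperparameters are illustrative, not the authors' exact setup:

import fasttext

# Supervised fastText classifier; each line of imdb.train looks like
# "__label__positive a wonderful, moving film ..."
model = fasttext.train_supervised(input="imdb.train", epoch=5, lr=0.5, wordNgrams=2)

labels, probs = model.predict("a wonderful, moving film")
print(labels[0], probs[0])  # e.g. __label__positive with its confidence

Since the slides' network indicates the positivity of an argument, the predicted probability of the positive label can serve directly as the sentiment score.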