@inproceedings{moradi-etal-2019-interrogating,
title = "Interrogating the Explanatory Power of Attention in Neural Machine Translation",
author = "Moradi, Pooya and
Kambhatla, Nishant and
Sarkar, Anoop",
editor = "Birch, Alexandra and
Finch, Andrew and
Hayashi, Hiroaki and
Konstas, Ioannis and
Luong, Thang and
Neubig, Graham and
Oda, Yusuke and
Sudoh, Katsuhito",
booktitle = "Proceedings of the 3rd Workshop on Neural Generation and Translation",
month = nov,
year = "2019",
address = "Hong Kong",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-5624",
doi = "10.18653/v1/D19-5624",
pages = "221--230",
abstract = "Attention models have become a crucial component in neural machine translation (NMT). They are often implicitly or explicitly used to justify the model{'}s decision in generating a specific token but it has not yet been rigorously established to what extent attention is a reliable source of information in NMT. To evaluate the explanatory power of attention for NMT, we examine the possibility of yielding the same prediction but with counterfactual attention models that modify crucial aspects of the trained attention model. Using these counterfactual attention mechanisms we assess the extent to which they still preserve the generation of function and content words in the translation process. Compared to a state of the art attention model, our counterfactual attention models produce 68{\%} of function words and 21{\%} of content words in our German-English dataset. Our experiments demonstrate that attention models by themselves cannot reliably explain the decisions made by a NMT model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="moradi-etal-2019-interrogating">
<titleInfo>
<title>Interrogating the Explanatory Power of Attention in Neural Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pooya</namePart>
<namePart type="family">Moradi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nishant</namePart>
<namePart type="family">Kambhatla</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anoop</namePart>
<namePart type="family">Sarkar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Neural Generation and Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Birch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">Finch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hiroaki</namePart>
<namePart type="family">Hayashi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ioannis</namePart>
<namePart type="family">Konstas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thang</namePart>
<namePart type="family">Luong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Graham</namePart>
<namePart type="family">Neubig</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yusuke</namePart>
<namePart type="family">Oda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katsuhito</namePart>
<namePart type="family">Sudoh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Attention models have become a crucial component in neural machine translation (NMT). They are often implicitly or explicitly used to justify the model’s decision in generating a specific token, but it has not yet been rigorously established to what extent attention is a reliable source of information in NMT. To evaluate the explanatory power of attention for NMT, we examine the possibility of yielding the same prediction but with counterfactual attention models that modify crucial aspects of the trained attention model. Using these counterfactual attention mechanisms, we assess the extent to which they still preserve the generation of function and content words in the translation process. Compared to a state-of-the-art attention model, our counterfactual attention models produce 68% of function words and 21% of content words in our German-English dataset. Our experiments demonstrate that attention models by themselves cannot reliably explain the decisions made by an NMT model.</abstract>
<identifier type="citekey">moradi-etal-2019-interrogating</identifier>
<identifier type="doi">10.18653/v1/D19-5624</identifier>
<location>
<url>https://aclanthology.org/D19-5624</url>
</location>
<part>
<date>2019-11</date>
<extent unit="page">
<start>221</start>
<end>230</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Interrogating the Explanatory Power of Attention in Neural Machine Translation
%A Moradi, Pooya
%A Kambhatla, Nishant
%A Sarkar, Anoop
%Y Birch, Alexandra
%Y Finch, Andrew
%Y Hayashi, Hiroaki
%Y Konstas, Ioannis
%Y Luong, Thang
%Y Neubig, Graham
%Y Oda, Yusuke
%Y Sudoh, Katsuhito
%S Proceedings of the 3rd Workshop on Neural Generation and Translation
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong
%F moradi-etal-2019-interrogating
%X Attention models have become a crucial component in neural machine translation (NMT). They are often implicitly or explicitly used to justify the model’s decision in generating a specific token, but it has not yet been rigorously established to what extent attention is a reliable source of information in NMT. To evaluate the explanatory power of attention for NMT, we examine the possibility of yielding the same prediction but with counterfactual attention models that modify crucial aspects of the trained attention model. Using these counterfactual attention mechanisms, we assess the extent to which they still preserve the generation of function and content words in the translation process. Compared to a state-of-the-art attention model, our counterfactual attention models produce 68% of function words and 21% of content words in our German-English dataset. Our experiments demonstrate that attention models by themselves cannot reliably explain the decisions made by an NMT model.
%R 10.18653/v1/D19-5624
%U https://aclanthology.org/D19-5624
%U https://doi.org/10.18653/v1/D19-5624
%P 221-230
Markdown (Informal)
[Interrogating the Explanatory Power of Attention in Neural Machine Translation](https://aclanthology.org/D19-5624) (Moradi et al., NGT 2019)
ACL
Pooya Moradi, Nishant Kambhatla, and Anoop Sarkar. 2019. Interrogating the Explanatory Power of Attention in Neural Machine Translation. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 221–230, Hong Kong. Association for Computational Linguistics.