@inproceedings{goto-tanaka-2017-detecting,
title = "Detecting Untranslated Content for Neural Machine Translation",
author = "Goto, Isao and
Tanaka, Hideki",
editor = "Luong, Thang and
Birch, Alexandra and
Neubig, Graham and
Finch, Andrew",
booktitle = "Proceedings of the First Workshop on Neural Machine Translation",
month = aug,
year = "2017",
address = "Vancouver",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-3206",
doi = "10.18653/v1/W17-3206",
pages = "47--55",
abstract = "Despite its promise, neural machine translation (NMT) has a serious problem in that source content may be mistakenly left untranslated. The ability to detect untranslated content is important for the practical use of NMT. We evaluate two types of probability with which to detect untranslated content: the cumulative attention (ATN) probability and back translation (BT) probability from the target sentence to the source sentence. Experiments on detecting untranslated content in Japanese-English patent translations show that ATN and BT are each more effective than random choice, BT is more effective than ATN, and the combination of the two provides further improvements. We also confirmed the effectiveness of using ATN and BT to rerank the n-best NMT outputs.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="goto-tanaka-2017-detecting">
    <titleInfo>
        <title>Detecting Untranslated Content for Neural Machine Translation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Isao</namePart>
        <namePart type="family">Goto</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Hideki</namePart>
        <namePart type="family">Tanaka</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2017-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the First Workshop on Neural Machine Translation</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Thang</namePart>
            <namePart type="family">Luong</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Alexandra</namePart>
            <namePart type="family">Birch</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Graham</namePart>
            <namePart type="family">Neubig</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Andrew</namePart>
            <namePart type="family">Finch</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Vancouver</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Despite its promise, neural machine translation (NMT) has a serious problem in that source content may be mistakenly left untranslated. The ability to detect untranslated content is important for the practical use of NMT. We evaluate two types of probability with which to detect untranslated content: the cumulative attention (ATN) probability and back translation (BT) probability from the target sentence to the source sentence. Experiments on detecting untranslated content in Japanese-English patent translations show that ATN and BT are each more effective than random choice, BT is more effective than ATN, and the combination of the two provides further improvements. We also confirmed the effectiveness of using ATN and BT to rerank the n-best NMT outputs.</abstract>
    <identifier type="citekey">goto-tanaka-2017-detecting</identifier>
    <identifier type="doi">10.18653/v1/W17-3206</identifier>
    <location>
        <url>https://aclanthology.org/W17-3206</url>
    </location>
    <part>
        <date>2017-08</date>
        <extent unit="page">
            <start>47</start>
            <end>55</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detecting Untranslated Content for Neural Machine Translation
%A Goto, Isao
%A Tanaka, Hideki
%Y Luong, Thang
%Y Birch, Alexandra
%Y Neubig, Graham
%Y Finch, Andrew
%S Proceedings of the First Workshop on Neural Machine Translation
%D 2017
%8 August
%I Association for Computational Linguistics
%C Vancouver
%F goto-tanaka-2017-detecting
%X Despite its promise, neural machine translation (NMT) has a serious problem in that source content may be mistakenly left untranslated. The ability to detect untranslated content is important for the practical use of NMT. We evaluate two types of probability with which to detect untranslated content: the cumulative attention (ATN) probability and back translation (BT) probability from the target sentence to the source sentence. Experiments on detecting untranslated content in Japanese-English patent translations show that ATN and BT are each more effective than random choice, BT is more effective than ATN, and the combination of the two provides further improvements. We also confirmed the effectiveness of using ATN and BT to rerank the n-best NMT outputs.
%R 10.18653/v1/W17-3206
%U https://aclanthology.org/W17-3206
%U https://doi.org/10.18653/v1/W17-3206
%P 47-55
Markdown (Informal)
[Detecting Untranslated Content for Neural Machine Translation](https://aclanthology.org/W17-3206) (Goto & Tanaka, NGT 2017)
ACL
Isao Goto and Hideki Tanaka. 2017. Detecting Untranslated Content for Neural Machine Translation. In Proceedings of the First Workshop on Neural Machine Translation, pages 47–55, Vancouver. Association for Computational Linguistics.