@inproceedings{lal-etal-2024-automated,
title = "Automated Adversarial Discovery for Safety Classifiers",
author = "Lal, Yash Kumar and
Lahoti, Preethi and
Sinha, Aradhana and
Qin, Yao and
Balashankar, Ananth",
editor = "Ovalle, Anaelia and
Chang, Kai-Wei and
Cao, Yang Trista and
Mehrabi, Ninareh and
Zhao, Jieyu and
Galstyan, Aram and
Dhamala, Jwala and
Kumar, Anoop and
Gupta, Rahul",
booktitle = "Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.trustnlp-1.2",
doi = "10.18653/v1/2024.trustnlp-1.2",
pages = "13--26",
abstract = "Safety classifiers are critical in mitigating toxicity on online forums such as social media and in chatbots. Still, they continue to be vulnerable to emergent, and often innumerable, adversarial attacks.Traditional automated adversarial data generation methods, however, tend to produce attacks that are not diverse, but variations of previously observed harm types.We formalize the task of automated adversarial discovery for safety classifiers - to find new attacks along previously unseen harm dimensions that expose new weaknesses in the classifier.We measure progress on this task along two key axes (1) adversarial success: does the attack fool the classifier? and (2) dimensional diversity: does the attack represent a previously unseen harm type?Our evaluation of existing attack generation methods on the CivilComments toxicity task reveals their limitations: Word perturbation attacks fail to fool classifiers, while prompt-based LLM attacks have more adversarial success, but lack dimensional diversity.Even our best-performing prompt-based method finds new successful attacks on unseen harm dimensions of attacks only 5{\%} of the time.Automatically finding new harmful dimensions of attack is crucial and there is substantial headroom for future research on our new task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lal-etal-2024-automated">
<titleInfo>
<title>Automated Adversarial Discovery for Safety Classifiers</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yash</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Lal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preethi</namePart>
<namePart type="family">Lahoti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aradhana</namePart>
<namePart type="family">Sinha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yao</namePart>
<namePart type="family">Qin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ananth</namePart>
<namePart type="family">Balashankar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anaelia</namePart>
<namePart type="family">Ovalle</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kai-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="given">Trista</namePart>
<namePart type="family">Cao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ninareh</namePart>
<namePart type="family">Mehrabi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jieyu</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aram</namePart>
<namePart type="family">Galstyan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jwala</namePart>
<namePart type="family">Dhamala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anoop</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rahul</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Safety classifiers are critical in mitigating toxicity on online forums such as social media and in chatbots. Still, they continue to be vulnerable to emergent, and often innumerable, adversarial attacks. Traditional automated adversarial data generation methods, however, tend to produce attacks that are not diverse, but variations of previously observed harm types. We formalize the task of automated adversarial discovery for safety classifiers - to find new attacks along previously unseen harm dimensions that expose new weaknesses in the classifier. We measure progress on this task along two key axes: (1) adversarial success: does the attack fool the classifier? and (2) dimensional diversity: does the attack represent a previously unseen harm type? Our evaluation of existing attack generation methods on the CivilComments toxicity task reveals their limitations: Word perturbation attacks fail to fool classifiers, while prompt-based LLM attacks have more adversarial success, but lack dimensional diversity. Even our best-performing prompt-based method finds new successful attacks on unseen harm dimensions of attacks only 5% of the time. Automatically finding new harmful dimensions of attack is crucial and there is substantial headroom for future research on our new task.</abstract>
<identifier type="citekey">lal-etal-2024-automated</identifier>
<identifier type="doi">10.18653/v1/2024.trustnlp-1.2</identifier>
<location>
<url>https://aclanthology.org/2024.trustnlp-1.2</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>13</start>
<end>26</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Automated Adversarial Discovery for Safety Classifiers
%A Lal, Yash Kumar
%A Lahoti, Preethi
%A Sinha, Aradhana
%A Qin, Yao
%A Balashankar, Ananth
%Y Ovalle, Anaelia
%Y Chang, Kai-Wei
%Y Cao, Yang Trista
%Y Mehrabi, Ninareh
%Y Zhao, Jieyu
%Y Galstyan, Aram
%Y Dhamala, Jwala
%Y Kumar, Anoop
%Y Gupta, Rahul
%S Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F lal-etal-2024-automated
%X Safety classifiers are critical in mitigating toxicity on online forums such as social media and in chatbots. Still, they continue to be vulnerable to emergent, and often innumerable, adversarial attacks. Traditional automated adversarial data generation methods, however, tend to produce attacks that are not diverse, but variations of previously observed harm types. We formalize the task of automated adversarial discovery for safety classifiers - to find new attacks along previously unseen harm dimensions that expose new weaknesses in the classifier. We measure progress on this task along two key axes: (1) adversarial success: does the attack fool the classifier? and (2) dimensional diversity: does the attack represent a previously unseen harm type? Our evaluation of existing attack generation methods on the CivilComments toxicity task reveals their limitations: Word perturbation attacks fail to fool classifiers, while prompt-based LLM attacks have more adversarial success, but lack dimensional diversity. Even our best-performing prompt-based method finds new successful attacks on unseen harm dimensions of attacks only 5% of the time. Automatically finding new harmful dimensions of attack is crucial and there is substantial headroom for future research on our new task.
%R 10.18653/v1/2024.trustnlp-1.2
%U https://aclanthology.org/2024.trustnlp-1.2
%U https://doi.org/10.18653/v1/2024.trustnlp-1.2
%P 13-26
Markdown (Informal)
[Automated Adversarial Discovery for Safety Classifiers](https://aclanthology.org/2024.trustnlp-1.2) (Lal et al., TrustNLP-WS 2024)
ACL
- Yash Kumar Lal, Preethi Lahoti, Aradhana Sinha, Yao Qin, and Ananth Balashankar. 2024. Automated Adversarial Discovery for Safety Classifiers. In Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024), pages 13–26, Mexico City, Mexico. Association for Computational Linguistics.
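
The abstract frames evaluation along two axes: adversarial success (does the attack fool the classifier?) and dimensional diversity (does it represent a previously unseen harm type?). Below is a minimal sketch of how those two checks could be combined when filtering generated candidates. The keyword classifier, candidate attack text, and harm-dimension labels are hypothetical placeholders for illustration, not the authors' implementation or data.

```python
# Hypothetical sketch of the two evaluation axes described in the abstract.
# None of the names below come from the paper's released code.
from typing import Callable, Set


def adversarial_success(classifier: Callable[[str], bool], attack: str) -> bool:
    """Axis 1: the candidate is assumed harmful, but the classifier does not flag it."""
    return not classifier(attack)  # classifier returns True when it flags the text as toxic


def dimensional_diversity(attack_dim: str, seen_dims: Set[str]) -> bool:
    """Axis 2: the candidate's harm dimension has not been observed before."""
    return attack_dim not in seen_dims


def keyword_classifier(text: str) -> bool:
    """Toy stand-in for a safety classifier: flags an obvious keyword only."""
    return "badword" in text.lower()


# Toy usage under these assumptions.
seen_dimensions = {"profanity", "identity_attack"}
candidate_text, candidate_dim = "You people are a plague on this forum", "dehumanization"

if adversarial_success(keyword_classifier, candidate_text) and dimensional_diversity(
    candidate_dim, seen_dimensions
):
    print("New successful attack on an unseen harm dimension")
```

Per the abstract, a generated candidate counts toward the reported 5% only when it satisfies both checks at once, which is why word-perturbation attacks (low adversarial success) and prompt-based LLM attacks (low dimensional diversity) each fall short on this task.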