@inproceedings{antypas-camacho-collados-2023-robust,
title = "Robust Hate Speech Detection in Social Media: A Cross-Dataset Empirical Evaluation",
author = "Antypas, Dimosthenis and
Camacho-Collados, Jose",
editor = {Chung, Yi-ling and
R{\"o}ttger, Paul and
Nozza, Debora and
Talat, Zeerak and
Mostafazadeh Davani, Aida},
booktitle = "The 7th Workshop on Online Abuse and Harms (WOAH)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.woah-1.25",
doi = "10.18653/v1/2023.woah-1.25",
pages = "231--242",
abstract = "The automatic detection of hate speech online is an active research area in NLP. Most of the studies to date are based on social media datasets that contribute to the creation of hate speech detection models trained on them. However, data creation processes contain their own biases, and models inherently learn from these dataset-specific biases. In this paper, we perform a large-scale cross-dataset comparison where we fine-tune language models on different hate speech detection datasets. This analysis shows how some datasets are more generalizable than others when used as training data. Crucially, our experiments show how combining hate speech detection datasets can contribute to the development of robust hate speech detection models. This robustness holds even when controlling by data size and compared with the best individual datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="antypas-camacho-collados-2023-robust">
<titleInfo>
<title>Robust Hate Speech Detection in Social Media: A Cross-Dataset Empirical Evaluation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dimosthenis</namePart>
<namePart type="family">Antypas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="family">Camacho-Collados</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The 7th Workshop on Online Abuse and Harms (WOAH)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yi-ling</namePart>
<namePart type="family">Chung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paul</namePart>
<namePart type="family">Röttger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debora</namePart>
<namePart type="family">Nozza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zeerak</namePart>
<namePart type="family">Talat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aida</namePart>
<namePart type="family">Mostafazadeh Davani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The automatic detection of hate speech online is an active research area in NLP. Most of the studies to date are based on social media datasets that contribute to the creation of hate speech detection models trained on them. However, data creation processes contain their own biases, and models inherently learn from these dataset-specific biases. In this paper, we perform a large-scale cross-dataset comparison where we fine-tune language models on different hate speech detection datasets. This analysis shows how some datasets are more generalizable than others when used as training data. Crucially, our experiments show how combining hate speech detection datasets can contribute to the development of robust hate speech detection models. This robustness holds even when controlling by data size and compared with the best individual datasets.</abstract>
<identifier type="citekey">antypas-camacho-collados-2023-robust</identifier>
<identifier type="doi">10.18653/v1/2023.woah-1.25</identifier>
<location>
<url>https://aclanthology.org/2023.woah-1.25</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>231</start>
<end>242</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Robust Hate Speech Detection in Social Media: A Cross-Dataset Empirical Evaluation
%A Antypas, Dimosthenis
%A Camacho-Collados, Jose
%Y Chung, Yi-ling
%Y Röttger, Paul
%Y Nozza, Debora
%Y Talat, Zeerak
%Y Mostafazadeh Davani, Aida
%S The 7th Workshop on Online Abuse and Harms (WOAH)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F antypas-camacho-collados-2023-robust
%X The automatic detection of hate speech online is an active research area in NLP. Most of the studies to date are based on social media datasets that contribute to the creation of hate speech detection models trained on them. However, data creation processes contain their own biases, and models inherently learn from these dataset-specific biases. In this paper, we perform a large-scale cross-dataset comparison where we fine-tune language models on different hate speech detection datasets. This analysis shows how some datasets are more generalizable than others when used as training data. Crucially, our experiments show how combining hate speech detection datasets can contribute to the development of robust hate speech detection models. This robustness holds even when controlling by data size and compared with the best individual datasets.
%R 10.18653/v1/2023.woah-1.25
%U https://aclanthology.org/2023.woah-1.25
%U https://doi.org/10.18653/v1/2023.woah-1.25
%P 231-242
Markdown (Informal)
[Robust Hate Speech Detection in Social Media: A Cross-Dataset Empirical Evaluation](https://aclanthology.org/2023.woah-1.25) (Antypas & Camacho-Collados, WOAH 2023)
ACL