@inproceedings{akdemir-hurriyetoglu-2022-zero,
title = "Zero-Shot Ranking Socio-Political Texts with Transformer Language Models to Reduce Close Reading Time",
author = {Akdemir, Kiymet and
H{\"u}rriyeto{\u{g}}lu, Ali},
editor = {H{\"u}rriyeto{\u{g}}lu, Ali and
Tanev, Hristo and
Zavarella, Vanni and
Y{\"o}r{\"u}k, Erdem},
booktitle = "Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.case-1.17",
doi = "10.18653/v1/2022.case-1.17",
pages = "124--132",
abstract = "We approach the classification problem as an entailment problem and apply zero-shot ranking to socio-political texts. Documents that are ranked at the top can be considered positively classified documents and this reduces the close reading time for the information extraction process. We use Transformer Language Models to get the entailment probabilities and investigate different types of queries. We find that DeBERTa achieves higher mean average precision scores than RoBERTa and when declarative form of the class label is used as a query, it outperforms dictionary definition of the class label. We show that one can reduce the close reading time by taking some percentage of the ranked documents that the percentage depends on how much recall they want to achieve. However, our findings also show that percentage of the documents that should be read increases as the topic gets broader.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="akdemir-hurriyetoglu-2022-zero">
    <titleInfo>
      <title>Zero-Shot Ranking Socio-Political Texts with Transformer Language Models to Reduce Close Reading Time</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Kiymet</namePart>
      <namePart type="family">Akdemir</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ali</namePart>
      <namePart type="family">Hürriyetoğlu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Ali</namePart>
        <namePart type="family">Hürriyetoğlu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Hristo</namePart>
        <namePart type="family">Tanev</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vanni</namePart>
        <namePart type="family">Zavarella</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Erdem</namePart>
        <namePart type="family">Yörük</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We approach the classification problem as an entailment problem and apply zero-shot ranking to socio-political texts. Documents ranked at the top can be treated as positively classified, which reduces the close reading time of the information extraction process. We use Transformer Language Models to obtain entailment probabilities and investigate different types of queries. We find that DeBERTa achieves higher mean average precision scores than RoBERTa, and that using the declarative form of the class label as a query outperforms using its dictionary definition. We show that close reading time can be reduced by reading only a percentage of the ranked documents, where the percentage depends on the desired recall level. However, our findings also show that the percentage of documents that must be read increases as the topic gets broader.</abstract>
<identifier type="citekey">akdemir-hurriyetoglu-2022-zero</identifier>
<identifier type="doi">10.18653/v1/2022.case-1.17</identifier>
<location>
<url>https://aclanthology.org/2022.case-1.17</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>124</start>
<end>132</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Zero-Shot Ranking Socio-Political Texts with Transformer Language Models to Reduce Close Reading Time
%A Akdemir, Kiymet
%A Hürriyetoğlu, Ali
%Y Hürriyetoğlu, Ali
%Y Tanev, Hristo
%Y Zavarella, Vanni
%Y Yörük, Erdem
%S Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F akdemir-hurriyetoglu-2022-zero
%X We approach the classification problem as an entailment problem and apply zero-shot ranking to socio-political texts. Documents ranked at the top can be treated as positively classified, which reduces the close reading time of the information extraction process. We use Transformer Language Models to obtain entailment probabilities and investigate different types of queries. We find that DeBERTa achieves higher mean average precision scores than RoBERTa, and that using the declarative form of the class label as a query outperforms using its dictionary definition. We show that close reading time can be reduced by reading only a percentage of the ranked documents, where the percentage depends on the desired recall level. However, our findings also show that the percentage of documents that must be read increases as the topic gets broader.
%R 10.18653/v1/2022.case-1.17
%U https://aclanthology.org/2022.case-1.17
%U https://doi.org/10.18653/v1/2022.case-1.17
%P 124-132
Markdown (Informal)
[Zero-Shot Ranking Socio-Political Texts with Transformer Language Models to Reduce Close Reading Time](https://aclanthology.org/2022.case-1.17) (Akdemir & Hürriyetoğlu, CASE 2022)
ACL
Kiymet Akdemir and Ali Hürriyetoğlu. 2022. Zero-Shot Ranking Socio-Political Texts with Transformer Language Models to Reduce Close Reading Time. In Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE), pages 124–132, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
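
The approach the abstract describes, treating the class label as an entailment hypothesis and ranking documents by entailment probability, can be sketched with an off-the-shelf NLI model. This is a minimal illustration, not the authors' code: the checkpoint (`microsoft/deberta-large-mnli`), the hypothesis template, the class label, and the sample documents are all assumptions standing in for the paper's actual setup.

```python
# Minimal sketch of entailment-based zero-shot ranking (not the authors' code).
# Assumptions: the NLI checkpoint, the hypothesis template, and the sample
# documents below are illustrative stand-ins for the paper's actual setup.
from transformers import pipeline

# An off-the-shelf DeBERTa NLI model; the paper compares DeBERTa- and
# RoBERTa-based models, but its exact checkpoints are not listed on this page.
classifier = pipeline("zero-shot-classification",
                      model="microsoft/deberta-large-mnli")

documents = [
    "Workers gathered downtown to demand higher wages.",
    "The city council approved next year's park budget.",
    "Riot police dispersed demonstrators near the parliament.",
]

# Query phrased as the declarative form of the class label, which the
# abstract reports outperforms a dictionary-definition query.
hypothesis_template = "This text is about {}."
class_label = "a protest event"

# Score each document by the model's entailment probability for the query,
# then rank from most to least likely positive.
scored = [
    (
        classifier(doc,
                   candidate_labels=[class_label],
                   hypothesis_template=hypothesis_template)["scores"][0],
        doc,
    )
    for doc in documents
]
for score, doc in sorted(scored, reverse=True):
    print(f"{score:.3f}  {doc}")
```

A reader would then close-read only the top slice of this ranking, with the cutoff percentage chosen for the recall level they need; per the abstract, that slice grows as the class label gets broader.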