@inproceedings{yung-etal-2024-prompting,
title = "Prompting Implicit Discourse Relation Annotation",
author = "Yung, Frances and
Ahmad, Mansoor and
Scholman, Merel and
Demberg, Vera",
editor = "Henning, Sophie and
Stede, Manfred",
booktitle = "Proceedings of The 18th Linguistic Annotation Workshop (LAW-XVIII)",
month = mar,
year = "2024",
address = "St. Julians, Malta",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.law-1.15",
pages = "150--165",
abstract = "Pre-trained large language models, such as ChatGPT, achieve outstanding performance in various reasoning tasks without supervised training and were found to have outperformed crowdsourcing workers. Nonetheless, ChatGPT{'}s performance in the task of implicit discourse relation classification, prompted by a standard multiple-choice question, is still far from satisfactory and considerably inferior to state-of-the-art supervised approaches. This work investigates several proven prompting techniques to improve ChatGPT{'}s recognition of discourse relations. In particular, we experimented with breaking down the classification task that involves numerous abstract labels into smaller subtasks. Nonetheless, experiment results show that the inference accuracy hardly changes even with sophisticated prompt engineering, suggesting that implicit discourse relation classification is not yet resolvable under zero-shot or few-shot settings.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yung-etal-2024-prompting">
<titleInfo>
<title>Prompting Implicit Discourse Relation Annotation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Frances</namePart>
<namePart type="family">Yung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mansoor</namePart>
<namePart type="family">Ahmad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Merel</namePart>
<namePart type="family">Scholman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The 18th Linguistic Annotation Workshop (LAW-XVIII)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sophie</namePart>
<namePart type="family">Henning</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manfred</namePart>
<namePart type="family">Stede</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">St. Julians, Malta</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Pre-trained large language models, such as ChatGPT, achieve outstanding performance in various reasoning tasks without supervised training and were found to have outperformed crowdsourcing workers. Nonetheless, ChatGPT’s performance in the task of implicit discourse relation classification, prompted by a standard multiple-choice question, is still far from satisfactory and considerably inferior to state-of-the-art supervised approaches. This work investigates several proven prompting techniques to improve ChatGPT’s recognition of discourse relations. In particular, we experimented with breaking down the classification task that involves numerous abstract labels into smaller subtasks. Nonetheless, experiment results show that the inference accuracy hardly changes even with sophisticated prompt engineering, suggesting that implicit discourse relation classification is not yet resolvable under zero-shot or few-shot settings.</abstract>
<identifier type="citekey">yung-etal-2024-prompting</identifier>
<location>
<url>https://aclanthology.org/2024.law-1.15</url>
</location>
<part>
<date>2024-03</date>
<extent unit="page">
<start>150</start>
<end>165</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Prompting Implicit Discourse Relation Annotation
%A Yung, Frances
%A Ahmad, Mansoor
%A Scholman, Merel
%A Demberg, Vera
%Y Henning, Sophie
%Y Stede, Manfred
%S Proceedings of The 18th Linguistic Annotation Workshop (LAW-XVIII)
%D 2024
%8 March
%I Association for Computational Linguistics
%C St. Julians, Malta
%F yung-etal-2024-prompting
%X Pre-trained large language models, such as ChatGPT, achieve outstanding performance in various reasoning tasks without supervised training and were found to have outperformed crowdsourcing workers. Nonetheless, ChatGPT’s performance in the task of implicit discourse relation classification, prompted by a standard multiple-choice question, is still far from satisfactory and considerably inferior to state-of-the-art supervised approaches. This work investigates several proven prompting techniques to improve ChatGPT’s recognition of discourse relations. In particular, we experimented with breaking down the classification task that involves numerous abstract labels into smaller subtasks. Nonetheless, experiment results show that the inference accuracy hardly changes even with sophisticated prompt engineering, suggesting that implicit discourse relation classification is not yet resolvable under zero-shot or few-shot settings.
%U https://aclanthology.org/2024.law-1.15
%P 150-165
Markdown (Informal)
[Prompting Implicit Discourse Relation Annotation](https://aclanthology.org/2024.law-1.15) (Yung et al., LAW-WS 2024)
ACL
- Frances Yung, Mansoor Ahmad, Merel Scholman, and Vera Demberg. 2024. Prompting Implicit Discourse Relation Annotation. In Proceedings of The 18th Linguistic Annotation Workshop (LAW-XVIII), pages 150–165, St. Julians, Malta. Association for Computational Linguistics.