@inproceedings{wiriyathammabhum-2021-ttcb,
title = "{TTCB} System Description to a Shared Task on Implicit and Underspecified Language 2021",
author = "Wiriyathammabhum, Peratham",
editor = "Roth, Michael and
Tsarfaty, Reut and
Goldberg, Yoav",
booktitle = "Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.unimplicit-1.8",
doi = "10.18653/v1/2021.unimplicit-1.8",
pages = "64--70",
abstract = "In this report, we describe our transformers for text classification baseline (TTCB) submissions to a shared task on implicit and underspecified language 2021. We cast the task of predicting revision requirements in collaboratively edited instructions as text classification. We considered transformer-based models which are the current state-of-the-art methods for text classification. We explored different training schemes, loss functions, and data augmentations. Our best result of \textit{68.45{\%}} test accuracy (\textit{68.84{\%}} validation accuracy), however, consists of an XLNet model with a linear annealing scheduler and a cross-entropy loss. We do not observe any significant gain on any validation metric based on our various design choices except the MiniLM which has a higher validation F1 score and is faster to train by a half but also a lower validation accuracy score.",
}
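The setup named in the abstract (XLNet fine-tuned as a text classifier with a linear annealing scheduler and a cross-entropy loss) corresponds to a standard fine-tuning loop. The sketch below is a minimal illustration assuming the HuggingFace transformers library and PyTorch; the checkpoint name, hyperparameters, and toy data are assumptions for illustration, not the authors' actual configuration.

```python
# Minimal sketch, assuming HuggingFace transformers + PyTorch:
# fine-tune XLNet for binary text classification with a linear
# annealing LR schedule and cross-entropy loss, as the abstract
# describes. Hyperparameters and data below are illustrative only.
import torch
from torch.utils.data import DataLoader, TensorDataset
from transformers import (
    XLNetTokenizer,
    XLNetForSequenceClassification,
    get_linear_schedule_with_warmup,
)

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetForSequenceClassification.from_pretrained(
    "xlnet-base-cased", num_labels=2  # revision required vs. not
)

# Hypothetical (sentence, label) pairs standing in for the task data.
texts = ["Add the flour and mix well.", "Stir it in."]
labels = [0, 1]
enc = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
dataset = TensorDataset(
    enc["input_ids"], enc["attention_mask"], torch.tensor(labels)
)
loader = DataLoader(dataset, batch_size=2, shuffle=True)

epochs = 3
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
# Linear annealing: the learning rate decays linearly to zero
# over the total number of training steps.
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=epochs * len(loader)
)

model.train()
for _ in range(epochs):
    for input_ids, attention_mask, y in loader:
        # Passing labels makes the model compute a cross-entropy loss.
        loss = model(input_ids=input_ids,
                     attention_mask=attention_mask, labels=y).loss
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

With `num_warmup_steps=0` the schedule is pure linear decay, which is one common reading of the "linear annealing scheduler" mentioned in the abstract.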
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wiriyathammabhum-2021-ttcb">
<titleInfo>
<title>TTCB System Description to a Shared Task on Implicit and Underspecified Language 2021</title>
</titleInfo>
<name type="personal">
<namePart type="given">Peratham</namePart>
<namePart type="family">Wiriyathammabhum</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language</title>
</titleInfo>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Roth</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Reut</namePart>
<namePart type="family">Tsarfaty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this report, we describe our transformers for text classification baseline (TTCB) submissions to a shared task on implicit and underspecified language 2021. We cast the task of predicting revision requirements in collaboratively edited instructions as text classification. We considered transformer-based models which are the current state-of-the-art methods for text classification. We explored different training schemes, loss functions, and data augmentations. Our best result of 68.45% test accuracy (68.84% validation accuracy), however, consists of an XLNet model with a linear annealing scheduler and a cross-entropy loss. We do not observe any significant gain on any validation metric based on our various design choices except the MiniLM which has a higher validation F1 score and is faster to train by a half but also a lower validation accuracy score.</abstract>
<identifier type="citekey">wiriyathammabhum-2021-ttcb</identifier>
<identifier type="doi">10.18653/v1/2021.unimplicit-1.8</identifier>
<location>
<url>https://aclanthology.org/2021.unimplicit-1.8</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>64</start>
<end>70</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TTCB System Description to a Shared Task on Implicit and Underspecified Language 2021
%A Wiriyathammabhum, Peratham
%Y Roth, Michael
%Y Tsarfaty, Reut
%Y Goldberg, Yoav
%S Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F wiriyathammabhum-2021-ttcb
%X In this report, we describe our transformers for text classification baseline (TTCB) submissions to the 2021 shared task on implicit and underspecified language. We cast the task of predicting revision requirements in collaboratively edited instructions as text classification. We considered transformer-based models, which are the current state of the art for text classification, and explored different training schemes, loss functions, and data augmentations. Our best result of 68.45% test accuracy (68.84% validation accuracy), however, comes from an XLNet model with a linear annealing scheduler and a cross-entropy loss. We do not observe a significant gain on any validation metric from our design choices, except for MiniLM, which achieves a higher validation F1 score and trains in roughly half the time, but at a lower validation accuracy.
%R 10.18653/v1/2021.unimplicit-1.8
%U https://aclanthology.org/2021.unimplicit-1.8
%U https://doi.org/10.18653/v1/2021.unimplicit-1.8
%P 64-70
Markdown (Informal)
[TTCB System Description to a Shared Task on Implicit and Underspecified Language 2021](https://aclanthology.org/2021.unimplicit-1.8) (Wiriyathammabhum, unimplicit 2021)
ACL
Peratham Wiriyathammabhum. 2021. [TTCB System Description to a Shared Task on Implicit and Underspecified Language 2021](https://aclanthology.org/2021.unimplicit-1.8). In *Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language*, pages 64–70, Online. Association for Computational Linguistics.