@inproceedings{razumovskaia-etal-2024-sqatin,
    title = "{SQATIN}: Supervised Instruction Tuning Meets Question Answering for Improved Dialogue {NLU}",
    author = "Razumovskaia, Evgeniia and
      Glava{\v{s}}, Goran and
      Korhonen, Anna and
      Vuli{\'c}, Ivan",
    editor = "Duh, Kevin and
      Gomez, Helena and
      Bethard, Steven",
    booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
    month = jun,
    year = "2024",
    address = "Mexico City, Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.naacl-long.453/",
    doi = "10.18653/v1/2024.naacl-long.453",
    pages = "8195--8211",
    abstract = "Task-oriented dialogue (TOD) systems help users execute well-defined tasks across a variety of domains (e.g., \textit{flight booking} or \textit{food ordering}), with their Natural Language Understanding (NLU) components being dedicated to the analysis of user utterances, predicting users' intents (\textit{Intent Detection}, ID) and extracting values for informational slots (\textit{Value Extraction}, VE). In most domains, labelled NLU data is scarce, making sample-efficient learning {--} enabled with effective transfer paradigms {--} paramount. In this work, we introduce SQATIN, a new framework for dialog NLU based on (i) instruction tuning and (ii) question-answering-based formulation of ID and VE tasks. According to the evaluation on established NLU benchmarks, SQATIN sets the new state of the art in dialogue NLU, substantially surpassing the performance of current models based on standard fine-tuning objectives in both in-domain training and cross-domain transfer, and it also surpasses off-the-shelf large language models for the same task, both in terms of performance and inference efficiency. Furthermore, SQATIN yields particularly large performance gains in cross-domain transfer, owing to the fact that our QA-based instruction tuning leverages similarities between natural language descriptions of classes (i.e., slots and intents) across domains."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="razumovskaia-etal-2024-sqatin">
    <titleInfo>
      <title>SQATIN: Supervised Instruction Tuning Meets Question Answering for Improved Dialogue NLU</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Evgeniia</namePart>
      <namePart type="family">Razumovskaia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Goran</namePart>
      <namePart type="family">Glavaš</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anna</namePart>
      <namePart type="family">Korhonen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ivan</namePart>
      <namePart type="family">Vulić</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Kevin</namePart>
        <namePart type="family">Duh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Helena</namePart>
        <namePart type="family">Gomez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Mexico City, Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Task-oriented dialogue (TOD) systems help users execute well-defined tasks across a variety of domains (e.g., flight booking or food ordering), with their Natural Language Understanding (NLU) components being dedicated to the analysis of user utterances, predicting users’ intents (Intent Detection, ID) and extracting values for informational slots (Value Extraction, VE). In most domains, labelled NLU data is scarce, making sample-efficient learning – enabled with effective transfer paradigms – paramount. In this work, we introduce SQATIN, a new framework for dialog NLU based on (i) instruction tuning and (ii) question-answering-based formulation of ID and VE tasks. According to the evaluation on established NLU benchmarks, SQATIN sets the new state of the art in dialogue NLU, substantially surpassing the performance of current models based on standard fine-tuning objectives in both in-domain training and cross-domain transfer, and it also surpasses off-the-shelf large language models for the same task, both in terms of performance and inference efficiency. Furthermore, SQATIN yields particularly large performance gains in cross-domain transfer, owing to the fact that our QA-based instruction tuning leverages similarities between natural language descriptions of classes (i.e., slots and intents) across domains.</abstract>
    <identifier type="citekey">razumovskaia-etal-2024-sqatin</identifier>
    <identifier type="doi">10.18653/v1/2024.naacl-long.453</identifier>
    <location>
      <url>https://aclanthology.org/2024.naacl-long.453/</url>
    </location>
    <part>
      <date>2024-06</date>
      <extent unit="page">
        <start>8195</start>
        <end>8211</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T SQATIN: Supervised Instruction Tuning Meets Question Answering for Improved Dialogue NLU
%A Razumovskaia, Evgeniia
%A Glavaš, Goran
%A Korhonen, Anna
%A Vulić, Ivan
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F razumovskaia-etal-2024-sqatin
%X Task-oriented dialogue (TOD) systems help users execute well-defined tasks across a variety of domains (e.g., flight booking or food ordering), with their Natural Language Understanding (NLU) components being dedicated to the analysis of user utterances, predicting users’ intents (Intent Detection, ID) and extracting values for informational slots (Value Extraction, VE). In most domains, labelled NLU data is scarce, making sample-efficient learning – enabled with effective transfer paradigms – paramount. In this work, we introduce SQATIN, a new framework for dialog NLU based on (i) instruction tuning and (ii) question-answering-based formulation of ID and VE tasks. According to the evaluation on established NLU benchmarks, SQATIN sets the new state of the art in dialogue NLU, substantially surpassing the performance of current models based on standard fine-tuning objectives in both in-domain training and cross-domain transfer, and it also surpasses off-the-shelf large language models for the same task, both in terms of performance and inference efficiency. Furthermore, SQATIN yields particularly large performance gains in cross-domain transfer, owing to the fact that our QA-based instruction tuning leverages similarities between natural language descriptions of classes (i.e., slots and intents) across domains.
%R 10.18653/v1/2024.naacl-long.453
%U https://aclanthology.org/2024.naacl-long.453/
%U https://doi.org/10.18653/v1/2024.naacl-long.453
%P 8195-8211

Markdown (Informal)

[SQATIN: Supervised Instruction Tuning Meets Question Answering for Improved Dialogue NLU](https://aclanthology.org/2024.naacl-long.453/) (Razumovskaia et al., NAACL 2024)

ACL

Evgeniia Razumovskaia, Goran Glavaš, Anna Korhonen, and Ivan Vulić. 2024. SQATIN: Supervised Instruction Tuning Meets Question Answering for Improved Dialogue NLU. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 8195–8211, Mexico City, Mexico. Association for Computational Linguistics.