@inproceedings{bhardwaj-etal-2023-multimodal,
    title = "Multimodal Learning for Accurate Visual Question Answering: An Attention-Based Approach",
    author = "Bhardwaj, Jishnu and
      Balakrishnan, Anurag and
      Pathak, Satyam and
      Unnarkar, Ishan and
      Gawande, Aniruddha and
      Ahmadnia, Benyamin",
    editor = "Mitkov, Ruslan and
      Angelova, Galia",
    booktitle = "Proceedings of the 14th International Conference on Recent Advances in Natural Language Processing",
    month = sep,
    year = "2023",
    address = "Varna, Bulgaria",
    publisher = "INCOMA Ltd., Shoumen, Bulgaria",
    url = "https://aclanthology.org/2023.ranlp-1.20",
    pages = "179--186",
    abstract = "This paper proposes an open-ended task for Visual Question Answering (VQA) that leverages the InceptionV3 Object Detection model and an attention-based Long Short-Term Memory (LSTM) network for question answering. Our proposed model provides accurate natural language answers to questions about an image, including those that require understanding contextual information and background details. Our findings demonstrate that the proposed approach can achieve high accuracy, even with complex and varied visual information. The proposed method can contribute to developing more advanced vision systems that can process and interpret visual information like humans.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bhardwaj-etal-2023-multimodal">
    <titleInfo>
      <title>Multimodal Learning for Accurate Visual Question Answering: An Attention-Based Approach</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jishnu</namePart>
      <namePart type="family">Bhardwaj</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anurag</namePart>
      <namePart type="family">Balakrishnan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Satyam</namePart>
      <namePart type="family">Pathak</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ishan</namePart>
      <namePart type="family">Unnarkar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aniruddha</namePart>
      <namePart type="family">Gawande</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Benyamin</namePart>
      <namePart type="family">Ahmadnia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 14th International Conference on Recent Advances in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Ruslan</namePart>
        <namePart type="family">Mitkov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Galia</namePart>
        <namePart type="family">Angelova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
        <place>
          <placeTerm type="text">Varna, Bulgaria</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper proposes an open-ended task for Visual Question Answering (VQA) that leverages the InceptionV3 Object Detection model and an attention-based Long Short-Term Memory (LSTM) network for question answering. Our proposed model provides accurate natural language answers to questions about an image, including those that require understanding contextual information and background details. Our findings demonstrate that the proposed approach can achieve high accuracy, even with complex and varied visual information. The proposed method can contribute to developing more advanced vision systems that can process and interpret visual information like humans.</abstract>
    <identifier type="citekey">bhardwaj-etal-2023-multimodal</identifier>
    <location>
      <url>https://aclanthology.org/2023.ranlp-1.20</url>
    </location>
    <part>
      <date>2023-09</date>
      <extent unit="page">
        <start>179</start>
        <end>186</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Multimodal Learning for Accurate Visual Question Answering: An Attention-Based Approach
%A Bhardwaj, Jishnu
%A Balakrishnan, Anurag
%A Pathak, Satyam
%A Unnarkar, Ishan
%A Gawande, Aniruddha
%A Ahmadnia, Benyamin
%Y Mitkov, Ruslan
%Y Angelova, Galia
%S Proceedings of the 14th International Conference on Recent Advances in Natural Language Processing
%D 2023
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F bhardwaj-etal-2023-multimodal
%X This paper proposes an open-ended task for Visual Question Answering (VQA) that leverages the InceptionV3 Object Detection model and an attention-based Long Short-Term Memory (LSTM) network for question answering. Our proposed model provides accurate natural language answers to questions about an image, including those that require understanding contextual information and background details. Our findings demonstrate that the proposed approach can achieve high accuracy, even with complex and varied visual information. The proposed method can contribute to developing more advanced vision systems that can process and interpret visual information like humans.
%U https://aclanthology.org/2023.ranlp-1.20
%P 179-186
Markdown (Informal)
[Multimodal Learning for Accurate Visual Question Answering: An Attention-Based Approach](https://aclanthology.org/2023.ranlp-1.20) (Bhardwaj et al., RANLP 2023)
ACL
- Jishnu Bhardwaj, Anurag Balakrishnan, Satyam Pathak, Ishan Unnarkar, Aniruddha Gawande, and Benyamin Ahmadnia. 2023. Multimodal Learning for Accurate Visual Question Answering: An Attention-Based Approach. In Proceedings of the 14th International Conference on Recent Advances in Natural Language Processing, pages 179–186, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.
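
For readers who want a concrete picture of the kind of pipeline the abstract describes (InceptionV3 image features combined with an attention-based LSTM question encoder), the following is a minimal illustrative sketch in Keras. It is not the authors' implementation: the layer sizes, vocabulary sizes, attention formulation, and answer-classification head are assumptions chosen only to make the example runnable.

```python
# Illustrative sketch of a VQA model in the spirit of the abstract:
# InceptionV3 region features + LSTM question encoder + attention fusion.
# All hyperparameters below are assumptions, not the paper's settings.
import tensorflow as tf
from tensorflow.keras import layers, Model

VOCAB_SIZE = 10_000   # assumed question vocabulary size
NUM_ANSWERS = 1_000   # assumed answer vocabulary size
EMBED_DIM = 256
LSTM_UNITS = 512

# Image branch: frozen InceptionV3 feature extractor.
# Pixels are assumed already scaled to [-1, 1] as InceptionV3 expects.
image_in = layers.Input(shape=(299, 299, 3), name="image")
cnn = tf.keras.applications.InceptionV3(include_top=False, weights="imagenet")
cnn.trainable = False
grid = cnn(image_in)                              # (batch, 8, 8, 2048)
regions = layers.Reshape((64, 2048))(grid)        # 64 spatial region vectors

# Question branch: word embeddings + LSTM encoder.
question_in = layers.Input(shape=(None,), dtype="int32", name="question")
q_emb = layers.Embedding(VOCAB_SIZE, EMBED_DIM, mask_zero=True)(question_in)
q_state = layers.LSTM(LSTM_UNITS)(q_emb)          # final question representation

# Question-guided dot-product attention over image regions.
keys = layers.Dense(LSTM_UNITS)(regions)          # project regions to query dim
query = layers.Reshape((1, LSTM_UNITS))(q_state)
attended = layers.Attention()([query, regions, keys])   # [query, value, key]
attended = layers.Reshape((2048,))(attended)      # attention-weighted image vector

# Fuse modalities and classify over a fixed answer vocabulary.
fused = layers.Concatenate()([q_state, attended])
hidden = layers.Dense(1024, activation="relu")(fused)
answer = layers.Dense(NUM_ANSWERS, activation="softmax")(hidden)

model = Model(inputs=[image_in, question_in], outputs=answer)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
```

Treating answer prediction as classification over a fixed answer vocabulary is a common simplification in VQA sketches like this one; the paper's open-ended formulation and its exact attention mechanism may differ.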