{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T13:30:46Z","timestamp":1767706246699,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,10]]},"DOI":"10.1109\/iccv.2019.00436","type":"proceedings-article","created":{"date-parts":[[2020,2,28]],"date-time":"2020-02-28T10:27:52Z","timestamp":1582885672000},"page":"4260-4269","source":"Crossref","is-referenced-by-count":41,"title":["Sequential Latent Spaces for Modeling the Intention During Diverse Image Captioning"],"prefix":"10.1109","author":[{"given":"Jyoti","family":"Aneja","sequence":"first","affiliation":[]},{"given":"Harsh","family":"Agrawal","sequence":"additional","affiliation":[]},{"given":"Dhruv","family":"Batra","sequence":"additional","affiliation":[]},{"given":"Alexander","family":"Schwing","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref38","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.12340","article-title":"Crandall, and Dhruv Batra. 
Diverse beam search for improved description of complex scenes","author":"vijayakumar","year":"2018","journal-title":"Proc AAAI"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref32","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Proc NIPS"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref30","article-title":"Deep Captioning with Multimodal Recurrent Neural Networks (m-rnn)","author":"mao","year":"2015","journal-title":"Proc ICLR"},{"key":"ref37","article-title":"Learning structured output representation using deep conditional generative models","author":"sohn","year":"2015","journal-title":"Proc NIPS"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00177"},{"key":"ref35","article-title":"Very deep convo-lutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv preprint arXiv 1409 1556"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.445"},{"key":"ref10","article-title":"Fast, diverse and accurate image captioning guided by part-of-speech","author":"deshpande","year":"2019","journal-title":"Proc CVPR"},{"key":"ref40","article-title":"Diverse and accurate image description using a variational auto-encoder with an additive gaussian encoding space","author":"wang","year":"2017","journal-title":"Proc NIPS"},{"key":"ref11","article-title":"Exploring nearest neighbor approaches for image captioning","volume":"abs 1505 4467","author":"devlin","year":"2015","journal-title":"CoRR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298754"},{"key":"ref14","article-title":"Every picture tells a story: Generating sentences from images","author":"farhadi","year":"2010","journal-title":"Proc ECCV"},{"key":"ref15","article-title":"Sequential neural models with stochastic layers","author":"fraccaro","year":"2016","journal-title":"Proc NIPS"},{"key":"ref16","article-title":"Generative Adversarial Networks","author":"goodfellow","year":"2014","journal-title":"Proc NIPS"},{"key":"ref17","article-title":"Z-forcing: Training stochastic recurrent networks","author":"goyal","year":"2017","journal-title":"Proc NIPS"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00603"},{"key":"ref28","article-title":"Improved image captioning via policy gradient optimization of spider","author":"liu","year":"2016","journal-title":"arXiv preprint arXiv 1612 00370"},{"key":"ref4","article-title":"Diverse and coherent paragraph generation from images","author":"chatterjee","year":"2018","journal-title":"Proc ECCV"},{"key":"ref27","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"2014","journal-title":"Proc ECCV"},{"key":"ref3","article-title":"Matching words and pictures","author":"barnard","year":"2003","journal-title":"JMLR"},{"key":"ref6","article-title":"Mind&#x2019;s eye: A recurrent visual representation for image caption generation","author":"chen","year":"2015","journal-title":"Proc CVPR"},{"key":"ref29","article-title":"Visualizing data using t-sne","author":"van der maaten","year":"2008","journal-title":"JMLR"},{"key":"ref5","article-title":"One billion word benchmark for measuring progress in statistical language 
modeling","author":"chelba","year":"2013","journal-title":"arXiv preprint arXiv 1312 3005"},{"key":"ref8","article-title":"A recurrent latent variable model for sequential data","author":"chung","year":"2015","journal-title":"Proc NIPS"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2477044"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00583"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.323"},{"key":"ref1","article-title":"Bottom-up and top-down attention for image captioning and visual question answering","author":"anderson","year":"2017","journal-title":"Proc CVPR"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.575"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.494"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.524"},{"key":"ref24","article-title":"Unifying visual-semantic embeddings with multimodal neural language models","author":"kiros","year":"2014","journal-title":"arXiv preprint arXiv 1411 2539"},{"key":"ref41","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"2015","journal-title":"Proc ICML"},{"key":"ref23","article-title":"Semi-supervised learning with deep generative models","author":"kingma","year":"2014","journal-title":"Proc NIPS"},{"key":"ref26","article-title":"Generating diverse and accurate visual captions by comparative adversarial learning","author":"li","year":"2018","journal-title":"arXiv preprint arXiv 1804 00209"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.162"}],"event":{"name":"2019 IEEE\/CVF International Conference on Computer Vision (ICCV)","start":{"date-parts":[[2019,10,27]]},"location":"Seoul, Korea (South)","end":{"date-parts":[[2019,11,2]]}},"container-title":["2019 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8972782\/9008105\/09010960.pdf?arnumber=9010960","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,27]],"date-time":"2023-09-27T19:59:32Z","timestamp":1695844772000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9010960\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,10]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/iccv.2019.00436","relation":{},"subject":[],"published":{"date-parts":[[2019,10]]}}}