{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T11:25:25Z","timestamp":1772796325187,"version":"3.50.1"},"reference-count":32,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Computer Communications"],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1016\/j.comcom.2023.07.026","type":"journal-article","created":{"date-parts":[[2023,7,28]],"date-time":"2023-07-28T18:14:29Z","timestamp":1690568069000},"page":"1-9","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":5,"special_numbering":"C","title":["A multimodal dual-fusion entity extraction model for large and complex devices"],"prefix":"10.1016","volume":"210","author":[{"given":"Weiming","family":"Tong","sequence":"first","affiliation":[]},{"given":"Xu","family":"Chu","sequence":"additional","affiliation":[]},{"given":"Wenqi","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Zhongwei","family":"Li","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.comcom.2023.07.026_b1","doi-asserted-by":"crossref","DOI":"10.1088\/1742-6596\/1350\/1\/012081","article-title":"Sesri 300mev proton and heavy ion accelerator","author":"Jiang","year":"2019","journal-title":"J. Phys. Conf. Ser."},{"key":"10.1016\/j.comcom.2023.07.026_b2","first-page":"91","article-title":"Vibration of scanning magnet for space environment simulation and research infrastructure","volume":"05","author":"Jiuwei","year":"2021","journal-title":"High Power Laser Part. Beams"},{"key":"10.1016\/j.comcom.2023.07.026_b3","first-page":"1","article-title":"The transmission and parse technology of multi-source heterogeneous data based on opc ua in intelligent manufacturing","volume":"01","author":"Zhang","year":"2021","journal-title":"Mech. Electr. Eng. Technol."},{"key":"10.1016\/j.comcom.2023.07.026_b4","article-title":"Research on multi-source heterogeneous data fusion technology for complex information system","volume":"07","author":"Zhang","year":"2020","journal-title":"China Meas. Test"},{"key":"10.1016\/j.comcom.2023.07.026_b5","doi-asserted-by":"crossref","first-page":"271","DOI":"10.1016\/j.comcom.2020.05.017","article-title":"Structure-augmented knowledge graph embed ding for sparse data with rule learning","volume":"159","author":"Zhao","year":"2020","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b6","doi-asserted-by":"crossref","first-page":"21","DOI":"10.1016\/j.comcom.2021.03.012","article-title":"Representation method of cooperative social network features based on node2vec model","volume":"173","author":"You","year":"2021","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b7","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1162\/dint_a_00114","article-title":"Visual entity linking via multi-modal learning","volume":"1","author":"Zheng","year":"2022","journal-title":"Data Intell."},{"key":"10.1016\/j.comcom.2023.07.026_b8","doi-asserted-by":"crossref","unstructured":"Y. Liu, H. Li, A. Garcia-Duran, Mmkg: multi-modal knowledge graphs, in: European Semantic Web Conference, 2019, pp. 459\u2013474.","DOI":"10.1007\/978-3-030-21348-0_30"},{"key":"10.1016\/j.comcom.2023.07.026_b9","doi-asserted-by":"crossref","unstructured":"M. Wang, G. Qi, H. Wang, Richpedia: A comprehensive multi-modal knowledge graph, in: Joint International Semantic Technology Conference, 2019, pp. 130\u2013145.","DOI":"10.1007\/978-3-030-41407-8_9"},{"key":"10.1016\/j.comcom.2023.07.026_b10","article-title":"Multi-modal knowledge graph construction and application: A survey","author":"Zhu","year":"2022","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.comcom.2023.07.026_b11","doi-asserted-by":"crossref","first-page":"478","DOI":"10.1109\/JSTSP.2020.2987728","article-title":"Multimodal intelligence: Representation learning, information fusion, and applications","volume":"3","author":"Zhang","year":"2020","journal-title":"IEEE J. Sel. Top. Sign. Proces."},{"key":"10.1016\/j.comcom.2023.07.026_b12","first-page":"423","article-title":"Multimodal machine learning: A survey and taxonomy","volume":"2","author":"Baltru\u0161aitis","year":"2018","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.comcom.2023.07.026_b13","series-title":"Document-level event extraction via heterogeneous graph-based interaction model with a tracker","author":"Xu","year":"2021"},{"key":"10.1016\/j.comcom.2023.07.026_b14","series-title":"Multimodal attribute extraction","author":"Logan","year":"2021"},{"key":"10.1016\/j.comcom.2023.07.026_b15","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","author":"Lu","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.comcom.2023.07.026_b16","series-title":"LXMERT: Learning cross-modality encoder representations from transformers","author":"Tan","year":"2019"},{"key":"10.1016\/j.comcom.2023.07.026_b17","series-title":"SimVLM: Simple visual language model pre-training with weak supervision","author":"Wang","year":"2021"},{"key":"10.1016\/j.comcom.2023.07.026_b18","series-title":"KM-BART: Knowledge enhanced multimodal bart for visual commonsense generation","author":"Xing","year":"2021"},{"key":"10.1016\/j.comcom.2023.07.026_b19","doi-asserted-by":"crossref","unstructured":"Z. Zhao, H. Lu, C. Deng, Partial multi-modal sparse coding via adaptive similarity structure regularization, in: Proceedings of the 24th ACM International Conference on Multimedia, 2016, pp. 152\u2013156.","DOI":"10.1145\/2964284.2967201"},{"key":"10.1016\/j.comcom.2023.07.026_b20","doi-asserted-by":"crossref","first-page":"370","DOI":"10.1109\/TMM.2015.2390499","article-title":"Learning consistent feature representation for cross-modal multimedia retrieval","volume":"3","author":"Kang","year":"2015","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.comcom.2023.07.026_b21","doi-asserted-by":"crossref","first-page":"57","DOI":"10.1016\/j.comcom.2020.11.013","article-title":"Class consistent and joint group sparse representation model for image classification in internet of medical things","volume":"166","author":"Gao","year":"2021","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b22","doi-asserted-by":"crossref","unstructured":"C. Sun, A. Myers, C. Vondrick, VideoBERT: A joint model for video and language representation learning, in: Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2019, pp. 7464\u20137473.","DOI":"10.1109\/ICCV.2019.00756"},{"key":"10.1016\/j.comcom.2023.07.026_b23","series-title":"Trusted multi-view classification","author":"Han","year":"2021"},{"key":"10.1016\/j.comcom.2023.07.026_b24","doi-asserted-by":"crossref","first-page":"197","DOI":"10.1016\/j.comcom.2022.06.005","article-title":"Predicting application usage based on latent contextual information","volume":"192","author":"Solomon","year":"2022","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b25","doi-asserted-by":"crossref","first-page":"320","DOI":"10.1016\/j.comcom.2021.10.022","article-title":"Learning compatibility knowledge for outfit recommendation with complementary clothing matching","volume":"181","author":"Wang","year":"2022","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b26","doi-asserted-by":"crossref","unstructured":"X. Wei, T. Zhang, Y. Li, Multi-modality cross attention network for image and sentence matching, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 10941\u201310950.","DOI":"10.1109\/CVPR42600.2020.01095"},{"key":"10.1016\/j.comcom.2023.07.026_b27","doi-asserted-by":"crossref","unstructured":"Z. Jin, J. Cao, H. Guo, Multimodal fusion with recurrent neural networks for rumor detection on microblogs, in: Proceedings of the 25th ACM International Conference on Multimedia, 2017, pp. 795\u2013816.","DOI":"10.1145\/3123266.3123454"},{"key":"10.1016\/j.comcom.2023.07.026_b28","first-page":"4835","article-title":"Deep multimodal fusion by channel exchanging","author":"Wang","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.comcom.2023.07.026_b29","doi-asserted-by":"crossref","first-page":"182","DOI":"10.1016\/j.comcom.2021.10.019","article-title":"Construction of multi-modal perception model of communicative robot in non-structural cyber physical system environment based on optimized bt-svm model","volume":"181","author":"Zeng","year":"2022","journal-title":"Comput. Commun."},{"key":"10.1016\/j.comcom.2023.07.026_b30","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.comcom.2023.07.026_b31","first-page":"195","article-title":"Image caption algorithm based on ViLBERT and BiLSTM","volume":"11","author":"Xu","year":"2021","journal-title":"Comput. Syst. Appl."},{"key":"10.1016\/j.comcom.2023.07.026_b32","first-page":"48","article-title":"Chinese entity recognition based on bert-BiLSTM- CRF model","volume":"07","author":"Xie","year":"2020","journal-title":"Comput. Syst. Appl."}],"container-title":["Computer Communications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0140366423002608?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0140366423002608?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T23:42:34Z","timestamp":1758843754000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0140366423002608"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":32,"alternative-id":["S0140366423002608"],"URL":"https:\/\/doi.org\/10.1016\/j.comcom.2023.07.026","relation":{},"ISSN":["0140-3664"],"issn-type":[{"value":"0140-3664","type":"print"}],"subject":[],"published":{"date-parts":[[2023,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A multimodal dual-fusion entity extraction model for large and complex devices","name":"articletitle","label":"Article Title"},{"value":"Computer Communications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.comcom.2023.07.026","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2023 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}