{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T12:38:39Z","timestamp":1763728719589},"reference-count":21,"publisher":"Association for Natural Language Processing","issue":"1","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Journal of Natural Language Processing"],"published-print":{"date-parts":[[2024]]},"DOI":"10.5715\/jnlp.31.250","type":"journal-article","created":{"date-parts":[[2024,3,14]],"date-time":"2024-03-14T22:15:40Z","timestamp":1710454540000},"page":"250-265","source":"Crossref","is-referenced-by-count":2,"title":["Focused Prefix Tuning for Controllable Text Generation"],"prefix":"10.5715","volume":"31","author":[{"given":"Congda","family":"Ma","sequence":"first","affiliation":[{"name":"Tokyo Institute of Technology"}]},{"given":"Tianyu","family":"Zhao","sequence":"additional","affiliation":[{"name":"rinna Co. Ltd."}]},{"given":"Makoto","family":"Shing","sequence":"additional","affiliation":[{"name":"Stability AI Ltd."}]},{"given":"Kei","family":"Sawada","sequence":"additional","affiliation":[{"name":"rinna Co. Ltd."}]},{"given":"Manabu","family":"Okumura","sequence":"additional","affiliation":[{"name":"Tokyo Institute of Technology"}]}],"member":"3685","reference":[{"key":"1","unstructured":"Alvin, C., Yew-Soon, O., Bill, P., Aston, Z., and Jie, F. (2021). \u201cCoCon: A Self-Supervised Approach for Controlled Text Generation.\u201d In <i>9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3\u20137, 2021<\/i>."},{"key":"2","unstructured":"Dathathri, S., Madotto, A., Lan, J., Hung, J., Frank, E., Molino, P., Yosinski, J., and Liu, R. (2020). \u201cPlug and Play Language Models: A Simple Approach to Controlled Text Generation.\u201d In <i>8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26\u201330, 2020<\/i>."},{"key":"3","doi-asserted-by":"crossref","unstructured":"Ficler, J. and Goldberg, Y. (2017). \u201cControlling Linguistic Style Aspects in Neural Language Generation.\u201d In <i>Proceedings of the Workshop on Stylistic Variation<\/i>, pp. 94\u2013104.","DOI":"10.18653\/v1\/W17-4912"},{"key":"4","doi-asserted-by":"crossref","unstructured":"Gu, Y., Feng, X., Ma, S., Zhang, L., Gong, H., and Qin, B. (2022). \u201cA Distributional Lens for Multi-Aspect Controllable Text Generation.\u201d In <i>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing<\/i>, pp. 1023\u20131043.","DOI":"10.18653\/v1\/2022.emnlp-main.67"},{"key":"5","unstructured":"He, P., Liu, X., Gao, J., and Chen, W. (2021). \u201cDeberta: decoding-Enhanced Bert with Disentangled Attention.\u201d In <i>9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3\u20137, 2021<\/i>."},{"key":"6","unstructured":"Holtzman, A., Buys, J., Du, L., Forbes, M., and Choi, Y. (2020). \u201cThe Curious Case of Neural Text Degeneration.\u201d In <i>ICLR<\/i>."},{"key":"7","unstructured":"Keskar, N. S., McCann, B., Varshney, L. R., Xiong, C., and Socher, R. (2019). \u201cCTRL: A Conditional Transformer Language Model for Controllable Generation.\u201d <i>ArXiv<\/i>, abs\/1909.05858."},{"key":"8","doi-asserted-by":"crossref","unstructured":"Krause, B., Gotmare, A. D., McCann, B., Keskar, N. S., Joty, S., Socher, R., and Rajani, N. F. (2021). \u201cGeDi: Generative Discriminator Guided Sequence Generation.\u201d In <i>Findings of the Association for Computational Linguistics: EMNLP 2021<\/i>, pp. 4929\u20134952.","DOI":"10.18653\/v1\/2021.findings-emnlp.424"},{"key":"9","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., and Constant, N. (2021). \u201cThe Power of Scale for Parameter-Efficient Prompt Tuning.\u201d In <i>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing<\/i>, pp. 3045\u20133059.","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"10","doi-asserted-by":"crossref","unstructured":"Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. (2020). \u201cBART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension.\u201d In <i>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics<\/i>, pp. 7871\u20137880.","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"11","doi-asserted-by":"crossref","unstructured":"Li, X. L. and Liang, P. (2021). \u201cPrefix-Tuning: Optimizing Continuous Prompts for Generation.\u201d In <i>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)<\/i>, pp. 4582\u20134597.","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"12","doi-asserted-by":"crossref","unstructured":"Liu, A., Sap, M., Lu, X., Swayamdipta, S., Bhagavatula, C., Smith, N. A., and Choi, Y. (2021). \u201cDExperts: Decoding-Time Controlled Text Generation with Experts and Anti-Experts.\u201d In <i>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)<\/i>, pp. 6691\u20136706.","DOI":"10.18653\/v1\/2021.acl-long.522"},{"key":"13","unstructured":"Liu, Y., Ott, M., Goyal, N., Du, J., Joshi, M., Chen, D., Levy, O., Lewis, M., Zettlemoyer, L., and Stoyanov, V. (2019). \u201cRoBERTa: A Robustly Optimized BERT Pretraining Approach.\u201d <i>CoRR<\/i>, abs\/1907.11692."},{"key":"14","doi-asserted-by":"crossref","unstructured":"Ma, C., Zhao, T., Shing, M., Sawada, K., and Okumura, M. (2023). \u201cFocused Prefix Tuning for Controllable Text Generation.\u201d In <i>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)<\/i>, pp. 1116\u20131127.","DOI":"10.18653\/v1\/2023.acl-short.96"},{"key":"15","unstructured":"Maas, A. L., Daly, R. E., Pham, P. T., Huang, D., Ng, A. Y., and Potts, C. (2011). \u201cLearning Word Vectors for Sentiment Analysis.\u201d In <i>Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies<\/i>, pp. 142\u2013150."},{"key":"16","doi-asserted-by":"crossref","unstructured":"Madotto, A., Ishii, E., Lin, Z., Dathathri, S., and Fung, P. (2020). \u201cPlug-and-Play Conversational Models.\u201d In <i>Findings of the Association for Computational Linguistics: EMNLP 2020<\/i>, pp. 2422\u20132433.","DOI":"10.18653\/v1\/2020.findings-emnlp.219"},{"key":"17","doi-asserted-by":"crossref","unstructured":"Mireshghallah, F., Goyal, K., and Berg-Kirkpatrick, T. (2022). \u201cMix and Match: Learning-free Controllable Text Generationusing Energy Language Models.\u201d In <i>Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)<\/i>, pp. 401\u2013415.","DOI":"10.18653\/v1\/2022.acl-long.31"},{"key":"18","doi-asserted-by":"crossref","unstructured":"Qian, J., Dong, L., Shen, Y., Wei, F., and Chen, W. (2022). \u201cControllable Natural Language Generation with Contrastive Prefixes.\u201d In <i>Findings of the Association for Computational Linguistics: ACL 2022<\/i>, pp. 2912\u20132924.","DOI":"10.18653\/v1\/2022.findings-acl.229"},{"key":"19","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., and Sutskever, I. (2019). \u201cLanguage Models are Unsupervised Multitask Learners.\u201d <i>OpenAI Blog<\/i>, 1 (8). 9."},{"key":"20","doi-asserted-by":"crossref","unstructured":"Yang, K. and Klein, D. (2021). \u201cFUDGE: Controlled Text Generation With Future Discriminators.\u201d In <i>Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies<\/i>, pp. 3511\u20133535.","DOI":"10.18653\/v1\/2021.naacl-main.276"},{"key":"21","unstructured":"Zhang, X., Zhao, J., and LeCun, Y. (2015). \u201cCharacter-level Convolutional Networks for Text Classification.\u201d In <i>Advances in Neural Information Processing Systems<\/i>, pp. 649\u2013657."}],"container-title":["Journal of Natural Language Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/jnlp\/31\/1\/31_250\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,16]],"date-time":"2024-03-16T04:34:14Z","timestamp":1710563654000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/jnlp\/31\/1\/31_250\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":21,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024]]}},"URL":"https:\/\/doi.org\/10.5715\/jnlp.31.250","relation":{},"ISSN":["1340-7619","2185-8314"],"issn-type":[{"value":"1340-7619","type":"print"},{"value":"2185-8314","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}