@misc{efrat2020turking,
    title={The {Turking} Test: Can Language Models Understand Instructions?},
    author={Avia Efrat and Omer Levy},
    year={2020},
    eprint={2010.11982},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}


%% Image Prompt Engineering
@misc{oppenlaender2022taxonomy,
  title         = {A Taxonomy of Prompt Modifiers for Text-To-Image Generation},
  author        = {Jonas Oppenlaender},
  year          = {2022},
  eprint        = {2204.13988},
  archivePrefix = {arXiv},
  primaryClass  = {cs.MM},
}

@misc{wang2022diffusiondb,
    title={{DiffusionDB}: A Large-scale Prompt Gallery Dataset for Text-to-Image Generative Models},
    author={Zijie J. Wang and Evan Montoya and David Munechika and Haoyang Yang and Benjamin Hoover and Duen Horng Chau},
    year={2022},
    eprint={2210.14896},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}

@misc{hao2022optimizing,
  title         = {Optimizing Prompts for Text-to-Image Generation},
  author        = {Yaru Hao and Zewen Chi and Li Dong and Furu Wei},
  year          = {2022},
  eprint        = {2212.09611},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}


% Applied Prompt Engineering
% cascades
@misc{dohan2022language,
  title         = {Language Model Cascades},
  author        = {David Dohan and Winnie Xu and Aitor Lewkowycz and Jacob Austin and David Bieber and Raphael Gontijo Lopes and Yuhuai Wu and Henryk Michalewski and Rif A. Saurous and Jascha Sohl-dickstein and Kevin Murphy and Charles Sutton},
  year          = {2022},
  eprint        = {2207.10342},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}


% User Interface Design
@inproceedings{liu2022design,
author = {Liu, Vivian and Chilton, Lydia B},
title = {Design Guidelines for Prompt Engineering Text-to-Image Generative Models},
year = {2022},
isbn = {9781450391573},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3491102.3501825},
doi = {10.1145/3491102.3501825},
abstract = {Text-to-image generative models are a new and powerful way to generate visual artwork. However, the open-ended nature of text as interaction is double-edged; while users can input anything and have access to an infinite range of generations, they also must engage in brute-force trial and error with the text prompt when the result quality is poor. We conduct a study exploring what prompt keywords and model hyperparameters can help produce coherent outputs. In particular, we study prompts structured to include subject and style keywords and investigate success and failure modes of these prompts. Our evaluation of 5493 generations over the course of five experiments spans 51 abstract and concrete subjects as well as 51 abstract and figurative styles. From this evaluation, we present design guidelines that can help people produce better outcomes from text-to-image generative models.},
booktitle = {Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems},
articleno = {384},
numpages = {23},
keywords = {computational creativity, multimodal generative models, AI co-creation, prompt engineering, text-to-image, design guidelines},
location = {New Orleans, LA, USA},
series = {CHI '22}
}




% Dataset Generation

@misc{perez2022discovering,
  title         = {Discovering Language Model Behaviors with Model-Written Evaluations},
  author        = {Ethan Perez and Sam Ringer and Kamilė Lukošiūtė and Karina Nguyen and Edwin Chen and Scott Heiner and Craig Pettit and Catherine Olsson and Sandipan Kundu and Saurav Kadavath and Andy Jones and Anna Chen and Ben Mann and Brian Israel and Bryan Seethor and Cameron McKinnon and Christopher Olah and Da Yan and Daniela Amodei and Dario Amodei and Dawn Drain and Dustin Li and Eli Tran-Johnson and Guro Khundadze and Jackson Kernion and James Landis and Jamie Kerr and Jared Mueller and Jeeyoon Hyun and Joshua Landau and Kamal Ndousse and Landon Goldberg and Liane Lovitt and Martin Lucas and Michael Sellitto and Miranda Zhang and Neerav Kingsland and Nelson Elhage and Nicholas Joseph and Noemí Mercado and Nova DasSarma and Oliver Rausch and Robin Larson and Sam McCandlish and Scott Johnston and Shauna Kravec and Sheer El Showk and Tamera Lanham and Timothy Telleen-Lawton and Tom Brown and Tom Henighan and Tristan Hume and Yuntao Bai and Zac Hatfield-Dodds and Jack Clark and Samuel R. Bowman and Amanda Askell and Roger Grosse and Danny Hernandez and Deep Ganguli and Evan Hubinger and Nicholas Schiefer and Jared Kaplan},
  year          = {2022},
  eprint        = {2212.09251},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{su2022selective,
  title         = {Selective Annotation Makes Language Models Better Few-Shot Learners},
  author        = {Hongjin Su and Jungo Kasai and Chen Henry Wu and Weijia Shi and Tianlu Wang and Jiayi Xin and Rui Zhang and Mari Ostendorf and Luke Zettlemoyer and Noah A. Smith and Tao Yu},
  year          = {2022},
  eprint        = {2209.01975},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

% applications

@misc{izacard2022atlas,
    title={{Atlas}: Few-shot Learning with Retrieval Augmented Language Models},
    author={Gautier Izacard and Patrick Lewis and Maria Lomeli and Lucas Hosseini and Fabio Petroni and Timo Schick and Jane Dwivedi-Yu and Armand Joulin and Sebastian Riedel and Edouard Grave},
    year={2022},
    eprint={2208.03299},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}

@misc{wang2022strudel,
    title={{STRUDEL}: Structured Dialogue Summarization for Dialogue Comprehension},
    author={Borui Wang and Chengcheng Feng and Arjun Nair and Madelyn Mao and Jai Desai and Asli Celikyilmaz and Haoran Li and Yashar Mehdad and Dragomir Radev},
    year={2022},
    eprint={2212.12652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}


% Misc
@misc{beurerkellner2022prompting,
  title         = {Prompting Is Programming: A Query Language For Large Language Models},
  author        = {Luca Beurer-Kellner and Marc Fischer and Martin Vechev},
  year          = {2022},
  eprint        = {2212.06094},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{ratner2022parallel,
  title         = {Parallel Context Windows Improve In-Context Learning of Large Language Models},
  author        = {Nir Ratner and Yoav Levine and Yonatan Belinkov and Ori Ram and Omri Abend and Ehud Karpas and Amnon Shashua and Kevin Leyton-Brown and Yoav Shoham},
  year          = {2022},
  eprint        = {2212.10947},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{bursztyn2022learning,
  title         = {Learning to Perform Complex Tasks through Compositional Fine-Tuning of Language Models},
  author        = {Victor S. Bursztyn and David Demeter and Doug Downey and Larry Birnbaum},
  year          = {2022},
  eprint        = {2210.12607},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{wang2022supernaturalinstructions,
    title={{Super-NaturalInstructions}: Generalization via Declarative Instructions on 1600+ {NLP} Tasks},
    author={Yizhong Wang and Swaroop Mishra and Pegah Alipoormolabashi and Yeganeh Kordi and Amirreza Mirzaei and Anjana Arunkumar and Arjun Ashok and Arut Selvan Dhanasekaran and Atharva Naik and David Stap and Eshaan Pathak and Giannis Karamanolakis and Haizhi Gary Lai and Ishan Purohit and Ishani Mondal and Jacob Anderson and Kirby Kuznia and Krima Doshi and Maitreya Patel and Kuntal Kumar Pal and Mehrad Moradshahi and Mihir Parmar and Mirali Purohit and Neeraj Varshney and Phani Rohitha Kaza and Pulkit Verma and Ravsehaj Singh Puri and Rushang Karia and Shailaja Keyur Sampat and Savan Doshi and Siddhartha Mishra and Sujan Reddy and Sumanta Patro and Tanay Dixit and Xudong Shen and Chitta Baral and Yejin Choi and Noah A. Smith and Hannaneh Hajishirzi and Daniel Khashabi},
    year={2022},
    eprint={2204.07705},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}

@inproceedings{gao2021making,
    title = {Making Pre-trained Language Models Better Few-shot Learners},
    author = {Gao, Tianyu and Fisch, Adam and Chen, Danqi},
    booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
    month = aug,
    year = {2021},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    pages = {3816--3830},
    url = {http://dx.doi.org/10.18653/v1/2021.acl-long.295},
    doi = {10.18653/v1/2021.acl-long.295}
}

@misc{dang2022prompt,
    title={How to Prompt? Opportunities and Challenges of Zero- and Few-Shot Learning for {Human-AI} Interaction in Creative Applications of Generative Models},
    author={Hai Dang and Lukas Mecke and Florian Lehmann and Sven Goller and Daniel Buschek},
    year={2022},
    eprint={2209.01390},
    archivePrefix={arXiv},
    primaryClass={cs.HC}
}

@misc{akyrek2022measuring,
  title         = {On Measuring Social Biases in Prompt-Based Multi-Task Learning},
  author        = {Afra Feyza Akyürek and Sejin Paik and Muhammed Yusuf Kocyigit and Seda Akbiyik and Şerife Leman Runyun and Derry Wijaya},
  year          = {2022},
  eprint        = {2205.11605},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{jin2022plot,
  title         = {Plot Writing From Pre-Trained Language Models},
  author        = {Yiping Jin and Vishakha Kadam and Dittaya Wanvarie},
  year          = {2022},
  eprint        = {2206.03021},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

% bias in llms
@inproceedings{nadeem-etal-2021-stereoset,
    title = {{S}tereo{S}et: Measuring stereotypical bias in pretrained language models},
    author = {Nadeem, Moin and Bethke, Anna and Reddy, Siva},
    booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
    month = aug,
    year = {2021},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2021.acl-long.416},
    doi = {10.18653/v1/2021.acl-long.416},
    pages = {5356--5371},
    abstract = {A stereotype is an over-generalized belief about a particular group of people, e.g., Asians are good at math or African Americans are athletic. Such beliefs (biases) are known to hurt target groups. Since pretrained language models are trained on large real-world data, they are known to capture stereotypical biases. It is important to quantify to what extent these biases are present in them. Although this is a rapidly growing area of research, existing literature lacks in two important aspects: 1) they mainly evaluate bias of pretrained language models on a small set of artificial sentences, even though these models are trained on natural data 2) current evaluations focus on measuring bias without considering the language modeling ability of a model, which could lead to misleading trust on a model even if it is a poor language model. We address both these problems. We present StereoSet, a large-scale natural English dataset to measure stereotypical biases in four domains: gender, profession, race, and religion. We contrast both stereotypical bias and language modeling ability of popular models like BERT, GPT-2, RoBERTa, and XLnet. We show that these models exhibit strong stereotypical biases. Our data and code are available at https://stereoset.mit.edu.},
}

% hallucinations in llms
@article{Ji_2022,
	doi = {10.1145/3571730},
	year = {2022},
	month = nov,
	publisher = {Association for Computing Machinery ({ACM})},
	author = {Ziwei Ji and Nayeon Lee and Rita Frieske and Tiezheng Yu and Dan Su and Yan Xu and Etsuko Ishii and Yejin Bang and Andrea Madotto and Pascale Fung},
	title = {Survey of Hallucination in Natural Language Generation},
	journal = {{ACM} Computing Surveys}
}

@inproceedings{yuan2022wordcraft,
  title     = {Wordcraft: Story Writing With Large Language Models},
  author    = {Yuan, Ann and Coenen, Andy and Reif, Emily and Ippolito, Daphne},
  booktitle = {27th International Conference on Intelligent User Interfaces},
  pages     = {841--852},
  year      = {2022},
}

@misc{fadnavis2022pain,
  title={{PainPoints}: A Framework for Language-based Detection of Chronic Pain and Expert-Collaborative Text-Summarization},
  author={Fadnavis, Shreyas and Dhurandhar, Amit and Norel, Raquel and Reinen, Jenna M and Agurto, Carla and Secchettin, Erica and Schweiger, Vittorio and Perini, Giovanni and Cecchi, Guillermo},
  year={2022},
  eprint={2209.09814},
  archivePrefix={arXiv}
}

@misc{wang2022selfinstruct,
  title         = {Self-Instruct: Aligning Language Model with Self Generated Instructions},
  author        = {Yizhong Wang and Yeganeh Kordi and Swaroop Mishra and Alisa Liu and Noah A. Smith and Daniel Khashabi and Hannaneh Hajishirzi},
  year          = {2022},
  eprint        = {2212.10560},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}

@misc{guo2022images,
    title={From Images to Textual Prompts: Zero-shot {VQA} with Frozen Large Language Models},
    author={Jiaxian Guo and Junnan Li and Dongxu Li and Anthony Meng Huat Tiong and Boyang Li and Dacheng Tao and Steven C. H. Hoi},
    year={2022},
    eprint={2212.10846},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}



@misc{markov_2022,
    title={New and improved content moderation tooling},
    author={Markov, Todor},
    year={2022},
    month=dec,
    howpublished={{OpenAI} Blog},
    url={https://openai.com/blog/new-and-improved-content-moderation-tooling/}
}

@misc{openai_api,
    title={{OpenAI} {API} Moderation Guide},
    author={{OpenAI}},
    year={2022},
    url={https://beta.openai.com/docs/guides/moderation}
}

@misc{openai_chatgpt,
    title={{ChatGPT}: Optimizing Language Models for Dialogue},
    author={{OpenAI}},
    year={2022},
    url={https://openai.com/blog/chatgpt/}
}

% def of verbalizer
@misc{schick2020exploiting,
  title         = {Exploiting Cloze Questions for Few Shot Text Classification and Natural Language Inference},
  author        = {Timo Schick and Hinrich Schütze},
  year          = {2020},
  eprint        = {2001.07676},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
}


@article{lake2015human,
  title     = {Human-level concept learning through probabilistic program induction},
  author    = {Lake, Brenden M and Salakhutdinov, Ruslan and Tenenbaum, Joshua B},
  journal   = {Science},
  volume    = {350},
  number    = {6266},
  pages     = {1332--1338},
  year      = {2015},
  publisher = {American Association for the Advancement of Science},
}




% music
@software{Forsgren_Martiros_2022,
  author = {Forsgren, Seth and Martiros, Hayk},
  title = {{Riffusion} - Stable diffusion for real-time music generation},
  url = {https://riffusion.com/about},
  year = {2022},
  note = {Both authors contributed equally}
}

% writing
@misc{bonta2022how,
    title = {How to use {OpenAI}'s {ChatGPT} to write the perfect cold email},
    author = {Alice Bonta},
    year = {2022},
    month = dec,
    url = {https://www.streak.com/post/how-to-use-ai-to-write-perfect-cold-emails},
    note = {Published December 7, 2022}
}

% cacti
@book{nobel2002cacti,
  title={Cacti: biology and uses},
  author={Nobel, Park S and others},
  year={2002},
  publisher={University of California Press}
}

% performance with misleading prompts
@misc{webson2023itscomplicated,
  title={Are Language Models Worse than Humans at Following Prompts? It’s Complicated},
  author={Albert Webson and Alyssa Marie Loo and Qinan Yu and Ellie Pavlick},
  year={2023},
  eprint={2301.07085},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}







@misc{wang2023unleashing,
  title         = {Unleashing Cognitive Synergy in Large Language Models: A Task-Solving Agent through Multi-Persona Self-Collaboration},
  author        = {Zhenhailong Wang and Shaoguang Mao and Wenshan Wu and Tao Ge and Furu Wei and Heng Ji},
  year          = {2023},
  eprint        = {2307.05300},
  archivePrefix = {arXiv},
  primaryClass  = {cs.AI},
}

