% CoT
@misc{wei2022chain,
    author        = {Wei, Jason and Wang, Xuezhi and Schuurmans, Dale and Bosma, Maarten and Ichter, Brian and Xia, Fei and Chi, Ed and Le, Quoc and Zhou, Denny},
    title         = {Chain of Thought Prompting Elicits Reasoning in Large Language Models},
    year          = {2022},
    eprint        = {2201.11903},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% 0-shot CoT
@misc{kojima2022large,
    author        = {Kojima, Takeshi and Gu, Shixiang Shane and Reid, Machel and Matsuo, Yutaka and Iwasawa, Yusuke},
    title         = {Large Language Models are Zero-Shot Reasoners},
    year          = {2022},
    eprint        = {2205.11916},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% self consistency
@misc{wang2022selfconsistency,
    author        = {Wang, Xuezhi and Wei, Jason and Schuurmans, Dale and Le, Quoc and Chi, Ed and Narang, Sharan and Chowdhery, Aakanksha and Zhou, Denny},
    title         = {Self-Consistency Improves Chain of Thought Reasoning in Language Models},
    year          = {2022},
    eprint        = {2203.11171},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

@inproceedings{liu2021makes,
    author    = {Liu, Jiachang and Shen, Dinghan and Zhang, Yizhe and Dolan, Bill and Carin, Lawrence and Chen, Weizhu},
    title     = {What Makes Good In-Context Examples for {GPT-3}?},
    booktitle = {Proceedings of Deep Learning Inside Out ({DeeLIO} 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures},
    publisher = {Association for Computational Linguistics},
    year      = {2022},
    doi       = {10.18653/v1/2022.deelio-1.10},
}

% generated knowledge
@misc{liu2021generated,
    author        = {Liu, Jiacheng and Liu, Alisa and Lu, Ximing and Welleck, Sean and West, Peter and Le Bras, Ronan and Choi, Yejin and Hajishirzi, Hannaneh},
    title         = {Generated Knowledge Prompting for Commonsense Reasoning},
    year          = {2021},
    eprint        = {2110.08387},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% recitation augmented
@misc{sun2022recitationaugmented,
    author        = {Sun, Zhiqing and Wang, Xuezhi and Tay, Yi and Yang, Yiming and Zhou, Denny},
    title         = {Recitation-Augmented Language Models},
    year          = {2022},
    eprint        = {2210.01296},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% structure over content in prompts
@misc{min2022rethinking,
    author        = {Min, Sewon and Lyu, Xinxi and Holtzman, Ari and Artetxe, Mikel and Lewis, Mike and Hajishirzi, Hannaneh and Zettlemoyer, Luke},
    title         = {Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?},
    year          = {2022},
    eprint        = {2202.12837},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% scratchpads
@misc{nye2021work,
    author        = {Nye, Maxwell and Andreassen, Anders Johan and Gur-Ari, Guy and Michalewski, Henryk and Austin, Jacob and Bieber, David and Dohan, David and Lewkowycz, Aitor and Bosma, Maarten and Luan, David and Sutton, Charles and Odena, Augustus},
    title         = {Show Your Work: Scratchpads for Intermediate Computation with Language Models},
    year          = {2021},
    eprint        = {2112.00114},
    archivePrefix = {arXiv},
    primaryClass  = {cs.LG},
}

% Maieutic
@misc{jung2022maieutic,
    author        = {Jung, Jaehun and Qin, Lianhui and Welleck, Sean and Brahman, Faeze and Bhagavatula, Chandra and Le Bras, Ronan and Choi, Yejin},
    title         = {Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations},
    year          = {2022},
    eprint        = {2205.11822},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

% STaR
@misc{zelikman2022star,
    author        = {Zelikman, Eric and Wu, Yuhuai and Mu, Jesse and Goodman, Noah D.},
    title         = {{STaR}: Bootstrapping Reasoning With Reasoning},
    year          = {2022},
    eprint        = {2203.14465},
    archivePrefix = {arXiv},
    primaryClass  = {cs.LG},
}

% least to most
@misc{zhou2022leasttomost,
    author        = {Zhou, Denny and Sch{\"a}rli, Nathanael and Hou, Le and Wei, Jason and Scales, Nathan and Wang, Xuezhi and Schuurmans, Dale and Cui, Claire and Bousquet, Olivier and Le, Quoc and Chi, Ed},
    title         = {Least-to-Most Prompting Enables Complex Reasoning in Large Language Models},
    year          = {2022},
    eprint        = {2205.10625},
    archivePrefix = {arXiv},
    primaryClass  = {cs.AI},
}

% instruction prompts
@inproceedings{mishra2022reframing,
    author    = {Mishra, Swaroop and Khashabi, Daniel and Baral, Chitta and Choi, Yejin and Hajishirzi, Hannaneh},
    title     = {Reframing Instructional Prompts to {GPTk}'s Language},
    booktitle = {Findings of the Association for Computational Linguistics: ACL 2022},
    publisher = {Association for Computational Linguistics},
    year      = {2022},
    doi       = {10.18653/v1/2022.findings-acl.50},
}

% few shot
@inproceedings{logan-iv-etal-2022-cutting,
    author    = {Logan IV, Robert and Balazevic, Ivana and Wallace, Eric and Petroni, Fabio and Singh, Sameer and Riedel, Sebastian},
    title     = {Cutting Down on Prompts and Parameters: Simple Few-Shot Learning with Language Models},
    booktitle = {Findings of the Association for Computational Linguistics: ACL 2022},
    month     = may,
    year      = {2022},
    address   = {Dublin, Ireland},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.findings-acl.222},
    doi       = {10.18653/v1/2022.findings-acl.222},
    pages     = {2824--2835},
    abstract  = {Prompting language models (LMs) with training examples and task descriptions has been seen as critical to recent successes in few-shot learning. In this work, we show that finetuning LMs in the few-shot setting can considerably reduce the need for prompt engineering. In fact, one can use null prompts, prompts that contain neither task-specific templates nor training examples, and achieve competitive accuracy to manually-tuned prompts across a wide range of tasks. While finetuning LMs does introduce new parameters for each downstream task, we show that this memory overhead can be substantially reduced: finetuning only the bias terms can achieve comparable or better accuracy than standard finetuning while only updating 0.1{\%} of the parameters. All in all, we recommend finetuning LMs for few-shot learning as it is more accurate, robust to different prompts, and can be made nearly as efficient as using frozen LMs.},
}

% role prompting
@misc{shanahan2023roleplay,
    author        = {Shanahan, Murray and McDonell, Kyle and Reynolds, Laria},
    title         = {Role-Play with Large Language Models},
    year          = {2023},
    eprint        = {2305.16367},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CL},
}

@misc{li2023camel,
    author        = {Li, Guohao and Hammoud, Hasan Abed Al Kader and Itani, Hani and Khizbullin, Dmitrii and Ghanem, Bernard},
    title         = {{CAMEL}: Communicative Agents for {``Mind''} Exploration of Large Scale Language Model Society},
    year          = {2023},
    eprint        = {2303.17760},
    archivePrefix = {arXiv},
    primaryClass  = {cs.AI},
}

@misc{santu2023teler,
    author        = {Karmaker Santu, Shubhra Kanti and Feng, Dongji},
    title         = {{TELeR}: A General Taxonomy of {LLM} Prompts for Benchmarking Complex Tasks},
    year          = {2023},
    eprint        = {2305.11430},
    archivePrefix = {arXiv},
    primaryClass  = {cs.AI},
}