# Page title rendered at the top of the leaderboard (English).
# FIX: the original used a single-quoted string split across multiple lines,
# which is a Python SyntaxError; triple quotes preserve the intended
# leading and trailing newlines in the value.
TITLE = '''
🎉 🎉 The OpsEval Leaderboard 👏 👏
'''
# Markdown body shown under the title (English): project blurb plus the
# BibTeX citation for the OpsEval paper (arXiv:2310.07637). Rendered as-is
# by the UI layer; do not edit the citation text without updating the paper.
INTRODUCTION_TEXT = """
# 🚀 About OpsEval
The OpsEval dataset represents a pioneering effort in the evaluation of Artificial Intelligence for IT Operations (AIOps), focusing on the application of Large Language Models (LLMs) within this domain. In an era where IT operations are increasingly reliant on AI technologies for automation and efficiency, understanding the performance of LLMs in operational tasks becomes crucial. OpsEval offers a comprehensive task-oriented benchmark specifically designed for assessing LLMs in various crucial IT Ops scenarios.
This dataset is motivated by the emerging trend of utilizing AI in automated IT operations, as predicted by Gartner, and the remarkable capabilities exhibited by LLMs in NLP-related tasks. OpsEval aims to bridge the gap in evaluating these models' performance in AIOps tasks, including root cause analysis of failures, generation of operations and maintenance scripts, and summarizing alert information.
# 📃 Citation
```
@misc{liu2023opseval,
title={OpsEval: A Comprehensive Task-Oriented AIOps Benchmark for Large Language Models},
author={Yuhe Liu and Changhua Pei and Longlong Xu and Bohan Chen and Mingze Sun and Zhirui Zhang and Yongqian Sun and Shenglin Zhang and Kun Wang and Haiming Zhang and Jianhui Li and Gaogang Xie and Xidao Wen and Xiaohui Nie and Dan Pei},
year={2023},
eprint={2310.07637},
archivePrefix={arXiv},
primaryClass={cs.AI}
}
```
"""
# Page title rendered at the top of the leaderboard (Chinese).
# FIX: the original single-quoted string was broken across two lines,
# which is a Python SyntaxError; triple quotes keep the trailing
# newline that the original layout implied.
ZH_TITLE = '''🎉 🎉 OpsEval 排行榜 👏 👏
'''
# Markdown body shown under the title (Chinese): same project blurb and
# BibTeX citation as INTRODUCTION_TEXT, localised for the zh UI. The
# citation block is identical to the English version and must stay so.
ZH_INTRODUCTION_TEXT = """
# 🚀 关于 OpsEval
OpsEval 数据集代表了在 IT 运维(AIOps)领域评估人工智能(AI)的一次开创性努力,重点关注大型语言模型(LLMs)在该领域的应用。在一个越来越依赖 AI 技术进行自动化和提高效率的 IT 运维时代,了解 LLMs 在运维任务中的表现变得至关重要。OpsEval 提供了一个全面的任务导向基准,专门用于评估 LLMs 在各种重要 IT 运维场景中的表现。
该数据集的动机源于 Gartner 预测的利用 AI 自动化 IT 运维的趋势,以及 LLMs 在自然语言处理(NLP)相关任务中展示的显著能力。OpsEval 旨在弥合评估这些模型在 AIOps 任务中的表现的差距,包括故障根因分析、运维脚本的生成和警报信息的总结。
# 📃 引用
```
@misc{liu2023opseval,
title={OpsEval: A Comprehensive Task-Oriented AIOps Benchmark for Large Language Models},
author={Yuhe Liu and Changhua Pei and Longlong Xu and Bohan Chen and Mingze Sun and Zhirui Zhang and Yongqian Sun and Shenglin Zhang and Kun Wang and Haiming Zhang and Jianhui Li and Gaogang Xie and Xidao Wen and Xiaohui Nie and Dan Pei},
year={2023},
eprint={2310.07637},
archivePrefix={arXiv},
primaryClass={cs.AI}
}
```
"""