DATA_DIR = "./data/data.json"
|
MODEL_INFO = ["Model Name", "Language Model"] |
|
AVG_INFO = ["Avg. All"] |
|
ME_INFO = ["Method Name", "Language Model"]
|
KE_Data_INFO = ["FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]
|
|
|
KE_TASK_INFO = ["Avg. All", "FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]
|
KE_CSV_DIR = "./ke_files/result-kgc.csv" |
|
DATA_COLUMN_NAMES = ["locality", "labels", "concept", "text"]
|
KE_TABLE_INTRODUCTION = """In the table below, we summarize the performance of all models on each task. We use the F1 score (%) as the primary evaluation metric for every task.
"""
|
RESULT_COLUMN_NAMES = ["DataSet", "Metric", "Metric", "ICE", "AdaLoRA", "MEND", "ROME", "MEMIT", "FT-L", "FT"]
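
# A minimal sketch of how the leaderboard CSV above might be read and the
# "Avg. All" column recomputed. It assumes result-kgc.csv has one row per
# method and one numeric F1 column per dataset in KE_Data_INFO, and that
# pandas is available; none of this is guaranteed by this file alone.
import pandas as pd


def load_ke_results(csv_path: str = KE_CSV_DIR) -> pd.DataFrame:
    """Read the KGC result table and recompute the per-method F1 average."""
    df = pd.read_csv(csv_path)
    # Average the F1 scores across the five datasets; an assumption about
    # how the "Avg. All" column in KE_TASK_INFO is derived.
    df["Avg. All"] = df[KE_Data_INFO].mean(axis=1)
    return df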
|
DATA_STRUCT = """
| Datasets | ZsRE   | Wiki<sub>recent</sub> | Wiki<sub>counterfact</sub> | WikiBio |
|----------|--------|-----------------------|----------------------------|---------|
| Train    | 10,000 | 570                   | 1,455                      | 592     |
| Test     | 1,230  | 1,266                 | 885                        | 1,392   |
"""
|
TITLE = """# KnowEdit: A Dataset for Knowledge Editing"""
|
|
|
BACKGROUND = """
Large Language Models (LLMs) have shown extraordinary capabilities in understanding and generating text that closely mirrors human communication. However, a primary limitation lies in their significant computational demands during training, which arise from their extensive parameterization. This has spurred growing interest in efficient, lightweight methods for on-the-fly model modification. To this end, recent years have seen a surge of techniques for knowledge editing of LLMs, which aim to efficiently modify an LLM's behavior within specific domains while preserving overall performance across other inputs.
"""
|
|
|
LEADERBORAD_INTRODUCTION = """
This is the KnowEdit dataset for knowledge editing. It contains six tasks: ZsRE, Wiki<sub>recent</sub>, Wiki<sub>counterfact</sub>, WikiBio, ConvSent, and Sanitation. This repo covers the first four tasks; the data for ConvSent and Sanitation can be obtained from their original papers.
"""
|
DATA_SCHEMA = """{
    "subject": xxx,
    "target_new": xxx,
    "prompt": xxx,
    "portability": {
        "Logical_Generalization": [],
        ...
    },
    "locality": {
        "Relation_Specificity": [],
        ...
    }
}"""
|
|
|
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results" |
|
CITATION_BUTTON_TEXT = r"""@article{tan2023evaluation,
  author  = {Yiming Tan and Dehai Min and Yu Li and Wenbo Li and Nan Hu and Yongrui Chen and Guilin Qi},
  title   = {Evaluation of ChatGPT as a Question Answering System for Answering Complex Questions},
  journal = {arXiv preprint arXiv:2303.07992},
  year    = {2023}
}

@article{gui2023InstructIE,
  author  = {Honghao Gui and Jintian Zhang and Hongbin Ye and Ningyu Zhang},
  title   = {InstructIE: {A} Chinese Instruction-based Information Extraction Dataset},
  journal = {arXiv preprint arXiv:2305.11527},
  year    = {2023}
}

@article{yao2023edit,
  author  = {Yunzhi Yao and Peng Wang and Bozhong Tian and Siyuan Cheng and Zhoubo Li and Shumin Deng and Huajun Chen and Ningyu Zhang},
  title   = {Editing Large Language Models: Problems, Methods, and Opportunities},
  journal = {arXiv preprint arXiv:2305.13172},
  year    = {2023}
}
"""
|
|