FudanSELab committed on
Commit 2ae3d1c
1 parent: 0e5c4f8

Upload ClassEval.py

Files changed (1)
  1. ClassEval.py +105 -0
ClassEval.py ADDED
@@ -0,0 +1,105 @@
+ import json
+ import datasets
+
+ _DESCRIPTION = """\
+ FudanSELab ClassEval
+ """
+ _URL = "ClassEval_data.json"
+
+ _CITATION = """\
+ @misc{du2023classeval,
+       title={ClassEval: A Manually-Crafted Benchmark for Evaluating LLMs on Class-level Code Generation},
+       author={Xueying Du and Mingwei Liu and Kaixin Wang and Hanlin Wang and Junwei Liu and Yixuan Chen and Jiayi Feng and Chaofeng Sha and Xin Peng and Yiling Lou},
+       year={2023},
+       eprint={2308.01861},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }"""
+
+ _HOMEPAGE = "https://github.com/FudanSELab/ClassEval"
+
+ _LICENSE = "MIT"
+
+ class ClassEval(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="class_eval",
+             version=datasets.Version("1.0.0"),
+             description=_DESCRIPTION,
+         )
+     ]
+
+     def _info(self):
+         method_feature = datasets.Features(
+             {
+                 "method_name": datasets.Value("string"),
+                 "method_description": datasets.Value("string"),
+                 "test_class": datasets.Value("string"),
+                 "test_code": datasets.Value("string"),
+                 "solution_code": datasets.Value("string"),
+                 "dependencies": {
+                     "Standalone": datasets.Value("bool"),
+                     "lib_dependencies": datasets.Sequence(datasets.Value("string")),
+                     "field_dependencies": datasets.Sequence(datasets.Value("string")),
+                     "method_dependencies": datasets.Sequence(datasets.Value("string")),
+                 }
+             }
+         )
+
+         features = datasets.Features(
+             {
+                 "task_id": datasets.Value("string"),
+                 "skeleton": datasets.Value("string"),
+                 "test": datasets.Value("string"),
+                 "solution_code": datasets.Value("string"),
+                 "import_statement": datasets.Sequence(datasets.Value("string")),
+                 "class_description": datasets.Value("string"),
+                 "methods_info": [method_feature],
+                 "class_name": datasets.Value("string"),
+                 "test_classes": datasets.Sequence(datasets.Value("string")),
+                 "class_constructor": datasets.Value("string"),
+                 "fields": datasets.Sequence(datasets.Value("string")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath):
+         key = 0
+         with open(filepath, encoding='utf-8') as f:
+             cont = json.load(f)
+             for row in cont:
+                 yield key, {
+                     "task_id": row["task_id"],
+                     "skeleton": row["skeleton"],
+                     "test": row["test"],
+                     "solution_code": row["solution_code"],
+                     "import_statement": row["import_statement"],
+                     "class_description": row["class_description"],
+                     "methods_info": row["methods_info"],
+                     "class_name": row["class_name"],
+                     "test_classes": row["test_classes"],
+                     "class_constructor": row["class_constructor"],
+                     "fields": row["fields"],
+                 }
+                 key += 1
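
With this loading script uploaded, the benchmark is meant to be consumed through the standard datasets API. A minimal usage sketch, assuming the dataset lives under the Hugging Face repo id FudanSELab/ClassEval; the script itself registers only a single test split, and recent versions of the datasets library may additionally require trust_remote_code=True for datasets that ship a loading script:

from datasets import load_dataset

# Assumed repo id; the loading script above defines only a "test" split.
ds = load_dataset("FudanSELab/ClassEval", split="test")

sample = ds[0]
print(sample["task_id"])       # benchmark task identifier
print(sample["class_name"])    # name of the class to be generated
print([m["method_name"] for m in sample["methods_info"]])  # per-method metadata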