Titovs committed
Commit 819b804
1 Parent(s): 3ecc27e

Update README.md

Files changed (1)
  1. README.md +23 -23
README.md CHANGED
@@ -44,22 +44,23 @@ During the code generation step, we use early stopping on the `}\n}` sequence to
 The early stopping method, post-processing steps, and evaluation code are available in the example below.
 
 ```python
-import torch
-import jsonlines
+import json
 import re
-from tqdm import tqdm
+
+from datasets import load_dataset
+import jsonlines
+import torch
 from transformers import (
     AutoTokenizer,
     AutoModelForCausalLM,
     StoppingCriteria,
     StoppingCriteriaList,
 )
-from mxeval.data import get_data
+from tqdm import tqdm
 from mxeval.evaluation import evaluate_functional_correctness
-from datasets import load_dataset
+
 
 class StoppingCriteriaSub(StoppingCriteria):
-
     def __init__(self, stops, tokenizer):
         (StoppingCriteria.__init__(self),)
         self.stops = rf"{stops}"
@@ -75,16 +76,8 @@ class StoppingCriteriaSub(StoppingCriteria):
 
 
 def generate(problem):
-
-
-    stopping_criteria = StoppingCriteriaList(
-        [
-            StoppingCriteriaSub(
-                stops= "\n}\n", tokenizer=tokenizer
-            )
-        ]
-    )
-
+    criterion = StoppingCriteriaSub(stops="\n}\n", tokenizer=tokenizer)
+    stopping_criteria = StoppingCriteriaList([criterion])
 
     problem = tokenizer.encode(problem, return_tensors="pt").to('cuda')
     sample = model.generate(
@@ -99,22 +92,20 @@ def generate(problem):
     )
 
     answer = tokenizer.decode(sample[0], skip_special_tokens=True)
-
     return answer
 
-def clean_asnwer(code):
 
+def clean_asnwer(code):
     # Clean comments
     code_without_line_comments = re.sub(r"//.*", "", code)
     code_without_all_comments = re.sub(
         r"/\*.*?\*/", "", code_without_line_comments, flags=re.DOTALL
     )
-
     #Clean signatures
     lines = code.split("\n")
     for i, line in enumerate(lines):
         if line.startswith("fun "):
-            return "\n".join(lines[i + 1 :])
+            return "\n".join(lines[i + 1:])
 
     return code
 
@@ -123,10 +114,9 @@ model_name = "JetBrains/CodeLlama-7B-Kexer"
 dataset = load_dataset("jetbrains/Kotlin_HumanEval")['train']
 problem_dict = {problem['task_id']: problem for problem in dataset}
 
-model = AutoModelForCausalLM.from_pretrained(model_name,torch_dtype=torch.bfloat16).to('cuda')
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16).to('cuda')
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-
 output = []
 for key in tqdm(list(problem_dict.keys()), leave=False):
     problem = problem_dict[key]["prompt"]
@@ -134,7 +124,6 @@ for key in tqdm(list(problem_dict.keys()), leave=False):
     answer = clean_asnwer(answer)
     output.append({"task_id": key, "completion": answer, "language": "kotlin"})
 
-
 output_file = f"answers"
 with jsonlines.open(output_file, mode="w") as writer:
     for line in output:
@@ -148,6 +137,17 @@ evaluate_functional_correctness(
     problem_file=problem_dict,
 )
 
+with open(output_file + '_results.jsonl') as fp:
+    total = 0
+    correct = 0
+    for line in fp:
+        sample_res = json.loads(line)
+        print(sample_res)
+        total += 1
+        correct += sample_res['passed']
+
+print(f'Pass rate: {correct/total}')
+
 ```
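For orientation: the hunks above touch only the `__init__` of `StoppingCriteriaSub`; the `__call__` method that actually halts generation on the `\n}\n` pattern sits in lines this commit does not change, so it never appears in the diff. The sketch below is one plausible shape for such a regex-based criterion against the `transformers` `StoppingCriteria` API. It is an assumption for illustration, not the file's actual code:

```python
import re

import torch
from transformers import StoppingCriteria


class RegexStoppingCriteria(StoppingCriteria):
    """Sketch of a regex-based stopping criterion (assumed, not the repo's code)."""

    def __init__(self, stops, tokenizer):
        super().__init__()
        self.stops = rf"{stops}"  # mirrors the diff; rf"{...}" here just interpolates the string
        self.tokenizer = tokenizer

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Decode everything generated so far and stop once the pattern appears,
        # e.g. "\n}\n" marking the closing brace of a top-level Kotlin function.
        text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)
        return re.search(self.stops, text) is not None
```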
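As a quick check on the post-processing, the signature-stripping loop from `clean_asnwer` can be exercised on its own. The Kotlin completion string here is invented for illustration:

```python
# The model echoes the Kotlin signature from the prompt, so clean_asnwer drops
# everything up to and including the first "fun " line, keeping only the body.
completion = (
    "fun add(a: Int, b: Int): Int {\n"
    "    return a + b\n"
    "}"
)
lines = completion.split("\n")
for i, line in enumerate(lines):
    if line.startswith("fun "):
        print("\n".join(lines[i + 1:]))  # -> "    return a + b\n}"
        break
```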
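One detail of the new evaluation tail worth spelling out: `correct += sample_res['passed']` counts passes because JSON booleans load as Python `bool`, which is an `int` subclass. A tiny self-check with an invented result record:

```python
import json

# Invented record in the shape the tail reads from answers_results.jsonl.
sample_res = json.loads('{"task_id": "kotlin/0", "passed": true}')

correct = 0
correct += sample_res['passed']  # True counts as 1, False as 0
assert correct == 1
```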