Datasets:

Modalities:
Text
Formats:
parquet
Size:
< 1K
ArXiv:
DOI:
Libraries:
Datasets
pandas
License:
codelion committed on
Commit
6ffdbc7
·
verified ·
1 Parent(s): ea44e2f

Upload 2 files

Browse files
Files changed (2) hide show
  1. requirements.txt +157 -0
  2. static-analysis-eval.py +193 -0
requirements.txt ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==0.29.3
2
+ aiohttp==3.9.5
3
+ aiosignal==1.3.1
4
+ annotated-types==0.6.0
5
+ anthropic==0.25.6
6
+ anyio==4.3.0
7
+ appdirs==1.4.4
8
+ asttokens==2.4.1
9
+ asyncio==3.4.3
10
+ attrs==23.2.0
11
+ bitsandbytes==0.42.0
12
+ blinker==1.8.2
13
+ boltons==21.0.0
14
+ bracex==2.4
15
+ cachetools==5.3.3
16
+ certifi==2024.2.2
17
+ cffi==1.16.0
18
+ charset-normalizer==3.3.2
19
+ click==8.1.7
20
+ click-option-group==0.5.6
21
+ colorama==0.4.6
22
+ contourpy==1.2.1
23
+ cryptography==42.0.8
24
+ cycler==0.12.1
25
+ datasets==2.19.0
26
+ defusedxml==0.7.1
27
+ Deprecated==1.2.14
28
+ dill==0.3.8
29
+ diskcache==5.6.3
30
+ distro==1.9.0
31
+ docker==7.0.0
32
+ docker-pycreds==0.4.0
33
+ docstring_parser==0.16
34
+ einops==0.7.0
35
+ exceptiongroup==1.2.1
36
+ face==22.0.0
37
+ filelock==3.13.4
38
+ FLAML==2.1.2
39
+ Flask==3.0.3
40
+ fonttools==4.53.0
41
+ frozenlist==1.4.1
42
+ fsspec==2024.3.1
43
+ gitdb==4.0.11
44
+ GitPython==3.1.43
45
+ glom==22.1.0
46
+ google-ai-generativelanguage==0.6.6
47
+ google-api-core==2.18.0
48
+ google-api-python-client==2.140.0
49
+ google-auth==2.29.0
50
+ google-auth-httplib2==0.2.0
51
+ google-auth-oauthlib==1.2.1
52
+ google-generativeai==0.7.2
53
+ googleapis-common-protos==1.63.0
54
+ gradio_client==0.15.1
55
+ grpcio==1.62.2
56
+ grpcio-status==1.62.2
57
+ h11==0.14.0
58
+ httpcore==1.0.5
59
+ httplib2==0.22.0
60
+ httpx==0.25.2
61
+ huggingface-hub==0.22.2
62
+ idna==3.7
63
+ itsdangerous==2.2.0
64
+ Jinja2==3.1.3
65
+ joblib==1.4.0
66
+ jsonschema==4.21.1
67
+ jsonschema-specifications==2023.12.1
68
+ kiwisolver==1.4.5
69
+ lark==1.1.9
70
+ markdown-it-py==3.0.0
71
+ MarkupSafe==2.1.5
72
+ matplotlib==3.9.0
73
+ mdurl==0.1.2
74
+ mistralai==0.1.8
75
+ mpmath==1.3.0
76
+ multidict==6.0.5
77
+ multiprocess==0.70.16
78
+ networkx==3.3
79
+ nltk==3.8.1
80
+ numpy==1.26.4
81
+ oauthlib==3.2.2
82
+ openai==1.20.0
83
+ orjson==3.10.1
84
+ packaging==24.0
85
+ pandas==2.2.2
86
+ peewee==3.17.3
87
+ peft @ git+https://github.com/huggingface/peft.git@5a4b9cade64bac8afdff5006ee9dd815c90b5469
88
+ pillow==10.3.0
89
+ proto-plus==1.23.0
90
+ protobuf==4.25.3
91
+ psutil==5.9.8
92
+ pyarrow==15.0.2
93
+ pyarrow-hotfix==0.6
94
+ pyasn1==0.6.0
95
+ pyasn1_modules==0.4.0
96
+ pyautogen==0.2.26
97
+ pycparser==2.22
98
+ pydantic==2.7.0
99
+ pydantic_core==2.18.1
100
+ PyGithub==2.3.0
101
+ Pygments==2.17.2
102
+ PyJWT==2.8.0
103
+ PyNaCl==1.5.0
104
+ pyparsing==3.1.2
105
+ PyQt5==5.15.10
106
+ PyQt5-Qt5==5.15.14
107
+ PyQt5-sip==12.13.0
108
+ PySMT==0.9.6
109
+ python-dateutil==2.9.0.post0
110
+ python-dotenv==1.0.1
111
+ pytz==2024.1
112
+ PyYAML==6.0.1
113
+ referencing==0.34.0
114
+ regex==2024.4.16
115
+ requests==2.31.0
116
+ requests-oauthlib==2.0.0
117
+ rich==13.7.1
118
+ rpds-py==0.18.0
119
+ rsa==4.9
120
+ ruamel.yaml==0.17.40
121
+ ruamel.yaml.clib==0.2.8
122
+ safetensors==0.4.3
123
+ scikit-learn==1.4.2
124
+ scipy==1.13.0
125
+ semgrep==1.69.0
126
+ sentence-transformers==2.7.0
127
+ sentry-sdk==1.45.0
128
+ setproctitle==1.3.3
129
+ setuptools==69.5.1
130
+ shtab==1.7.1
131
+ six==1.16.0
132
+ smmap==5.0.1
133
+ sniffio==1.3.1
134
+ sympy==1.12
135
+ tabulate==0.9.0
136
+ termcolor==2.4.0
137
+ threadpoolctl==3.4.0
138
+ tiktoken==0.6.0
139
+ tokenizers==0.19.1
140
+ tomli==2.0.1
141
+ torch==2.2.2
142
+ tqdm==4.66.2
143
+ transformers @ git+https://github.com/huggingface/transformers.git@8c12690cecbb97e187861e386f7a0ac790e4236c
144
+ trl==0.8.5
145
+ typing_extensions==4.11.0
146
+ tyro==0.8.3
147
+ tzdata==2024.1
148
+ uritemplate==4.1.1
149
+ urllib3==2.2.1
150
+ wandb==0.16.6
151
+ wcmatch==8.5.1
152
+ websockets==11.0.3
153
+ Werkzeug==3.0.3
154
+ wrapt==1.16.0
155
+ xxhash==3.4.1
156
+ yarl==1.9.4
157
+ z3-solver==4.13.0.0
static-analysis-eval.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import random
4
+ import pickle
5
+ import time
6
+ from openai import OpenAI
7
+ from tqdm import tqdm
8
+ from functools import partial
9
+ import multiprocessing
10
+ from datasets import load_dataset
11
+ from sklearn.feature_extraction.text import TfidfVectorizer
12
+ from sklearn.metrics.pairwise import cosine_similarity
13
+ import numpy as np
14
+
15
+ client = OpenAI()
16
+
17
+ def load_cache():
18
+ if os.path.exists('cache.pkl'):
19
+ with open('cache.pkl', 'rb') as f:
20
+ return pickle.load(f)
21
+ return {}
22
+
23
+ def save_cache(cache):
24
+ with open('cache.pkl', 'wb') as f:
25
+ pickle.dump(cache, f)
26
+
27
+ def fetch_dataset_examples(prompt, num_examples=3, use_similarity=True):
28
+ dataset = load_dataset("patched-codes/synth-vuln-fixes", split="train")
29
+
30
+ if use_similarity:
31
+ user_messages = [
32
+ next(msg['content'] for msg in item['messages'] if msg['role'] == 'user')
33
+ for item in dataset
34
+ ]
35
+
36
+ vectorizer = TfidfVectorizer().fit(user_messages + [prompt])
37
+ user_vectors = vectorizer.transform(user_messages)
38
+ prompt_vector = vectorizer.transform([prompt])
39
+
40
+ similarities = cosine_similarity(prompt_vector, user_vectors)[0]
41
+ top_indices = np.argsort(similarities)[-num_examples:][::-1]
42
+ else:
43
+ top_indices = np.random.choice(len(dataset), num_examples, replace=False)
44
+
45
+ few_shot_messages = []
46
+ for index in top_indices:
47
+ py_index = int(index)
48
+ messages = dataset[py_index]["messages"]
49
+
50
+ dialogue = [msg for msg in messages if msg['role'] != 'system']
51
+ few_shot_messages.extend(dialogue)
52
+
53
+ return few_shot_messages
54
+
55
+ def get_fixed_code_fine_tuned(prompt, few_shot_messages):
56
+ system_message = (
57
+ "You are an AI assistant specialized in fixing code vulnerabilities. "
58
+ "Your task is to provide corrected code that addresses the reported security issue. "
59
+ "Always maintain the original functionality while improving security. "
60
+ "Be precise and make only necessary changes. "
61
+ "Maintain the original code style and formatting unless it directly relates to the vulnerability. "
62
+ "Pay attention to data flow between sources and sinks when provided."
63
+ )
64
+
65
+ messages = [
66
+ {"role": "system", "content": system_message},
67
+ ]
68
+
69
+ messages.extend(few_shot_messages)
70
+ messages.append({"role": "user", "content": prompt})
71
+
72
+ response = client.chat.completions.create(
73
+ model="gpt-4o-mini",
74
+ messages=messages,
75
+ max_tokens=512,
76
+ temperature=0.2,
77
+ top_p=0.95
78
+ )
79
+
80
+ try:
81
+ return response.choices[0].message.content
82
+ except Exception as e:
83
+ raise Exception(f"API call failed: {str(e)}")
84
+
85
+ def process_file(test_case, cache):
86
+ file_name = test_case["file_name"]
87
+ input_file = "staticeval/" + file_name
88
+
89
+ if input_file in cache:
90
+ tqdm.write(f"Skipping {input_file} (cached)")
91
+ return cache[input_file]
92
+
93
+ file_text = test_case["source"]
94
+ test_cwe = test_case["cwe"].strip()
95
+ output_file = input_file + "_fixed.py"
96
+ tmp_file = input_file + ".output.json"
97
+
98
+ with open(input_file, "w") as file_object:
99
+ file_object.write(file_text)
100
+
101
+ if os.path.exists(tmp_file):
102
+ os.remove(tmp_file)
103
+
104
+ tqdm.write("Scanning file " + input_file + "...")
105
+ scan_command_input = f"semgrep --config p/python {input_file} --output {tmp_file} --json > /dev/null 2>&1"
106
+ os.system(scan_command_input)
107
+
108
+ with open(tmp_file, 'r') as jf:
109
+ data = json.load(jf)
110
+
111
+ if len(data["errors"]) == 0:
112
+ if len(data["results"]) == 0:
113
+ tqdm.write(input_file + " has no vulnerabilities")
114
+ result = False
115
+ else:
116
+ tqdm.write("Vulnerability found in " + input_file + "...")
117
+ cwe = data["results"][0]["extra"]["metadata"]["cwe"][0]
118
+ lines = data["results"][0]["extra"]["lines"]
119
+ message = data["results"][0]["extra"]["message"]
120
+
121
+ prompt = f"""Vulnerability Report:
122
+ - Type: {cwe}
123
+ - Location: {lines}
124
+ - Description: {message}
125
+
126
+ Original Code:
127
+ ```
128
+ {file_text}
129
+ ```
130
+
131
+ Task: Fix the vulnerability in the code above. Provide only the complete fixed code without explanations or comments. Make minimal changes necessary to address the security issue while preserving the original functionality."""
132
+
133
+ few_shot_messages = fetch_dataset_examples(prompt, 3, True)
134
+ response = get_fixed_code_fine_tuned(prompt, few_shot_messages)
135
+
136
+ if "```python" in response:
137
+ idx = response.find("```python")
138
+ shift = len("```python")
139
+ fixed_code = response[idx + shift :]
140
+ else:
141
+ fixed_code = response
142
+
143
+ stop_words = ["```", "assistant"]
144
+ for w in stop_words:
145
+ if w in fixed_code:
146
+ fixed_code = fixed_code[:fixed_code.find(w)]
147
+
148
+ if len(fixed_code) < 400 or all(line.strip().startswith("#") for line in fixed_code.split('\n') if line.strip()):
149
+ result = False
150
+ else:
151
+ with open(output_file, 'w') as wf:
152
+ wf.write(fixed_code)
153
+
154
+ scan_command_output = f"semgrep --config p/python {output_file} --output {tmp_file} --json > /dev/null 2>&1"
155
+ os.system(scan_command_output)
156
+
157
+ with open(tmp_file, 'r') as jf:
158
+ data = json.load(jf)
159
+
160
+ if len(data["errors"]) == 0 and len(data["results"]) == 0:
161
+ tqdm.write("Passing response for " + input_file + " at 1 ...")
162
+ result = True
163
+ else:
164
+ result = False
165
+
166
+ if os.path.exists(tmp_file):
167
+ os.remove(tmp_file)
168
+
169
+ cache[input_file] = result
170
+ save_cache(cache)
171
+ return result
172
+
173
+ def process_test_case(test_case, cache):
174
+ return process_file(test_case, cache)
175
+
176
+ def main():
177
+ dataset = load_dataset("patched-codes/static-analysis-eval", split="train")
178
+ data = [{"file_name": item["file_name"], "source": item["source"], "cwe": item["cwe"]} for item in dataset]
179
+
180
+ cache = load_cache()
181
+ total_tests = len(data)
182
+
183
+ process_func = partial(process_test_case, cache=cache)
184
+
185
+ with multiprocessing.Pool() as pool:
186
+ results = list(tqdm(pool.imap_unordered(process_func, data), total=total_tests))
187
+
188
+ passing_tests = sum(results)
189
+
190
+ print(f"Results for StaticAnalysisEval: {passing_tests/total_tests*100}%")
191
+
192
+ if __name__ == '__main__':
193
+ main()