# llm-system-prompts-benchmark / run_benchmark.py
# (Hugging Face upload artifact — Naomibas, "Upload 4 files", commit d6cf209)
"""
Use this file to test how a model does with the system prompts, with no additional context.
"""
# Import
from openai import OpenAI
import configparser
from hundred_system_prompts import *
# Read API credentials and model choice from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
openai_cfg = config['openai']
# Model defaults to gpt-4 when the config omits it.
client = OpenAI(api_key=openai_cfg['api_key'])
model_name = openai_cfg.get('model', fallback='gpt-4')
# Create (or truncate) the per-model results file.
# Opening in "w" mode already truncates, so no explicit write("") is needed;
# encoding matches the utf-8 appends performed later in the run loop.
filename = f"{model_name}.txt"
with open(filename, "w", encoding='utf-8'):
    pass
# Loop through prompts: query the model once per (system prompt, probe) pair,
# score the reply with the prompt's checker lambda, and log one record per prompt.
# The file is opened once for the whole run; the context manager flushes and
# closes it even if an iteration raises, so partial results are preserved.
with open(filename, "a", encoding='utf-8') as file:
    for i, (prompt, probe, lambda_function) in enumerate(system_prompts):
        history = [
            {"role": "system", "content": prompt},
            {"role": "user", "content": probe},
        ]
        completion = client.chat.completions.create(
            model = model_name,
            messages = history,
            temperature = 1,
        )
        # message.content may be None in the v1 SDK (e.g. refusals/tool calls);
        # coerce to "" so scoring and .replace() below cannot crash.
        response = completion.choices[0].message.content or ""
        score = lambda_function(response)
        print(f"{i+1}: {score}")
        # Collapse newlines so each prompt/response stays on a single log line.
        stripped_prompt = prompt.replace('\n', '')
        stripped_response = response.replace('\n', '')
        file.write(f"{i+1}: {score}\n")
        file.write(f"\tprompt: {stripped_prompt}\n")
        file.write(f"\tresponse: {stripped_response}\n")