"""
Use this file to test how a model does with the system prompts, with no additional context.
"""

# Import
from openai import OpenAI
import configparser
from hundred_system_prompts import *


# Get config: API key and model name come from config.ini.
# NOTE: config.read() silently returns an empty list when the file is
# missing/unreadable, which would only surface later as a confusing
# KeyError on 'openai' — fail fast with a clear message instead.
config = configparser.ConfigParser()
if not config.read('config.ini'):
    raise FileNotFoundError("config.ini not found or unreadable")
client = OpenAI(api_key=config['openai']['api_key'])
model_name = config['openai'].get('model', fallback='gpt-4')

# Initialize new file: create/truncate the results file so each run starts
# clean. Open with utf-8 explicitly so the encoding matches the appends in
# the loop below — the platform default could differ (e.g. cp1252 on Windows).
filename = f"{model_name}.txt"
with open(filename, "w", encoding='utf-8') as file:
    pass  # truncate only; nothing to write yet

# Loop through prompts: run every (system prompt, probe, scorer) triple,
# print each score, and log score/prompt/response to the results file.
for i, (prompt, probe, lambda_function) in enumerate(system_prompts):
    history = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": probe}
    ]

    completion = client.chat.completions.create(
        model = model_name,
        messages = history,
        temperature = 1,
    )
    # message.content is Optional — refusals/tool calls can leave it None;
    # fall back to "" so scoring and logging don't crash mid-run.
    response = completion.choices[0].message.content or ""
    score = lambda_function(response)
    print(f"{i+1}: {score}")
    # Re-open in append mode each iteration so partial results survive a
    # crash or interruption partway through this slow, networked loop.
    with open(filename, "a", encoding='utf-8') as file:
        # Flatten to single lines so each record stays on its own line.
        stripped_prompt = prompt.replace('\n', '')
        stripped_response = response.replace('\n', '')
        file.write(f"{i+1}: {score}\n")
        file.write(f"\tprompt: {stripped_prompt}\n")
        file.write(f"\tresponse: {stripped_response}\n")