Use GPT-4 Turbo
Browse files
app.py
CHANGED
@@ -5,6 +5,7 @@ import csv
|
|
5 |
|
6 |
default_role = "I require someone who is an Irritable Bowel Syndrome doctor, Nutritionist and Chef, to recommend a single delicious recipe that uses low fodmap ingredients. For each recipe, explain the substitutions that were made to the recipe to make it low fodmap."
|
7 |
classification_msg = { "role": "user", "content" : "As an AI language model you are allowed to create tables in markdown format. Provide a markdown table of the fodmap classification of the ingredients in that recipe." }
|
|
|
8 |
|
9 |
def get_empty_state():
    """Return a fresh conversation state.

    The state dict tracks the cumulative token usage across API calls
    and the chat message history exchanged with the model.
    """
    fresh_state = {"total_tokens": 0, "messages": []}
    return fresh_state
|
@@ -42,17 +43,32 @@ def submit_message(user_token, prompt, prompt_template, good_foods, bad_foods, t
|
|
42 |
table = ""
|
43 |
|
44 |
try:
|
45 |
-
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
46 |
-
print(completion)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
history.append(prompt_msg)
|
|
|
49 |
history.append(completion.choices[0].message.to_dict())
|
50 |
|
51 |
state['total_tokens'] += completion['usage']['total_tokens']
|
52 |
|
53 |
-
completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
|
54 |
-
print(completion2)
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
table = completion2.choices[0].message.to_dict()['content'].split("\n\n")[1]
|
57 |
print(table)
|
58 |
|
|
|
5 |
|
6 |
default_role = "I require someone who is an Irritable Bowel Syndrome doctor, Nutritionist and Chef, to recommend a single delicious recipe that uses low fodmap ingredients. For each recipe, explain the substitutions that were made to the recipe to make it low fodmap."
|
7 |
classification_msg = { "role": "user", "content" : "As an AI language model you are allowed to create tables in markdown format. Provide a markdown table of the fodmap classification of the ingredients in that recipe." }
|
8 |
+
LLM_MODEL = 'gpt-4-1106-preview'
|
9 |
|
10 |
def get_empty_state():
    """Build and return an empty session state.

    Keys:
        total_tokens: running count of tokens consumed by API calls.
        messages: chat history accumulated so far.
    """
    return dict(total_tokens=0, messages=[])
|
|
|
43 |
table = ""
|
44 |
|
45 |
try:
|
46 |
+
# completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
47 |
+
# print(completion)
|
48 |
+
client = OpenAI(api_key=OPEN_AI_KEY)
|
49 |
+
messages1 = system_prompt + food_priming_prompt + history[-context_length*2:] + [prompt_msg]
|
50 |
+
completion = client.chat.completions.create(
|
51 |
+
model=LLM_MODEL,
|
52 |
+
messages=messages1,
|
53 |
+
temperature=temperature,
|
54 |
+
max_tokens=max_tokens,
|
55 |
+
stream=False)
|
56 |
|
57 |
history.append(prompt_msg)
|
58 |
+
answer = completion.choices[0].message.content
|
59 |
history.append(completion.choices[0].message.to_dict())
|
60 |
|
61 |
state['total_tokens'] += completion['usage']['total_tokens']
|
62 |
|
63 |
+
# completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
|
64 |
+
# print(completion2)
|
65 |
+
messages2 = system_prompt + food_priming_prompt + history[-1:] + [classification_msg]
|
66 |
+
completion2 = client.chat.completions.create(
|
67 |
+
model=LLM_MODEL,
|
68 |
+
messages=messages2,
|
69 |
+
temperature=temperature,
|
70 |
+
max_tokens=max_tokens,
|
71 |
+
stream=False)
|
72 |
table = completion2.choices[0].message.to_dict()['content'].split("\n\n")[1]
|
73 |
print(table)
|
74 |
|