# OpenAI chat-completion helper (notebook-style cell script).
#%%
# Environment and client setup: load OPENAI_API_KEY from a .env file and
# build a module-level OpenAI client shared by the helper(s) below.
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()

client = OpenAI(
    # This is the default lookup and could be omitted; kept explicit so the
    # required environment variable is visible at the construction site.
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def chat(messages):
    """Send *messages* to the OpenAI chat-completions endpoint.

    Parameters
    ----------
    messages : list[dict]
        Conversation history in the OpenAI chat format, e.g.
        ``[{"role": "user", "content": "..."}]``.

    Returns
    -------
    The raw ChatCompletion response object; the generated text is at
    ``response.choices[0].message.content``.

    Notes
    -----
    Uses the module-level ``client``; requires OPENAI_API_KEY to be set.
    """
    return client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        temperature=0.45,  # moderately deterministic sampling
        max_tokens=2117,
        top_p=0.29,  # aggressive nucleus truncation — intentional? TODO confirm
        frequency_penalty=0,
        presence_penalty=0,
        response_format={"type": "text"},
    )
# Alternative provider (Groq) example, kept for reference:
# import os
# from groq import Groq
# client = Groq(
#     api_key=os.environ.get("GROQ_API_KEY"),
# )
# chat_completion = client.chat.completions.create(
#     messages=[
#         {
#             "role": "user",
#             "content": "Explain the importance of fast language models",
#         }
#     ],
#     model="llama-3.1-8b-instant",
# )
# print(chat_completion.choices[0].message.content)