# main.py
"""A minimal terminal chatbot that talks to the OpenAI chat-completions API.

The user chats with a "kind and wise wizard" persona until they type "quit".
Configuration (API key, optionally model name) is read from the environment,
typically via a .env file.
"""
import os
import pprint

from dotenv import load_dotenv
from halo import Halo
from openai import OpenAI

# Load environment variables from a .env file BEFORE constructing the client,
# so OPENAI_API_KEY defined there is actually visible below.
load_dotenv()

# SECURITY FIX: never hard-code API keys in source. Read from the environment
# (populated by load_dotenv above or the parent shell).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


# Function to generate a response from the model
def generate_response(messages):
    """Send *messages* to the chat model and return the reply message object.

    Args:
        messages: list of dicts with "role" and "content" keys, the full
            conversation history to send to the model.

    Returns:
        The message object from the first completion choice
        (``response.choices[0].message``).
    """
    # Create a loading spinner so the user sees progress during the API call.
    spinner = Halo(text='Loading...', spinner='dots')
    spinner.start()

    # Model name is configurable via the environment; default preserves the
    # original behavior.
    model_name = os.getenv("MODEL_NAME", "gpt-3.5-turbo-0301")

    try:
        # Create a chat completion with the provided messages.
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=0.5,
            max_tokens=250,
        )
    finally:
        # Always stop the spinner, even if the API call raises, so the
        # terminal is not left in a spinning state.
        spinner.stop()

    # Pretty-print the messages sent to the model for debugging.
    pp = pprint.PrettyPrinter(indent=4)
    print("Request:")
    pp.pprint(messages)

    # Print the usage statistics for the completion.
    print(f"Completion tokens: {response.usage.completion_tokens}, Prompt tokens: {response.usage.prompt_tokens}, Total tokens: {response.usage.total_tokens}")

    # Return the message part of the response.
    return response.choices[0].message


# Main function to run the chatbot
def main():
    """Run the interactive chat loop until the user types "quit"."""
    # Initialize the conversation with a system message that sets the persona.
    messages = [
        {"role": "system", "content": "You are a kind and wise wizard"}
    ]

    # Continue chatting until the user types "quit".
    while True:
        input_text = input("You: ")
        if input_text.lower() == "quit":
            break

        # Add the user's message to the running conversation history.
        messages.append({"role": "user", "content": input_text})

        # Get a response from the model and keep it in the history so the
        # model retains context on the next turn.
        response = generate_response(messages)
        messages.append(response)

        # Print the assistant's response.
        print(f"Wizard: {response.content}")


# Run the main function when the script is run
if __name__ == "__main__":
    main()