# NOTE: the original paste began with "Spaces:" / "Runtime error" lines —
# residue from the Hugging Face Spaces status page this file was copied from.
# They are not part of the application and are preserved here as a comment.
# --- Imports -----------------------------------------------------------------
import os
import re

import streamlit as st
import openai
from openai import OpenAI

# --- Session-state defaults --------------------------------------------------
# Streamlit reruns the whole script on every interaction; seed these keys once
# so later widgets can rely on them existing.
if "selected_option" not in st.session_state:
    st.session_state["selected_option"] = None  # Default value
if "selected_task" not in st.session_state:
    st.session_state["selected_task"] = None  # Default value

# --- API client --------------------------------------------------------------
api_key = os.getenv("NVIDIA_API_KEY")

# Check if the API key is found.
if api_key is None:
    st.error("NVIDIA_API_KEY environment variable not found.")
    # Without a key, `client` below would be undefined and every later call
    # would crash with NameError; halt this script run cleanly instead.
    st.stop()

# OpenAI-compatible client pointed at NVIDIA's hosted-inference endpoint.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=api_key,
)
def generate_prompt(topic, difficulty, num_questions):
    """Build the quiz-generation request sent to the model.

    Args:
        topic: Subject area for the quiz questions.
        difficulty: Difficulty label (e.g. "Easy", "Medium", "Hard").
        num_questions: How many questions to ask for.

    Returns:
        A single prompt string describing the requested quiz.
    """
    return (
        f"Generate {num_questions} quiz questions on the topic '{topic}' "
        f"with a difficulty level of '{difficulty}'."
    )
def generate_ai_response(prompt, enablestreaming):
    """Send *prompt* to the NVIDIA-hosted Llama model and return its reply.

    Args:
        prompt: The prompt to send to the AI model.
        enablestreaming: When True, tokens are rendered incrementally into a
            Streamlit placeholder as they arrive; when False, the call blocks
            until the full completion is available.

    Returns:
        The model's response text, or None if the API call failed.
    """
    # System instructions pinning the model to a strict JSON quiz format.
    system_instructions = (
        "You are an AI assistant designed to generate educational "
        "questions that foster higher-order thinking skills in line "
        "with outcomes-based education. For each question, focus on "
        "evaluating skills such as analysis, synthesis, application, "
        "and evaluation rather than simple recall. Create multiple-choice "
        "questions with four answer options, clearly indicating the "
        "correct answer. Your output should strictly follow this "
        "JSON format:\n\n{\n  \"question\": \"<Insert the question "
        "text here>\",\n  \"options\": [\n    \"<Option A>\",\n    "
        "\"<Option B>\",\n    \"<Option C>\",\n    \"<Option D>\"\n  ],\n  "
        "\"correct_answer\": \"<Insert the correct option text here>\"\n}\n\n"
        "Ensure questions are designed to encourage critical thinking "
        "and align with measurable learning outcomes. The topic can "
        "range across various disciplines based on provided inputs "
        "or your general knowledge. For instance, if the topic is "
        "'AI and Machine Learning,' ensure the question engages "
        "learners in practical or theoretical applications of the "
        "subject. Ensure that every question is unique in a set of questions."
    )
    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.3-70b-instruct",
            temperature=0.5,  # moderate creativity
            top_p=1,
            max_tokens=1024,
            messages=[
                {"role": "system", "content": system_instructions},
                {"role": "user", "content": prompt},
            ],
            stream=enablestreaming,
        )
        if enablestreaming:
            # Accumulate streamed deltas and live-update a single placeholder.
            # The original code had `elif 'error' in chunk:` here, but stream
            # chunks are ChatCompletionChunk objects that do not support `in`;
            # API failures surface through the except clause below instead.
            response_container = st.empty()
            model_response = ""
            for chunk in completion:
                # Some SDK versions emit frames with an empty choices list
                # (e.g. usage-only frames) — skip them defensively.
                if not chunk.choices:
                    continue
                delta = chunk.choices[0].delta.content
                if delta is not None:
                    model_response += delta
                    response_container.write(model_response)
        else:
            # Non-streaming: the full reply is available in one object.
            model_response = completion.choices[0].message.content
        return model_response
    except Exception as e:
        # Surface any API/network failure in the UI rather than crashing.
        st.error(f"An error occurred: {e}")
        return None
# --- UI ----------------------------------------------------------------------
st.title("Quiz Question Generator")

with st.expander("About"):
    st.write(
        "This app generates quiz questions based on a given topic and difficulty level."
    )

# Step 1: Prompt user for a topic
topic = st.text_input("Enter a topic:", "")

# Step 2: Prompt for difficulty level
difficulty_levels = ["Easy", "Medium", "Hard"]
difficulty = st.selectbox("Select difficulty level:", difficulty_levels)

# Step 3: Prompt for the number of questions
num_questions = st.selectbox("Select the number of questions:", [5, 10, 15])

# Build and preview the prompt as soon as a topic is entered.
ai_prompt = ""
if topic.strip():
    ai_prompt = generate_prompt(topic, difficulty, num_questions)
    st.write(f"Generated Prompt: {ai_prompt}")
else:
    st.warning("Please enter a topic before generating a prompt.")

# Get AI response
if st.button("Get Response"):
    if topic.strip():
        with st.spinner("Thinking..."):
            response = generate_ai_response(ai_prompt, enablestreaming=True)
        # generate_ai_response returns None (after showing st.error) on
        # failure; only report success when we actually received text.
        if response is not None:
            st.success("Response generated successfully.")
    else:
        st.warning("Please generate a prompt first before getting a response.")