# MCQ_Creation/app.py
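"""Streamlit app that generates multiple-choice questions (MCQs) from input
text, using either an open-source LLM (via transformers) or the OpenAI
chat completion API."""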
import os

import streamlit as st
import openai
from transformers import pipeline

# Load an instruction-tuned open-source model once at startup.
generator = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct")
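# Note: Meta-Llama-3-8B-Instruct is a gated model on the Hugging Face Hub; it
# needs an accepted license, an access token, and substantial GPU memory.
# A sketch of authenticated loading (HF_TOKEN is an assumed environment
# variable, not part of the original app):
#
#     generator = pipeline(
#         "text-generation",
#         model="meta-llama/Meta-Llama-3-8B-Instruct",
#         token=os.environ.get("HF_TOKEN"),
#     )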
# Generate MCQs using the open-source LLM.
def generate_mcq(text, num_questions):
    questions = []
    prompt = (
        "Generate one multiple-choice question based on the "
        f"following text:\n\n{text}\n\nQuestion:"
    )
    for _ in range(num_questions):
        # max_new_tokens bounds the completion length regardless of prompt size.
        result = generator(prompt, max_new_tokens=100, num_return_sequences=1)
        # The pipeline echoes the prompt, so keep only the newly generated text.
        questions.append(result[0]["generated_text"][len(prompt):].strip())
    return questions
# Generate MCQs using the OpenAI API with gpt-3.5-turbo.
def generate_mcq_openai(text, num_questions):
    # Never hard-code API keys; read the key from the environment instead.
    openai.api_key = os.environ.get("OPENAI_API_KEY")
    messages = [
        {"role": "system", "content": "You are a helpful assistant that generates multiple-choice questions."},
        {"role": "user", "content": f"Generate {num_questions} multiple choice questions based on the following text:\n\n{text}"},
    ]
    # Legacy openai<1.0 chat-completion interface.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=450,  # cap the completion length
        n=1,
        temperature=0.7,
    )
    # The questions come back as one blob; assume blank lines separate them.
    questions = response.choices[0].message["content"].strip().split("\n\n")
    return questions
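# For reference, the same call with the openai>=1.0 client would look roughly
# like this (a sketch, not used by this app):
#
#     from openai import OpenAI
#     client = OpenAI()  # reads OPENAI_API_KEY from the environment
#     response = client.chat.completions.create(
#         model="gpt-3.5-turbo", messages=messages, max_tokens=450, temperature=0.7
#     )
#     questions = response.choices[0].message.content.strip().split("\n\n")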
# Streamlit UI
st.title("MCQ Question Generator")

text = st.text_area("Enter the text for generating MCQs:", "")
num_questions = st.selectbox("Select number of questions:", [5, 10, 15])
model_type = st.radio("Select the model to generate questions:", ("Open-Source LLM", "OpenAI API"))

if st.button("Generate MCQs"):
    if text:
        if model_type == "Open-Source LLM":
            mcq_questions = generate_mcq(text, num_questions)
        else:
            mcq_questions = generate_mcq_openai(text, num_questions)
        for i, question in enumerate(mcq_questions):
            # Split the question stem from the options at the first '?';
            # partition is safe even when a generated question has no '?'.
            stem, _, rest = question.partition("?")
            st.markdown(f"**Q{i+1}:** {stem.strip()}?")
            for option in rest.strip().split("\n"):
                if option.strip():
                    st.write(option.strip())
    else:
        st.warning("Please enter some text before generating questions.")
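# Suggested requirements.txt for this Space (assumed pins, not from the
# original repo; openai is held below 1.0 because the code above uses the
# legacy ChatCompletion interface):
#
#     streamlit
#     transformers
#     torch
#     openai<1.0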