Spaces:
Sleeping
Sleeping
Create openai_utils.py
Browse files- helpers/openai_utils.py +49 -0
helpers/openai_utils.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from langchain.chat_models import ChatOpenAI
|
3 |
+
from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
|
4 |
+
from langchain.chains import LLMChain
|
5 |
+
|
6 |
+
|
7 |
+
def get_quiz_data(text, openai_api_key):
    """Generate multiple-choice quiz questions for *text* via an OpenAI chat model.

    Builds a system prompt instructing the model to emit 5 question sets,
    each a list of 4 strings (question, correct answer, two distractors),
    formatted as a Python-style list of lists, then runs an LLMChain with
    *text* as the human message.

    Args:
        text: The source text to generate questions from.
        openai_api_key: OpenAI API key forwarded to ``ChatOpenAI``.

    Returns:
        The raw model output string (expected to look like a Python list
        of lists; callers are responsible for parsing it).

    Side effects:
        On failure, reports the error with ``st.error`` and halts the
        Streamlit script with ``st.stop`` (which raises internally), so
        this function does not return on the error path.
    """
    # NOTE: plain string, NOT an f-string. The template is passed to
    # from_template(), which does its own {placeholder} parsing; an
    # f-string prefix here is unnecessary (no interpolations) and would
    # conflict with that parsing if braces were ever added to the prompt.
    template = """
You are a helpful assistant programmed to generate questions based on any text provided. For every chunk of text you receive, you're tasked with designing 5 distinct questions. Each of these questions will be accompanied by 3 possible answers: one correct answer and two incorrect ones.

For clarity and ease of processing, structure your response in a way that emulates a Python list of lists.

Your output should be shaped as follows:

1. An outer list that contains 5 inner lists.
2. Each inner list represents a set of question and answers, and contains exactly 4 strings in this order:
- The generated question.
- The correct answer.
- The first incorrect answer.
- The second incorrect answer.

Your output should mirror this structure:
[
    ["Generated Question 1", "Correct Answer 1", "Incorrect Answer 1.1", "Incorrect Answer 1.2"],
    ["Generated Question 2", "Correct Answer 2", "Incorrect Answer 2.1", "Incorrect Answer 2.2"],
    ...
]

It is crucial that you adhere to this format as it's optimized for further Python processing.

"""
    # Prompt construction is deterministic and cannot raise for our fixed
    # template, so only the model invocation lives inside the try.
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
    human_message_prompt = HumanMessagePromptTemplate.from_template("{text}")
    chat_prompt = ChatPromptTemplate.from_messages(
        [system_message_prompt, human_message_prompt]
    )
    try:
        chain = LLMChain(
            llm=ChatOpenAI(openai_api_key=openai_api_key),
            prompt=chat_prompt,
        )
        return chain.run(text)
    except Exception as e:
        # HACK: classifying the failure by substring match on the message
        # is fragile (the real openai.AuthenticationError class is not
        # imported here); preserved as-is for backward compatibility.
        if "AuthenticationError" in str(e):
            st.error("Incorrect API key provided. Please check and update your API key.")
            st.stop()
        else:
            st.error(f"An error occurred: {str(e)}")
            st.stop()
|