LuckRafly committed on
Commit b87b67d
Parent: 29519fd

Update app.py

Files changed (1)
  1. app.py +58 -62
app.py CHANGED
@@ -1,62 +1,58 @@
- import streamlit as st
- from function import GetLLMResponse
-
- # List of math topics and difficulty levels
- math_topics = {
-     "Elementary School Level": ["Basic Arithmetic", "Place Value", "Fractions", "Decimals", "Geometry"],
-     "Middle School Level": ["Algebra", "Ratio and Proportion", "Percentages", "Geometry", "Integers and Rational Numbers"],
-     "High School Level": ["Algebra II", "Trigonometry", "Pre-Calculus", "Calculus", "Statistics and Probability"]
- }
-
- # Page configuration
- st.set_page_config(page_title="Generate Math Quizzes",
-                    page_icon="🧮",
-                    layout="centered",
-                    initial_sidebar_state="collapsed")
-
- # Header and description
- st.title("Generate Math Quizzes 🧮")
- st.text("Choose the difficulty level and topic for your math quizzes.")
-
- # User input for quiz generation
- ## Layout in columns
- col1, col2, col3 = st.columns([1, 1, 1])
-
- with col1:
-     selected_topic_level = st.selectbox('Select Topic Level', list(math_topics.keys()))
-
- with col2:
-     selected_topic = st.selectbox('Select Topic', math_topics[selected_topic_level])
-
- with col3:
-     num_quizzes = st.slider('Number of Quizzes', min_value=1, max_value=5, value=1)
-
- submit = st.button('Generate Quizzes')
-
-
- # Final Response
- if submit:
-     with st.spinner("Generating Quizzes..."):
-         response = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes)
-         st.success("Quizzes Generated!")
-
-         # Display questions and answers in two columns
-         if response:
-             st.subheader("Quiz Questions and Answers:")
-             # Prepare data for the table
-             col1, col2 = st.columns(2)
-             with col1:
-                 st.subheader("Questions")
-                 questions = response.get('questions')
-                 st.write(questions)
-
-             with col2:
-                 st.subheader("Answers")
-                 answers = response.get('answer')
-                 st.write(answers)
-
-         else:
-             st.warning("No Quiz Questions and Answers")
-
- else:
-     st.warning("Click the 'Generate Quizzes' button to create quizzes.")
 
+ from langchain.prompts import PromptTemplate
+ from langchain.llms import CTransformers
+ from langchain.chains import LLMChain
+ from langchain.chains import SequentialChain
+ from langchain.llms import HuggingFaceHub
+ from dotenv import load_dotenv
+
+ # load_dotenv()
+ config = {'max_new_tokens': 512, 'temperature': 0.6}
+
+ # Create function for the app
+ def GetLLMResponse(selected_topic_level,
+                    selected_topic,
+                    num_quizzes):
+
+     # Option: call a local Llama 2 model with CTransformers (commented out)
+     # llm = CTransformers(model="D:\Code Workspace\DL Model\llama-2-7b-chat.ggmlv3.q8_0.bin",
+     #                     model_type='llama',
+     #                     config=config)
+
+     # llm = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
+     #                     model_file='llama-2-7b-chat.ggmlv3.q8_0.bin',
+     #                     model_type='llama',
+     #                     config=config)
+
+     llm = HuggingFaceHub(
+         repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
+         model_kwargs=config
+     )
+
+     ## Create LLM chaining
+     questions_template = "I want you to generate questions with this specification: Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Generate only {num_quizzes} questions, no more, and without providing answers. The questions should not be in image format or given as links."
+     questions_prompt = PromptTemplate(input_variables=["selected_topic_level", "selected_topic", "num_quizzes"],
+                                       template=questions_template)
+     questions_chain = LLMChain(llm=llm,
+                                prompt=questions_prompt,
+                                output_key="questions")
+
+
+     answer_template = "I want you to act as a teacher and answer these specific questions:\n {questions}\n\nYou should give me a straightforward and concise explanation and answer to each one of them."
+     answer_prompt = PromptTemplate(input_variables=["questions"],
+                                    template=answer_template)
+     answer_chain = LLMChain(llm=llm,
+                             prompt=answer_prompt,
+                             output_key="answer")
+
+     ## Create sequential chaining
+     seq_chain = SequentialChain(chains=[questions_chain, answer_chain],
+                                 input_variables=['selected_topic_level', 'selected_topic', 'num_quizzes'],
+                                 output_variables=['questions', 'answer'])
+
+     response = seq_chain({'selected_topic_level': selected_topic_level,
+                           'selected_topic': selected_topic,
+                           'num_quizzes': num_quizzes})
+
+     ## Return the generated questions and answers
+     print(response)
+     return response
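
Note on running this change: HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable, which is why the new code imports load_dotenv. Below is a minimal sketch (not part of the commit) of how a caller like the removed app.py would use this function, assuming the new code lives in function.py as the old import suggests and a .env file supplies the token; the topic values are illustrative only.

from dotenv import load_dotenv
from function import GetLLMResponse  # hypothetical module layout, matching the old app.py import

load_dotenv()  # loads HUGGINGFACEHUB_API_TOKEN from .env for HuggingFaceHub

# SequentialChain returns a dict containing the inputs plus the two
# declared output_variables, 'questions' and 'answer'; questions_chain's
# output_key feeds the {questions} slot in answer_prompt.
response = GetLLMResponse("High School Level", "Trigonometry", 3)
print(response.get('questions'))
print(response.get('answer'))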