Makima57 committed
Commit 3d60af0 • 1 Parent(s): d76bf3b

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +88 -38
app.py CHANGED
@@ -1,59 +1,109 @@
 
- import streamlit as st
- import ctranslate2
- from transformers import AutoTokenizer
- from huggingface_hub import snapshot_download
- from codeexecutor import postprocess_completion,get_majority_vote
 
  # Define the model and tokenizer loading
- model_prompt = "Solve the following mathematical problem: "
  tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
  model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
  generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
- iterations=10
 
  # Function to generate predictions using the model
  def get_prediction(question):
      input_text = model_prompt + question
      input_tokens = tokenizer.tokenize(input_text)
-     results = generator.generate_batch([input_tokens])
      output_tokens = results[0].sequences[0]
      predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
-     return predicted_answer
 
- # Function to perform majority voting across multiple predictions
- def majority_vote(question, num_iterations=10):
      all_predictions = []
-     all_answer=[]
      for _ in range(num_iterations):
          prediction = get_prediction(question)
-         answer=postprocess_completion(prediction,True,True)
          all_predictions.append(prediction)
-         all_answer.append(answer)
-     majority_voted_pred = max(set(all_predictions), key=all_predictions.count)
-     majority_voted_ans=get_majority_vote(all_answer)
-     return majority_voted_pred, all_predictions,majority_voted_ans
-
- # Streamlit app UI
- st.title("Math Question Solver")
- st.write("Enter a math question to get the model prediction and see all generated answers.")
-
- # Input field for math question
- question = st.text_input("Math Question", placeholder="Enter your math question here...")
-
- # Input field for correct answer
- correct_answer = st.text_input("Correct Answer", placeholder="Enter the correct answer here...")
-
- # Button to trigger prediction
- if st.button("Get Prediction"):
-     if question and correct_answer:
-         final_prediction, all_predictions,final_answer = majority_vote(question, iterations)
-         st.write("Question: ", question)
-         st.write("Generated Answers (10 iterations): ", all_predictions)
-         st.write("Majority-Voted Prediction: ", final_prediction)
-         st.write("Correct solution: ", correct_answer)
-         st.write("Majority answer: ", final_answer)
      else:
-         st.error("Please enter both math question and correct answer")
 
 
 
 
+ import gradio as gr
+ import ctranslate2
+ from transformers import AutoTokenizer
+ from huggingface_hub import snapshot_download
+ from codeexecutor import get_majority_vote
+ import re
 
  # Define the model and tokenizer loading
+ model_prompt = "Explain and solve the following mathematical problem step by step, showing all work: "
  tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
  model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
  generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
+ iterations = 10
 
  # Function to generate predictions using the model
  def get_prediction(question):
      input_text = model_prompt + question
      input_tokens = tokenizer.tokenize(input_text)
+     results = generator.generate_batch(
+         [input_tokens],
+         max_length=512,
+         sampling_temperature=0.7,
+         sampling_topk=40,
+     )
      output_tokens = results[0].sequences[0]
      predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
+     return predicted_answer
 
+ # Function to parse the prediction to extract the answer and steps
+ def parse_prediction(prediction):
+     lines = prediction.strip().split('\n')
+     answer = None
+     steps = []
+     for line in lines:
+         # Check for "Answer:" or "answer:"
+         match = re.match(r'^\s*(?:Answer|answer)\s*[:=]\s*(.*)', line)
+         if match:
+             answer = match.group(1).strip()
+         else:
+             steps.append(line)
+     if answer is None:
+         # If no "Answer:" found, assume last line is the answer
+         answer = lines[-1].strip()
+         steps = lines[:-1]
+     steps_text = '\n'.join(steps).strip()
+     return answer, steps_text
+
+ # Function to perform majority voting and get steps
+ def majority_vote_with_steps(question, num_iterations=10):
      all_predictions = []
+     all_answers = []
+     steps_list = []
+
      for _ in range(num_iterations):
          prediction = get_prediction(question)
+         answer, steps = parse_prediction(prediction)
          all_predictions.append(prediction)
+         all_answers.append(answer)
+         steps_list.append(steps)
+
+     # Get the majority voted answer
+     majority_voted_ans = get_majority_vote(all_answers)
+
+     # Find the steps corresponding to the majority voted answer
+     for i, ans in enumerate(all_answers):
+         if ans == majority_voted_ans:
+             steps_solution = steps_list[i]
+             break
      else:
+         steps_solution = "No steps found"
+
+     return majority_voted_ans, steps_solution
+
+ # Gradio interface for user input and output
+ def gradio_interface(question, correct_answer):
+     final_answer, steps_solution = majority_vote_with_steps(question, iterations)
+     return {
+         "Question": question,
+         "Majority-Voted Answer": final_answer,
+         "Steps to Solve": steps_solution,
+         "Correct Solution": correct_answer
+     }
+
+ # Custom CSS for enhanced design (unchanged)
+
+ # Gradio app setup
+ interface = gr.Interface(
+     fn=gradio_interface,
+     inputs=[
+         gr.Textbox(label="🧠 Math Question", placeholder="Enter your math question here...", elem_id="math_question"),
+         gr.Textbox(label="✅ Correct Answer", placeholder="Enter the correct answer here...", elem_id="correct_answer"),
+     ],
+     outputs=[
+         gr.JSON(label="📊 Results"),  # Display the results in a JSON format
+     ],
+     title="🔒 Math Question Solver",
+     description="Enter a math question to get the model's majority-voted answer and steps to solve the problem.",
+
+ )
+
+ if __name__ == "__main__":
+     interface.launch()
+
 
 
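
For context on the new flow, here is a minimal standalone sketch of how the parsing and voting behave on a few hand-written completions. It is not part of app.py: the sample completions are invented for illustration, and collections.Counter stands in for codeexecutor.get_majority_vote, whose implementation is not shown in this diff.

# Sketch: sanity-check the parsing + voting logic outside the Gradio app.
# Assumptions: the completions below are made up, and Counter approximates
# codeexecutor.get_majority_vote (not shown in this diff).
import re
from collections import Counter

def parse_prediction(prediction):
    # Same parsing logic as the new app.py: pull out an "Answer:" line,
    # otherwise fall back to treating the last line as the answer.
    lines = prediction.strip().split('\n')
    answer, steps = None, []
    for line in lines:
        match = re.match(r'^\s*(?:Answer|answer)\s*[:=]\s*(.*)', line)
        if match:
            answer = match.group(1).strip()
        else:
            steps.append(line)
    if answer is None:
        answer = lines[-1].strip()
        steps = lines[:-1]
    return answer, '\n'.join(steps).strip()

completions = [
    "Add the two numbers.\n2 + 2 = 4\nAnswer: 4",
    "Compute the sum directly.\nAnswer: 4",
    "The total comes to five.\nAnswer: 5",
]
answers = [parse_prediction(c)[0] for c in completions]
majority = Counter(answers).most_common(1)[0][0]  # stand-in for get_majority_vote
print(answers)   # ['4', '4', '5']
print(majority)  # 4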