Update app.py
app.py CHANGED
@@ -1,6 +1,8 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from latex2mathml.converter import convert
 import gradio as gr
 import torch
+import re
 import spaces
 
 # Initialize the model and tokenizer
@@ -22,6 +24,50 @@ SYSTEM_INSTRUCTION = (
     "Under no circumstances should you provide the complete solution or final answer."
 )
 
+
+def render_latex_to_mathml(text):
+    """
+    Converts LaTeX expressions in the text to MathML.
+    """
+    try:
+        mathml = convert(text)  # Converts LaTeX to MathML
+        return f"<math xmlns='http://www.w3.org/1998/Math/MathML'>{mathml}</math>"
+    except Exception as e:
+        return f"<span>Error rendering LaTeX: {str(e)}</span>"
+
+def preprocess_response(response):
+    """
+    Preprocess the response to convert LaTeX expressions in the text to MathML.
+    Only parts of the text that contain LaTeX are converted.
+    """
+    # Regex patterns to detect LaTeX expressions
+    inline_latex_pattern = r"\$([^\$]+)\$"  # Matches inline LaTeX between single $
+    block_latex_pattern = r"\$\$([^\$]+)\$\$"  # Matches block LaTeX between $$
+
+    # Replace block LaTeX
+    def replace_block(match):
+        latex_code = match.group(1)
+        try:
+            return render_latex_to_mathml(latex_code)
+        except Exception as e:
+            return f"<span>Error rendering block LaTeX: {str(e)}</span>"
+
+    # Replace inline LaTeX
+    def replace_inline(match):
+        latex_code = match.group(1)
+        try:
+            return render_latex_to_mathml(latex_code)
+        except Exception as e:
+            return f"<span>Error rendering inline LaTeX: {str(e)}</span>"
+
+    # First process block LaTeX
+    response = re.sub(block_latex_pattern, replace_block, response)
+
+    # Then process inline LaTeX
+    response = re.sub(inline_latex_pattern, replace_inline, response)
+
+    return response
+
 def apply_chat_template(messages):
     """
     Prepares the messages for the model using the tokenizer's chat template.
@@ -60,8 +106,12 @@ def generate_response(chat_history, user_input):
     ]
     response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
 
+    rendered_response = preprocess_response(response)
+
     # Append AI response to chat history
-    chat_history.append(("MathTutor", response))
+    chat_history.append(("MathTutor", rendered_response))
+
+
 
     # Return updated chat history
     return chat_history
@@ -84,54 +134,6 @@ def create_chat_interface():
     Creates the Gradio interface for the chat application.
     """
     with gr.Blocks() as chat_app:
-        gr.HTML("""
-        <!-- Include KaTeX CSS and JS -->
-        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/katex.min.css">
-        <script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/katex.min.js"></script>
-        <script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/contrib/auto-render.min.js"></script>
-        <script>
-            // Preprocess LaTeX content
-            function preprocessLatex(text) {
-                // Convert block math `[ ... ]` to `\\[ ... \\]`
-                text = text.replace(/\[([^\[\]]+)\]/g, '\\[$1\\]');
-                // Convert inline math `( ... )` to `\\( ... \\)`
-                text = text.replace(/\(([^\(\)]+)\)/g, '\\($1\\)');
-                return text;
-            }
-
-            // Render LaTeX only for elements requiring math
-            function renderChatLatex(mutationsList) {
-                for (const mutation of mutationsList) {
-                    if (mutation.type === "childList") {
-                        mutation.addedNodes.forEach((node) => {
-                            if (node.nodeType === 1) { // Ensure it's an element node
-                                // Check if the content needs LaTeX rendering
-                                if (node.innerHTML.match(/\\\(|\\\[|\$|\[|\(/)) {
-                                    node.innerHTML = preprocessLatex(node.innerHTML);
-                                    renderMathInElement(node, {
-                                        delimiters: [
-                                            { left: "\\(", right: "\\)", display: false },
-                                            { left: "\\[", right: "\\]", display: true },
-                                            { left: "$$", right: "$$", display: true },
-                                            { left: "$", right: "$", display: false }
-                                        ]
-                                    });
-                                }
-                            }
-                        });
-                    }
-                }
-            }
-
-            // Setup MutationObserver
-            document.addEventListener("DOMContentLoaded", () => {
-                const chatContainer = document.querySelector("#chat-container");
-                const observer = new MutationObserver(renderChatLatex);
-                observer.observe(chatContainer, { childList: true, subtree: true });
-            });
-        </script>
-        """)
-
         gr.Markdown("## Math Hint Chat")
         gr.Markdown(
             "This chatbot provides hints and step-by-step guidance for solving math problems. "