from transformers import AutoModelForCausalLM, AutoTokenizer from latex2mathml.converter import convert import gradio as gr import torch import re import spaces # Initialize the model and tokenizer model_name = "Qwen/Qwen2.5-Math-1.5B-Instruct" device = "cuda" if torch.cuda.is_available() else "cpu" model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.float16 if device == "cuda" else torch.float32, device_map="auto" if device == "cuda" else None ) tokenizer = AutoTokenizer.from_pretrained(model_name) # System instruction SYSTEM_INSTRUCTION = ( "You are a helpful and patient math tutor tasked with providing step-by-step hints and guidance for solving math problems." "Your primary role is to assist learners in understanding how to approach and solve problems without revealing the final answer, even if explicitly requested." "Always encourage the learner to solve the problem themselves by offering incremental hints and explanations." "Under no circumstances should you provide the complete solution or final answer." ) def render_latex_to_mathml(text): """ Converts LaTeX expressions in the text to MathML. """ try: mathml = convert(text) # Converts LaTeX to MathML return f"{mathml}" except Exception as e: return f"Error rendering LaTeX: {str(e)}" def preprocess_response(response): """ Preprocess the response to convert LaTeX expressions in the text to MathML. Only parts of the text that contain LaTeX are converted. """ # Regex patterns to detect LaTeX expressions inline_latex_pattern = r"\$([^\$]+)\$" # Matches inline LaTeX between single $ block_latex_pattern = r"\$\$([^\$]+)\$\$" # Matches block LaTeX between $$ # Replace block LaTeX def replace_block(match): latex_code = match.group(1) try: return render_latex_to_mathml(latex_code) except Exception as e: return f"Error rendering block LaTeX: {str(e)}" # Replace inline LaTeX def replace_inline(match): latex_code = match.group(1) try: return render_latex_to_mathml(latex_code) except Exception as e: return f"Error rendering inline LaTeX: {str(e)}" # First process block LaTeX response = re.sub(block_latex_pattern, replace_block, response) # Then process inline LaTeX response = re.sub(inline_latex_pattern, replace_inline, response) return response def apply_chat_template(messages): """ Prepares the messages for the model using the tokenizer's chat template. """ return tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) @spaces.GPU def generate_response(chat_history, user_input): """ Generates a response from the model based on the chat history and user input. """ # Append user input to chat history chat_history.append(("User", user_input + "\n\n strinctly prohibited to reveal answer only provide hints and guidelines to solve this")) # Prepare messages for the model messages = [{"role": "system", "content": SYSTEM_INSTRUCTION}] + [ {"role": "user", "content": msg[1]} if msg[0] == "User" else {"role": "assistant", "content": msg[1]} for msg in chat_history ] # Tokenize the input for the model text = apply_chat_template(messages) model_inputs = tokenizer([text], return_tensors="pt").to(device) # Generate the model's response generated_ids = model.generate( **model_inputs, max_new_tokens=512 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] rendered_response = preprocess_response(response) # Append AI response to chat history chat_history.append(("MathTutor", rendered_response)) # Return updated chat history return chat_history def format_chat_history(history): """ Formats the conversation history for a user-friendly chat display. """ chat_display = "" for message in history: if message["role"] == "user": chat_display += f"**User:** {message['content']}\n\n" elif message["role"] == "assistant": chat_display += f"**MathTutor:** {message['content']}\n\n" return chat_display # Gradio chat interface def create_chat_interface(): """ Creates the Gradio interface for the chat application. """ with gr.Blocks() as chat_app: gr.Markdown("## Math Hint Chat") gr.Markdown( "This chatbot provides hints and step-by-step guidance for solving math problems. " ) chatbot = gr.Chatbot(label="Math Tutor Chat", elem_id="chat-container") user_input = gr.Textbox( placeholder="Ask your math question here (e.g., Solve for x: 4x + 5 = 6x + 7)", label="Your Query" ) send_button = gr.Button("Send") # Hidden state for managing chat history chat_history = gr.State([]) # Button interaction for chat send_button.click( fn=generate_response, inputs=[chat_history, user_input], outputs=[chatbot] ) return chat_app app = create_chat_interface() app.launch(debug=True)