# "Spaces: Sleeping" — Hugging Face Spaces status-banner text scraped along
# with this source file; commented out so the file parses as Python.
# Standard library
import io
import sys
import traceback

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient

# Model served through the Hugging Face Inference API.
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)
def llm_inference(user_sample):
    """Ask the hosted Qwen model to produce Python code for *user_sample*.

    Parameters
    ----------
    user_sample : str
        The user's topic, or a code snippet to be corrected.

    Returns
    -------
    str
        The concatenated content of all returned assistant messages.
    """
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window, so end with input() if the user request is simple. If the input is code, correct it for mistakes."},
            {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token],
    )
    # huggingface_hub returns dataclass-like objects; dict-style subscripting
    # (choice['message']['content']) is deprecated — use attribute access.
    return ''.join(choice.message.content for choice in output.choices)
def chat(user_input, history):
    """Gradio chat callback: append the (user, bot) turn to the history.

    Parameters
    ----------
    user_input : str
        Message typed into the textbox.
    history : list[tuple[str, str]]
        Accumulated (user, bot) message pairs; mutated in place.

    Returns
    -------
    tuple
        (history, history) — the same list twice, matching the two
        outputs wired to the Chatbot component.
    """
    response = llm_inference(user_input)
    history.append((user_input, response))
    return history, history
def execute_code(code, user_inputs):
    """Run user-supplied Python *code*, feeding it pre-supplied stdin lines.

    SECURITY NOTE: exec() on untrusted input can do anything this process
    can — acceptable only inside a sandboxed environment (e.g. an HF Space).

    Parameters
    ----------
    code : str
        Python source to execute.
    user_inputs : str
        Newline-separated values returned by successive input() calls
        inside the executed code.

    Returns
    -------
    str
        Everything the code printed, or "Error: ..." plus a traceback.
    """
    # Split user inputs by newline; each input() call consumes one line.
    inputs = user_inputs.strip().split('\n')
    input_iter = iter(inputs)

    def custom_input(prompt=''):
        # Serve the next canned line; the prompt is intentionally ignored.
        try:
            return next(input_iter)
        except StopIteration:
            raise Exception("Not enough inputs provided.")

    # Redirect stdout so the executed code's prints are captured.
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    try:
        # Inject custom_input via the exec globals rather than patching
        # __builtins__: outside __main__, __builtins__ is a dict (so
        # attribute access raises AttributeError), and mutating it would
        # leak the monkey-patch process-wide.
        exec(code, {'input': custom_input})
        output = redirected_output.getvalue()
    except Exception as e:
        output = f"Error: {e}\n{traceback.format_exc()}"
    finally:
        sys.stdout = old_stdout
    return output
# --- Gradio UI -----------------------------------------------------------
# NOTE(review): the emoji in the Markdown strings below appear mojibake'd
# ("π", "π₯οΈ", "π" — likely UTF-8 decoded with the wrong codec). They are
# reproduced as-is; confirm the intended glyphs before changing them.
with gr.Blocks() as demo:
    gr.Markdown("# π Python Helper Chatbot")
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        # chat() returns (history, history): one copy updates the stored
        # state, the other refreshes the displayed Chatbot.
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    with gr.Tab("Interpreter"):
        gr.Markdown("### π₯οΈ Test Your Code")
        code_input = gr.Code(language="python", lines=20)
        gr.Markdown("#### π Provide Inputs (Each input on a new line):")
        user_inputs = gr.Textbox(lines=5, placeholder="Enter inputs for your code here...")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output", lines=15)
        run_button.click(execute_code, inputs=[code_input, user_inputs], outputs=code_output)
    with gr.Tab("Logs"):
        gr.Markdown("### π Logs")
        # Placeholder: nothing is currently wired to write into this box.
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

demo.launch()