File size: 2,845 Bytes
298d7d8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f5aff24
 
 
 
 
 
 
 
 
 
 
 
 
298d7d8
 
f5aff24
 
298d7d8
 
 
 
 
 
 
f5aff24
298d7d8
 
 
 
 
 
 
 
 
 
f5aff24
 
 
298d7d8
f5aff24
 
298d7d8
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import gradio as gr
from huggingface_hub import InferenceClient
import sys
import io
import traceback

# Hosted model used for every chat completion request below.
model_name = "Qwen/Qwen2.5-72B-Instruct"
# Serverless Inference API client bound to that model (shared by all requests).
client = InferenceClient(model_name)

def llm_inference(user_sample):
    """Ask the hosted model for Python code on the user's topic.

    Args:
        user_sample: The user's free-form request (a topic, or code to fix).

    Returns:
        The model's reply text, concatenated across all returned choices.
    """
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window, so end with input() if the user request is simple. If the input is code, correct it for mistakes."},
            {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token]
    )
    # Use attribute access: huggingface_hub returns ChatCompletionOutput
    # dataclasses, for which choice.message.content is the documented API
    # (dict-style subscripting is legacy compatibility only). content can
    # be None, so substitute "" to keep the join well-typed.
    return "".join(choice.message.content or "" for choice in output.choices)

def chat(user_input, history):
    """Handle one chat turn: query the model and record the exchange.

    Args:
        user_input: The user's latest message.
        history: Mutable list of (user, assistant) message pairs.

    Returns:
        The updated history twice (Gradio wires it to both chatbot and state).
    """
    history.append((user_input, llm_inference(user_input)))
    return history, history

def execute_code(code, user_inputs):
    """Run *code* with stdout captured, feeding lines of *user_inputs* to input().

    SECURITY NOTE: exec() on user-supplied code is inherently dangerous and
    must only run inside a sandboxed/disposable environment (as in this demo).

    Args:
        code: Python source to execute.
        user_inputs: Newline-separated values consumed, in order, by any
            input() calls the code makes.

    Returns:
        The captured stdout text, or an "Error: ..." message with a full
        traceback if execution raised.
    """
    # Patch the real builtins module. The original code patched
    # __builtins__.input, which only works when this file runs as __main__:
    # in an imported module __builtins__ is a plain dict and attribute
    # assignment raises AttributeError.
    import builtins

    # One simulated input() answer per line.
    input_iter = iter(user_inputs.strip().split('\n'))

    def custom_input(prompt=''):
        # Stand-in for input(): return the next pre-supplied line.
        try:
            return next(input_iter)
        except StopIteration:
            raise Exception("Not enough inputs provided.")

    # Redirect stdout and swap input() for the duration of the exec.
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    old_input = builtins.input
    builtins.input = custom_input
    try:
        exec(code, {})
        output = redirected_output.getvalue()
    except Exception as e:
        output = f"Error: {e}\n{traceback.format_exc()}"
    finally:
        # Always restore the real stdout and input(), even on failure.
        sys.stdout = old_stdout
        builtins.input = old_input
    return output

# Three-tab Gradio UI: chat with the model, run code locally, view logs.
with gr.Blocks() as demo:
    gr.Markdown("# 🐍 Python Helper Chatbot")
    with gr.Tab("Chat"):
        # Chatbot history doubles as the state object: chat() returns it
        # twice, once for display and once as the next call's input.
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    with gr.Tab("Interpreter"):
        gr.Markdown("### πŸ–₯️ Test Your Code")
        code_input = gr.Code(language="python", lines=20)
        gr.Markdown("#### πŸ“ Provide Inputs (Each input on a new line):")
        # One line per simulated input() answer, consumed in order by execute_code.
        user_inputs = gr.Textbox(lines=5, placeholder="Enter inputs for your code here...")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output", lines=15)
        run_button.click(execute_code, inputs=[code_input, user_inputs], outputs=code_output)
    with gr.Tab("Logs"):
        gr.Markdown("### πŸ“œ Logs")
        # NOTE(review): nothing ever writes to this box — it is a placeholder.
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

demo.launch()