import requests

# Best-effort lookup of the country this instance is running from.
try:
    check_ipinfo = requests.get("https://ipinfo.io", timeout=5).json()['country']
except Exception:
    check_ipinfo = "unknown"
print("Run-Location-As:", check_ipinfo)


import gradio as gr
import ollama

# List of available models for selection.
# IMPORTANT: These model IDs must correspond to models that have already been
# pulled into the local Ollama instance (see run.sh); otherwise the chat call will fail.


# Models from run.sh
MODEL_ID_MAP = {
    "(阿里千問)Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
    "(IBM)Granite3.3-2B": 'granite3.3:2b',
    "(Meta)Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
    #"(阿里千問)Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
    "(Google)Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
    #"(Tencent)混元-1.8B-Instruct":'hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M',
    #"(Tencent)混元-4B-Instruct": 'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M',
    "(HuggingFace)SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M'
}
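
# Optional sketch (an assumption, not taken from run.sh): the ollama Python client
# can pre-pull every model in MODEL_ID_MAP at startup so the first chat request
# does not stall on a download. This assumes the Ollama daemon is already running.
# Uncomment to enable:
# for _model_id in MODEL_ID_MAP.values():
#     ollama.pull(_model_id)  # blocking; downloads the weights only if missing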


# Default system prompt.
# The two numbered rules below (in Chinese) tell the model: (1) answer in standard
# Traditional Chinese if the query is in Chinese; (2) answer in English if the query is in English.
DEFAULT_SYSTEM_PROMPT = """Answer everything in simple, smart, relevant and accurate style. No chatty! Besides, pls:
    1. 如果查詢是以中文輸入,使用標準繁體中文回答
    2. 如果查詢是以英文輸入,使用英文回答"""

# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutral")) as demo:
    gr.Markdown(f"## HH demo: LLM/SLM chatbot running with CPU only.") # Changed title to be more generic
    gr.Markdown(f"(Run-Location-As: `{check_ipinfo}`)")
    gr.Markdown("Chat with the model, customize its behavior with a system prompt, and toggle streaming output.")

    # Model Selection
    with gr.Row():
        selected_model_label = gr.Radio(
            choices=list(MODEL_ID_MAP.keys()),
            value=list(MODEL_ID_MAP.keys())[0], # Default to first display name
            label="Select Model",
            info="Choose the LLM model to chat with.",
            interactive=True
        )

    chatbot = gr.Chatbot(
        label="Conversation",
        height=400,
        type='messages',
        layout="bubble"
    )

    with gr.Row():
        msg = gr.Textbox(
            show_label=False,
            placeholder="Type your message here and press Enter...",
            lines=1,
            scale=4,
            container=False
        )

    with gr.Accordion("Advanced Options", open=False):
        with gr.Row():
            stream_checkbox = gr.Checkbox(
                label="Stream Output",
                value=True,
                info="Enable to see the response generate in real-time."
            )
            use_custom_prompt_checkbox = gr.Checkbox(
                label="Use Custom System Prompt",
                value=False,
                info="Check this box to provide your own system prompt below."
            )

        # --- System Prompt Options ---
        # Keys are display names shown in the radio selector; values are the actual
        # system prompts. The Chinese presets force replies in Traditional or
        # Simplified Chinese regardless of the input language.
        SYSTEM_PROMPT_OPTIONS = {
            "AI Henry": DEFAULT_SYSTEM_PROMPT,
            "繁體中文回答":"無論如何,必須使用標準繁體中文回答. Answer everything in simple, smart, relevant and accurate style. No chatty!",
            "简体中文回答":"无论如何,必须使用标准简体中文回答. Answer everything in simple, smart, relevant and accurate style. No chatty!",
            "English Chat":"You must reply by English. Answer everything in simple, smart, relevant and accurate style. No chatty!",
            "Friendly & Conversational":"Respond in a warm, friendly, and engaging tone. Use natural language and offer helpful suggestions. Keep responses concise but personable.",
            "Professional & Formal":"Maintain a formal and professional tone. Use precise language, avoid slang, and ensure responses are suitable for business or academic contexts.",
            "Elon Musk style":"You must chat in Elon Musk style!",
            "Good Coder":"""
                You are a highly capable coding assistant specialized in software development, algorithms, and debugging. Your responses must be accurate, efficient, and tailored to the user's request. Always follow these principles:
                1. Use clear, well-commented code.
                2. Prioritize readability and best practices.
                3. When asked to explain, provide concise, step-by-step reasoning.
                4. When asked to generate code, include input/output examples if relevant.
                5. If the user provides buggy code, identify the issue and suggest a fix.
                6. If multiple solutions exist, briefly compare them and recommend the best.
                7. Always respect the specified programming language, libraries, and constraints.
                8. Never make assumptions beyond the user’s instructions unless explicitly asked.
                9. If the task is ambiguous, ask clarifying questions before proceeding.
                10. Avoid unnecessary boilerplate unless requested.
                11. Use only open-source and free resources, libraries, and APIs. Do not suggest or rely on paid, proprietary, or license-restricted tools unless explicitly requested.
                12. Your code shall run fine on Colab or Kaggle.
                13. Always include inline comments so the user can learn from the code.
                14. Always provide installation and operation steps.
                
                You support multiple languages including Python, JavaScript, TypeScript, C++, Java, Go, Rust, and Bash. You can also assist with frameworks like React, Node.js, Django, Flask, and more.
                
                Your goal is to help the user write better code, faster, and deepen their understanding of programming concepts.
                """,
            "Test1(Auto TC/EN)":"Always detect the user's input language and respond in that same language. Do not translate unless explicitly requested. Answer everything in simple, smart, relevant and accurate style. No chatty!",
            "Simulate Tencent Auto TC/EN":"""Answer everything in simple, smart, relevant and accurate style. No chatty! Besides, pls:
                1. 如果查詢是以中文輸入,使用標準繁體中文回答,符合官方文書規範 
                2. 要提供引用規則依据
                3. 如果查詢是以英文輸入,使用英文回答"""
        }

        system_prompt_selector = gr.Radio(
            label="Choose a System Prompt Style",
            choices=list(SYSTEM_PROMPT_OPTIONS.keys()),
            value="AI Henry",
            interactive=True
        )

        system_prompt_textbox = gr.Textbox(
            label="System Prompt",
            value=DEFAULT_SYSTEM_PROMPT,
            lines=3,
            placeholder="Enter a system prompt to guide the model's behavior...",
            interactive=False
        )

    # Function to toggle the interactivity of the system prompt textbox
    def toggle_system_prompt(use_custom):
        return gr.update(interactive=use_custom)

    use_custom_prompt_checkbox.change(
        fn=toggle_system_prompt,
        inputs=use_custom_prompt_checkbox,
        outputs=system_prompt_textbox,
        queue=False
    )

    # Function to update textbox when prompt style changes
    def update_prompt_text(selected_key, use_custom):
        if not use_custom:
            return gr.update(value=SYSTEM_PROMPT_OPTIONS[selected_key])
        else:
            return gr.update()

    system_prompt_selector.change(
        fn=update_prompt_text,
        inputs=[system_prompt_selector, use_custom_prompt_checkbox],
        outputs=system_prompt_textbox,
        queue=False
    )

    # --- Core Chat Logic ---
    # This function is the heart of the application.
    def respond(history, system_prompt, stream_output, selected_model_name, selected_prompt_key, use_custom_prompt): # Added selected_model_name
        """
        This is the single function that handles the entire chat process.
        It takes the history, prepends the system prompt, calls the Ollama API,
        and returns the response to the chatbot, streaming it chunk by chunk
        when "Stream Output" is enabled.
        """
        
        # Use selected predefined prompt unless custom is enabled
        if not use_custom_prompt:
            system_prompt = SYSTEM_PROMPT_OPTIONS[selected_prompt_key]

        current_selected_model = MODEL_ID_MAP[selected_model_name]

        # Disable Qwen3 "thinking" mode for the small Qwen3 variants.
        # (No-op with the current MODEL_ID_MAP; kept for when those models are added back.)
        #if "Qwen3".lower() in current_selected_model:
        #    system_prompt = system_prompt+" /no_think"

        if any(substring in current_selected_model.lower() for substring in ["qwen3-0.6b", "qwen3-1.7b"]):
            system_prompt = system_prompt + " /no_think"
        
        # The 'history' variable from Gradio contains the entire conversation.
        # We prepend the system prompt to this history to form the final payload.
        messages = [{"role": "system", "content": system_prompt}] + history

        # Add a placeholder for the assistant's response to the UI history.
        # This creates the space where the response will be displayed.
        history.append({"role": "assistant", "content": ""})

        if stream_output:
            # Stream the response from the Ollama API using the currently selected model.
            response_stream = ollama.chat(
                model=current_selected_model, # Use the dynamically selected model
                messages=messages,
                stream=True
            )

            # Iterate through the stream, updating the placeholder with each new chunk.
            for chunk in response_stream:
                if chunk['message']['content']:
                    history[-1]['content'] += chunk['message']['content']
                    # Yield the updated history to the chatbot for a real-time effect.
                    yield history
        else:
            # Non-streaming: wait for the complete response, then show it in one update.
            response = ollama.chat(
                model=current_selected_model,
                messages=messages,
                stream=False
            )
            history[-1]['content'] = response['message']['content']
            yield history

    # This function handles the user's submission.
    def user_submit(history, user_message):
        """
        Adds the user's message to the chat history and clears the input box.
        This prepares the state for the main 'respond' function.
        """
        return history + [{"role": "user", "content": user_message}], ""

    # Gradio Event Wiring
    msg.submit(
        user_submit,
        inputs=[chatbot, msg],
        outputs=[chatbot, msg],
        queue=False
    ).then(
        respond,
        inputs=[chatbot, system_prompt_textbox, stream_checkbox, selected_model_label, system_prompt_selector, use_custom_prompt_checkbox], # Pass new inputs
        outputs=[chatbot]
    )

# Launch the Gradio interface
demo.launch(server_name="0.0.0.0", server_port=7860)
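
# Quick-start sketch (assumptions: a local Ollama daemon is available and this file
# is saved as app.py; adjust the commands to your environment):
#   pip install gradio ollama requests
#   ollama serve            # start the Ollama daemon if it is not already running
#   python app.py           # then open http://localhost:7860 in a browser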