Kai Izumoto committed
Commit aac47db · verified · 1 Parent(s): 07f392a

Create app.py

Files changed (1)
  1. app.py +254 -0
app.py ADDED
@@ -0,0 +1,254 @@
"""
SuperCoder - Hugging Face Spaces Frontend
Connects to your local API server via tunnel
"""
import gradio as gr
import requests
from typing import List, Tuple

# ============================================================================
# Configuration - EDIT THIS WITH YOUR TUNNEL URL
# ============================================================================
API_URL = "https://inge-chalcographic-helene.ngrok-free.dev"

# Example URLs:
# ngrok: https://abc123.ngrok-free.app
# cloudflare: https://abc123.trycloudflare.com


# ============================================================================
# API Client Functions
# ============================================================================
def call_api(message: str, temperature: float = 0.1, max_tokens: int = 512) -> str:
    """
    Call the SuperCoder API running on your local machine.
    """
    try:
        response = requests.post(
            f"{API_URL}/api/chat",
            json={
                "messages": [{"role": "user", "content": message}],
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": False
            },
            timeout=60
        )

        if response.status_code == 200:
            result = response.json()
            return result.get("response", "No response from API")
        else:
            return f"❌ API Error ({response.status_code}): {response.text}"

    except requests.exceptions.Timeout:
        return "⏱️ Request timed out. The model might be processing a complex request."

    except requests.exceptions.ConnectionError:
        return "🔌 Connection failed. Please ensure your local API server is running."

    except Exception as e:
        return f"⚠️ Error: {str(e)}"


def check_api_status() -> str:
    """Check if the API is reachable."""
    try:
        response = requests.get(f"{API_URL}/health", timeout=5)
        if response.status_code == 200:
            data = response.json()
            if data.get("model_loaded"):
                return "✅ Connected - Model Ready"
            else:
                return "⚠️ Connected but model not loaded"
        else:
            return f"❌ API returned status {response.status_code}"
    except Exception:
        return "🔴 Not connected to API"


# ============================================================================
# Gradio Interface
# ============================================================================
def chat_interface(message: str, history: List[Tuple[str, str]],
                   temperature: float, max_tokens: int) -> Tuple[List[Tuple[str, str]], str]:
    """Handle chat interaction."""
    if not message.strip():
        return history, ""

    # Add user message to history
    history = history + [(message, None)]

    # Get AI response
    response = call_api(message, temperature, max_tokens)

    # Update history with response
    history[-1] = (message, response)

    return history, ""


# ============================================================================
# Quick Action Templates
# ============================================================================
QUICK_ACTIONS = {
    "Explain Code": "Explain the following code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Debug Code": "Help me debug this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Write Function": "Write a Python function that:",
    "Optimize Code": "Optimize this code for better performance:\n\n```python\n# PASTE YOUR CODE HERE\n```",
    "Add Comments": "Add detailed comments to this code:\n\n```python\n# PASTE YOUR CODE HERE\n```",
}

def use_template(template_name: str) -> str:
    """Return the selected template."""
    return QUICK_ACTIONS.get(template_name, "")


# ============================================================================
# Build Gradio UI
# ============================================================================
with gr.Blocks(
    title="SuperCoder Pro",
    theme=gr.themes.Soft(primary_hue="indigo"),
    css="""
    .container { max-width: 1200px; margin: auto; }
    .status-box { padding: 10px; border-radius: 5px; margin: 10px 0; }
    .status-connected { background-color: #d4edda; }
    .status-disconnected { background-color: #f8d7da; }
    """
) as demo:

    # Header
    gr.Markdown("""
    # 🤖 SuperCoder Pro
    ### AI-Powered Coding Assistant

    Your personal AI coding assistant powered by local hardware. Ask me to write,
    explain, debug, or optimize code!

    ---
    """)

    # API Status
    with gr.Row():
        status_text = gr.Textbox(
            value=check_api_status(),
            label="🔌 API Status",
            interactive=False,
            show_label=True
        )
        refresh_btn = gr.Button("🔄 Refresh Status", size="sm")

    refresh_btn.click(
        fn=check_api_status,
        outputs=status_text
    )

    # Main Interface
    with gr.Row():
        # Left Column: Chat
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="💬 Conversation",
                height=500,
                show_copy_button=True,
                avatar_images=(None, "🤖")
            )

            with gr.Row():
                msg_input = gr.Textbox(
                    placeholder="Ask me to write, explain, debug, or review code...",
                    show_label=False,
                    scale=5,
                    lines=2
                )
                send_btn = gr.Button("Send 🚀", scale=1, variant="primary")

        # Right Column: Settings & Actions
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")

            temperature = gr.Slider(
                0.0, 1.0,
                value=0.1,
                step=0.05,
                label="🌡️ Temperature",
                info="Lower = precise, Higher = creative"
            )

            max_tokens = gr.Slider(
                128, 2048,
                value=512,
                step=128,
                label="📏 Max Tokens",
                info="Response length limit"
            )

            gr.Markdown("### 🎯 Quick Actions")

            template_dropdown = gr.Dropdown(
                choices=list(QUICK_ACTIONS.keys()),
                label="Select Template",
                value=None
            )

            use_template_btn = gr.Button("Use Template", size="sm")

            clear_btn = gr.Button("🗑️ Clear Chat", variant="stop", size="sm")

    # Event Handlers
    msg_input.submit(
        fn=chat_interface,
        inputs=[msg_input, chatbot, temperature, max_tokens],
        outputs=[chatbot, msg_input]
    )

    send_btn.click(
        fn=chat_interface,
        inputs=[msg_input, chatbot, temperature, max_tokens],
        outputs=[chatbot, msg_input]
    )

    use_template_btn.click(
        fn=use_template,
        inputs=[template_dropdown],
        outputs=[msg_input]
    )

    clear_btn.click(
        fn=lambda: ([], ""),
        outputs=[chatbot, msg_input]
    )

    # Footer
    gr.Markdown("""
    ---

    ### 💡 Tips
    - **Be specific** in your requests for better results
    - **Paste code** directly in your messages
    - Use **templates** for common tasks
    - Adjust **temperature** for more creative or precise outputs

    ### ⚠️ Important
    This Space connects to a **locally-running** AI model via tunnel.
    If you see connection errors, the local server may be offline.

    ### 🔒 Privacy
    - All processing happens on the owner's local machine
    - No data is stored by Hugging Face
    - Each chat session is independent

    ---

    **Built with ❤️ using llama.cpp and Gradio**
    """)


# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
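
For reference, the frontend above assumes a local backend that exposes GET /health (returning a JSON body with a model_loaded flag) and POST /api/chat (returning a JSON body with a response field). That backend is not part of this commit; the sketch below is a hypothetical, minimal FastAPI stand-in that matches the payload shapes app.py reads, with run_model() as a placeholder for the actual llama.cpp inference call.

# server.py - hypothetical minimal backend matching the contract app.py expects.
# The file name, the FastAPI choice, and run_model() are illustrative assumptions;
# only the /health and /api/chat payload shapes come from app.py above.
from typing import List

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    messages: List[Message]
    temperature: float = 0.1
    max_tokens: int = 512
    stream: bool = False


def run_model(prompt: str, temperature: float, max_tokens: int) -> str:
    # Placeholder: swap in your real llama.cpp / llama-cpp-python call here.
    return f"(echo) {prompt[:200]}"


@app.get("/health")
def health():
    # check_api_status() in app.py reads the "model_loaded" flag from this payload.
    return {"status": "ok", "model_loaded": True}


@app.post("/api/chat")
def chat(req: ChatRequest):
    # call_api() in app.py reads the "response" field from this payload.
    reply = run_model(req.messages[-1].content, req.temperature, req.max_tokens)
    return {"response": reply}

Assuming the sketch is saved as server.py, it could be run locally with "uvicorn server:app --port 8000", exposed through a tunnel such as "ngrok http 8000", and the resulting public URL pasted into API_URL at the top of app.py.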