acecalisto3 committed on
Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+
 import os
 import subprocess
 import random
@@ -7,10 +8,12 @@ from datetime import datetime
 import logging
 
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline,
-import InferenceClient, cached_download, Repository
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, HfApi
+from huggingface_hub import InferenceClient, cached_download, Repository
 from IPython.display import display, HTML
 import streamlit.components.v1 as components
+import tempfile
+import shutil
 
 # --- Configuration ---
 VERBOSE = True
@@ -59,7 +62,7 @@ def model_selection():
     st.write("Select a model to use for code generation:")
     models = ["distilbert", "t5", "codellama-7b", "geminai-1.5b"]
     selected_model = st.selectbox("Select a model:", models)
-    if
+    if selected_model:
         model = load_model(selected_model)
         if model:
             st.write(f"Model {selected_model} imported successfully!")
@@ -72,54 +75,65 @@ def run_command(command: str, project_path: str = None) -> str:
     """Executes a shell command and returns the output."""
     try:
         if project_path:
-            process = subprocess.Popen(command, shell=True,
+            process = subprocess.Popen(command, shell=True, cwd=project_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        else:
             process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         output, error = process.communicate()
         if error:
-            return f"
-            return.decode(
+            return f"Error: {error.decode('utf-8')}"
+        return output.decode('utf-8')
     except Exception as e:
-        return f"
+        return f"Error executing command: {str(e)}"
+
+def create_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Creates a new Hugging Face project."""
     global repo
-    try
+    try:
+        if os.path.exists(project_path):
+            return f"Error: Directory '{project_path}' already exists!"
         # Create the repository
         repo = Repository(local_dir=project_path, clone_from=None)
         repo.git_init()
-        # Add basic
+        # Add basic files (optional, can customize this)
+        with open(os.path.join(project_path, "README.md"), "w") as f:
+            f.write(f"# {project_name}\n\nA new Hugging Face project.")
+        # Stage all changes
+        repo.git_add(pattern="*")
         repo.git_commit(commit_message="Initial commit")
-        return f"
+        return f"Hugging Face project '{project_name}' created successfully at '{project_path}'"
     except Exception as e:
-        return f"
+        return f"Error creating Hugging Face project: {str(e)}"
+
+def list_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Lists files in the project directory."""
     try:
         files = os.listdir(project_path)
         if not files:
             return "Project directory is empty."
         return "\n".join(files)
     except Exception as e:
-        return f"
+        return f"Error listing project files: {str(e)}"
+
+def read_file(filepath: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+    """Reads and returns the content of a file in the project."""
     try:
+        full_path = os.path.join(project_path, filepath)
         with open(full_path, "r") as f:
             content = f.read()
         return content
     except Exception as e:
-        return f"
+        return f"Error reading file: {str(e)}"
+
+def write_file(filepath: str, content: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Writes content to a file in the project."""
     try:
-        full_path = os.path.join(
-        with open(full_path, "") as f:
-            f.(
-            return"Successfully wrote to '{
+        full_path = os.path.join(project_path, filepath)
+        with open(full_path, "w") as f:
+            f.write(content)
+        return f"Successfully wrote to '{full_path}'"
     except Exception as e:
-        return f"
+        return f"Error writing to file: {str(e)}"
+
 def preview(project_path: str = DEFAULT_PROJECT_PATH):
     """Provides a preview of the project, if applicable."""
     # Assuming a simple HTML preview for now
@@ -133,146 +147,166 @@ def preview(project_path: str = DEFAULT_PROJECT_PATH):
         else:
             return "No 'index.html' found for preview."
     except Exception as e:
-        return f
+        return f"Error previewing project: {str(e)}"
+
 def main():
-    gr.Markdown("## IDEvIII: Your Hugging No- App Builder")
+    with gr.Blocks() as demo:
+        gr.Markdown("## IDEvIII: Your Hugging Face No-Code App Builder")
+
+        # --- Model Selection ---
+        with gr.Tab("Model Selection"):
+            # --- Model Dropdown with Categories ---
             model_categories = gr.Dropdown(
-                choices=Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
+                choices=["Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
                 label="Model Category",
+                value="Text Generation"
+            )
+            model_name = gr.Dropdown(
+                choices=[],  # Initially empty, will be populated based on category
+                label="Hugging Face Model Name",
             )
             load_button = gr.Button("Load Model")
            load_output = gr.Textbox(label="Output")
            model_description = gr.Markdown(label="Model Description")
+
+            # --- Function to populate model names based on category ---
+            def update_model_dropdown(category):
                models = []
                api = HfApi()
                for model in api.list_models():
-                    if model.pipeline_tag ==
+                    if model.pipeline_tag == category:
                        models.append(model.modelId)
+                return gr.Dropdown.update(choices=models)
+
            # --- Event handler for category dropdown ---
            model_categories.change(
-                fn=
+                fn=update_model_dropdown,
+                inputs=model_categories,
                outputs=model_name,
            )
+
            # --- Event handler to display model description ---
            def display_model_description(model_name):
                global model_descriptions
                if model_name in model_descriptions:
-                    return model_descriptions[
+                    return model_descriptions[model_name]
                else:
-                    return "Model description available."
+                    return "Model description not available."
            model_name.change(
+                fn=display_model_description,
                inputs=model_name,
                outputs=model_description,
            )
+
            # --- Event handler to load the selected model ---
            def load_selected_model(model_name):
                global current_model
                load_output = load_model(model_name)
                if current_model:
-                    return f"
+                    return f"Model '{model_name}' loaded successfully!"
                else:
-                    return f"
+                    return f"Error loading model '{model_name}'"
            load_button.click(load_selected_model, inputs=model_name, outputs=load_output)
+
        # --- Chat Interface ---
-        with gr.Tab("Chat
-            chatbot gr.Chatbot(show_label=False, show_share_button=
-            message = gr.Textbox(Enter your message="Ask me anything!")
-            purpose = gr.Textbox(label="Purpose", placeholder="What is the of this interaction)
-            agent_name = gr.(label="
-            temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more
+        with gr.Tab("Chat"):
+            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True)
+            message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+            purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+            agent_name = gr.Textbox(label="Agent Name", value="Generic Agent", interactive=True)
+            sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+            temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more random results")
+            max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum number of new tokens")
+            top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+            repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
            submit_button = gr.Button(value="Send")
            history = gr.State([])
+
+            def run_chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
                if not current_model:
                    return [(history, history), "Please load a model first."]
+                def generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
+                    if not current_model:
+                        return "Please load a model first."
+                    conversation = [{"role": "system", "content": sys_prompt}]
+                    for message, response in history:
+                        conversation.append({"role": "user", "content": message})
+                        conversation.append({"role": "assistant", "content": response})
+                    conversation.append({"role": "user", "content": message})
+                    response = current_model.generate(
+                        conversation,
+                        max_new_tokens=max_new_tokens,
+                        temperature=temperature,
+                        top_p=top_p,
+                        repetition_penalty=repetition_penalty
+                    )
+                    return response.text.strip()
+                response_text = generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+                history.append((message, response_text))
+                return history, history
+
+            submit_button.click(run_chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+        # --- Project Management ---
+        with gr.Tab("Project Management"):
+            project_name_input = gr.Textbox(label="Project Name", placeholder="Enter project name")
+            create_project_button = gr.Button("Create Project")
+            project_output = gr.Textbox(label="Output")
+
+            def create_project_action(project_name):
+                return create_project(project_name)
+
+            create_project_button.click(create_project_action, inputs=project_name_input, outputs=project_output)
+
+            list_files_button = gr.Button("List Files")
+            list_files_output = gr.Textbox(label="Files")
+
+            def list_files_action():
+                return list_files()
+
+            list_files_button.click(list_files_action, outputs=list_files_output)
+
+            file_path_input = gr.Textbox(label="File Path", placeholder="Enter file path")
+            read_file_button = gr.Button("Read File")
+            read_file_output = gr.Textbox(label="File Content")
+
+            def read_file_action(file_path):
+                return read_file(file_path)
+
+            read_file_button.click(read_file_action, inputs=file_path_input, outputs=read_file_output)
+
+            write_file_button = gr.Button("Write File")
+            file_content_input = gr.Textbox(label="File Content", placeholder="Enter file content")
+
+            def write_file_action(file_path, file_content):
+                return write_file(file_path, file_content)
+
+            write_file_button.click(write_file_action, inputs=[file_path_input, file_content_input], outputs=project_output)
+
+            run_command_input = gr.Textbox(label="Command", placeholder="Enter command")
+            run_command_button = gr.Button("Run Command")
+            run_command_output = gr.Textbox(label="Command Output")
+
+            def run_command_action(command):
+                return run_command(command)
+
+            run_command_button.click(run_command_action, inputs=run_command_input, outputs=run_command_output)
+
+            preview_button = gr.Button("Preview Project")
+            preview_output = gr.Textbox(label="Preview URL")
+
+            def preview_action():
+                return preview()
+
+            preview_button.click(preview_action, outputs=preview_output)
+
+    # Custom server settings
+    server_name = "0.0.0.0"  # Listen on all available network interfaces
+    server_port = 7860  # Choose an available port
+    share_gradio_link = True  # Share a public URL for the app
+
     # Launch the interface
-    demo.launch(server_name=
+    demo.launch(server_name=server_name, server_port=server_port, share=share_gradio_link)
+
+if __name__ == "__main__":
     main()
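Review note: even with this commit applied, app.py still has import gaps. run_chat's annotations use List and Tuple, which are never imported; model_selection() calls st.write/st.selectbox while only streamlit.components.v1 is imported; and HfApi is imported from transformers, although in current releases it lives in huggingface_hub (where cached_download is meanwhile deprecated). update_model_dropdown also compares model.pipeline_tag against display labels such as "Text Generation", while the Hub reports lowercase tags such as "text-generation", and iterating api.list_models() unfiltered walks the entire Hub. A minimal sketch of those fixes, assuming Gradio 3.x and a recent huggingface_hub; the CATEGORY_TO_TAG mapping is a hypothetical helper, not part of this commit:

    from typing import List, Tuple      # used by run_chat's annotations
    import gradio as gr
    import streamlit as st              # used by model_selection()
    from huggingface_hub import HfApi   # HfApi lives here, not in transformers

    # Hypothetical mapping from the UI's category labels to Hub pipeline tags.
    CATEGORY_TO_TAG = {
        "Text Generation": "text-generation",
        "Text Summarization": "summarization",
        "Code Generation": "text-generation",
        "Translation": "translation",
        "Question Answering": "question-answering",
    }

    def update_model_dropdown(category):
        # Filter server-side and cap the result instead of walking every model on the Hub.
        api = HfApi()
        models = [m.modelId for m in api.list_models(filter=CATEGORY_TO_TAG.get(category), limit=50)]
        return gr.Dropdown.update(choices=models)  # on Gradio 4.x, return gr.update(choices=models)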
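Review note: generate_response passes the conversation list of role/content dicts straight to current_model.generate(...) and reads response.text, but a transformers AutoModelForCausalLM generates from token ids and returns tensors. A minimal sketch of a working path, assuming load_model keeps the AutoTokenizer alongside the model and the tokenizer defines a chat template (transformers >= 4.34); generate_reply is a hypothetical helper, not part of this commit:

    def generate_reply(conversation, tokenizer, model, max_new_tokens, temperature, top_p, repetition_penalty):
        # Render the [{"role": ..., "content": ...}] list into input ids via the chat template.
        input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
        )
        # Decode only the newly generated tokens, not the echoed prompt.
        return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()

run_chat's early return [(history, history), "Please load a model first."] also does not match its outputs=[chatbot, history]; returning the unchanged history twice, optionally with the error appended as a chat turn, would.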