acecalisto3 committed
Commit 15b16d0
1 parent: df6a1f6

Update app.py

Files changed (1):
  1. app.py +139 -291
app.py CHANGED
@@ -5,13 +5,11 @@ import time
 from typing import Dict, List, Tuple
 from datetime import datetime
 import logging
+
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, huggingface_hub
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from huggingface_hub import InferenceClient, cached_download, Repository, HfApi
 from IPython.display import display, HTML
-import streamlit.components.v1 as components
-import tempfile
-import shutil
 
 # --- Configuration ---
 VERBOSE = True
@@ -33,12 +31,56 @@ logging.basicConfig(
 current_model = None  # Store the currently loaded model
 repo = None  # Store the Hugging Face Repository object
 model_descriptions = {}  # Store model descriptions
-project_path = DEFAULT_PROJECT_PATH  # Default project path
 
 # --- Functions ---
+def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
+    prompt = ""
+    for user_prompt, bot_response in history[-max_history_turns:]:
+        prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
+    prompt += f"Human: {message}\nAssistant:"
+    return prompt
+
+def generate_response(
+    prompt: str,
+    history: List[Tuple[str, str]],
+    agent_name: str = "Generic Agent",
+    sys_prompt: str = "",
+    temperature: float = TEMPERATURE,
+    max_new_tokens: int = MAX_TOKENS,
+    top_p: float = TOP_P,
+    repetition_penalty: float = REPETITION_PENALTY,
+) -> str:
+    global current_model
+    if current_model is None:
+        return "Error: Please load a model first."
+
+    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    full_prompt = PREFIX.format(
+        date_time_str=date_time_str,
+        purpose=sys_prompt,
+        agent_name=agent_name
+    ) + format_prompt(prompt, history)
+
+    if VERBOSE:
+        logging.info(LOG_PROMPT.format(content=full_prompt))
+
+    response = current_model(
+        full_prompt,
+        max_new_tokens=max_new_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True
+    )[0]['generated_text']
+
+    assistant_response = response.split("Assistant:")[-1].strip()
+
+    if VERBOSE:
+        logging.info(LOG_RESPONSE.format(resp=assistant_response))
 
+    return assistant_response
 
-def load_model(model_name: str):
+def load_hf_model(model_name: str):
     """Loads a language model and fetches its description."""
     global current_model, model_descriptions
     try:
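The behavior of the new `format_prompt` helper is easiest to see on a concrete input. A minimal standalone sketch (function body copied from the hunk above; the sample history is illustrative):

```python
from typing import List, Tuple

def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
    # Keep only the most recent turns so the assembled prompt stays bounded.
    prompt = ""
    for user_prompt, bot_response in history[-max_history_turns:]:
        prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
    prompt += f"Human: {message}\nAssistant:"
    return prompt

history = [("Hi", "Hello!"), ("What is Gradio?", "A UI library."), ("Thanks", "Anytime.")]
print(format_prompt("Build me an app", history))
# Only the last two turns survive the truncation:
# Human: What is Gradio?
# Assistant: A UI library.
# Human: Thanks
# Assistant: Anytime.
# Human: Build me an app
# Assistant:
```

Because `generate_response` later splits on the last `"Assistant:"`, this trailing marker is what lets it isolate the newly generated text.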
@@ -47,8 +89,9 @@ def load_model(model_name: str):
             "text-generation",
             model=model_name,
             tokenizer=tokenizer,
-            model_kwargs={"load_in_8bit": True},
+            model_kwargs={"load_in_8bit": True}
         )
+
         # Fetch and store the model description
         api = HfApi()
         model_info = api.model_info(model_name)
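For context on the `load_in_8bit` flag kept in this hunk: `pipeline` forwards `model_kwargs` to the underlying `from_pretrained` call, and 8-bit loading only works when the `bitsandbytes` package and a CUDA GPU are available. A minimal sketch, using `gpt2` as a stand-in for the user-selected model id:

```python
from transformers import AutoTokenizer, pipeline

model_name = "gpt2"  # stand-in; the app passes the user-selected model id
tokenizer = AutoTokenizer.from_pretrained(model_name)

# model_kwargs are forwarded to the model's from_pretrained();
# load_in_8bit requires the bitsandbytes package and a CUDA device.
generator = pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,
    model_kwargs={"load_in_8bit": True},
)
print(generator("Hello", max_new_tokens=8)[0]["generated_text"])
```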
@@ -57,52 +100,43 @@ def load_model(model_name: str):
     except Exception as e:
         return f"Error loading model: {str(e)}"
 
-
-def run_command(command: str, project_path: str = None) -> str:
+def execute_command(command: str, project_path: str = None) -> str:
     """Executes a shell command and returns the output."""
     try:
         if project_path:
-            process = subprocess.Popen(
-                command,
-                shell=True,
-                cwd=project_path,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
-            )
+            process = subprocess.Popen(command, shell=True, cwd=project_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         else:
-            process = subprocess.Popen(
-                command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
-            )
+            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         output, error = process.communicate()
         if error:
-            return f"""Error: {error.decode('utf-8')}"""
+            return f"Error: {error.decode('utf-8')}"
         return output.decode("utf-8")
     except Exception as e:
-        return f"""Error executing command: {str(e)}"""
-
+        return f"Error executing command: {str(e)}"
 
-def create_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
+def create_hf_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
     """Creates a new Hugging Face project."""
-    global repo, project_path
+    global repo
     try:
         if os.path.exists(project_path):
-            return f"""Error: Directory '{project_path}' already exists!"""
+            return f"Error: Directory '{project_path}' already exists!"
         # Create the repository
         repo = Repository(local_dir=project_path, clone_from=None)
         repo.git_init()
-        # Add basic files (optional, can customize this)
+
+        # Add basic files (optional, you can customize this)
         with open(os.path.join(project_path, "README.md"), "w") as f:
-            f.write(f"{project_name}\n\nA new Hugging Face project.")
+            f.write(f"# {project_name}\n\nA new Hugging Face project.")
+
         # Stage all changes
         repo.git_add(pattern="*")
         repo.git_commit(commit_message="Initial commit")
-        project_path = os.path.join(project_path, project_name)  # Update project path
-        return f"""Hugging Face project '{project_name}' created successfully at '{project_path}'"""
-    except Exception as e:
-        return f"""Error creating Hugging Face project: {str(e)}"""
 
+        return f"Hugging Face project '{project_name}' created successfully at '{project_path}'"
+    except Exception as e:
+        return f"Error creating Hugging Face project: {str(e)}"
 
-def list_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
+def list_project_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Lists files in the project directory."""
     try:
         files = os.listdir(project_path)
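The `Popen`/`communicate` pattern kept in `execute_command` blocks until the command exits and returns stdout and stderr as bytes; note that anything written to stderr, including warnings, is treated as a failure. A self-contained sketch of the same pattern:

```python
import subprocess

def run(command: str, cwd: str = None) -> str:
    # shell=True lets the user pass a full shell line, at the cost of
    # shell-injection risk if the input is untrusted.
    process = subprocess.Popen(
        command, shell=True, cwd=cwd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    output, error = process.communicate()  # blocks until the command exits
    if error:
        return f"Error: {error.decode('utf-8')}"
    return output.decode("utf-8")

print(run("echo hello"))  # -> "hello\n"
```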
@@ -110,10 +144,9 @@ def list_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
             return "Project directory is empty."
         return "\n".join(files)
     except Exception as e:
-        return f"""Error listing project files: {str(e)}"""
-
+        return f"Error listing project files: {str(e)}"
 
-def read_file(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+def read_file_content(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Reads and returns the content of a file in the project."""
     try:
         full_path = os.path.join(project_path, file_path)
@@ -121,21 +154,19 @@ def read_file(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
             content = f.read()
         return content
     except Exception as e:
-        return f"""Error reading file: {str(e)}"""
-
+        return f"Error reading file: {str(e)}"
 
-def write_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH):
+def write_to_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
     """Writes content to a file in the project."""
     try:
         full_path = os.path.join(project_path, file_path)
         with open(full_path, "w") as f:
             f.write(content)
-        return f"Successfully wrote to '{full_path}'"
+        return f"Successfully wrote to '{file_path}'"
     except Exception as e:
-        return f"""Error writing to file: {str(e)}"""
-
+        return f"Error writing to file: {str(e)}"
 
-def preview(project_path: str = DEFAULT_PROJECT_PATH):
+def preview_project(project_path: str = DEFAULT_PROJECT_PATH):
     """Provides a preview of the project, if applicable."""
     # Assuming a simple HTML preview for now
     try:
@@ -148,140 +179,19 @@ def preview(project_path: str = DEFAULT_PROJECT_PATH):
         else:
             return "No 'index.html' found for preview."
     except Exception as e:
-        return f"""Error previewing project: {str(e)}"""
-
-
-def generate_response(
-    message: str,
-    history: List[Tuple[str, str]],
-    agent_name: str,
-    sys_prompt: str,
-    temperature: float,
-    max_new_tokens: int,
-    top_p: float,
-    repetition_penalty: float,
-) -> str:
-    """Generates a response using the loaded model."""
-    if not current_model:
-        return "Please load a model first."
-    conversation = [{"role": "system", "content": sys_prompt}]
-    for message, response in history:
-        conversation.append({"role": "user", "content": message})
-        conversation.append({"role": "assistant", "content": response})
-    conversation.append({"role": "user", "content": message})
-    response = current_model.generate(
-        conversation,
-        max_new_tokens=max_new_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-    )
-    return response.text.strip()
-
-
-def run_chat(
-    purpose: str,
-    message: str,
-    agent_name: str,
-    sys_prompt: str,
-    temperature: float,
-    max_new_tokens: int,
-    top_p: float,
-    repetition_penalty: float,
-    history: List[Tuple[str, str]],
-) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
-    """Handles the chat interaction."""
-    if not current_model:
-        return [(history, history), "Please load a model first."]
-    response = generate_response(
-        message,
-        history,
-        agent_name,
-        sys_prompt,
-        temperature,
-        max_new_tokens,
-        top_p,
-        repetition_penalty,
-    )
-    history.append((message, response))
-    return [(history, history), response]
-
-
-def update_model_dropdown(category):
-    """Populates the model dropdown based on the selected category."""
-    models = []
-    api = HfApi()
-    for model in api.list_models():
-        if model.pipeline_tag == category:
-            models.append(model.modelId)
-    return gr.Dropdown.update(choices=models)
-
-
-def display_model_description(model_name):
-    """Displays the description of the selected model."""
-    global model_descriptions
-    if model_name in model_descriptions:
-        return model_descriptions[model_name]
-    else:
-        return "Model description not available."
-
-
-def load_selected_model(model_name):
-    """Loads the selected model."""
-    global current_model
-    load_output = load_model(model_name)
-    if current_model:
-        return f"""Model '{model_name}' loaded successfully!"""
-    else:
-        return f"""Error loading model '{model_name}'"""
-
-
-def create_project_handler(project_name):
-    """Handles the creation of a new project."""
-    return create_project(project_name)
-
-
-def list_files_handler():
-    """Handles the listing of files in the project directory."""
-    return list_files(project_path)
-
-
-def read_file_handler(file_path):
-    """Handles the reading of a file in the project."""
-    return read_file(file_path, project_path)
-
-
-def write_file_handler(file_path, file_content):
-    """Handles the writing of content to a file in the project."""
-    return write_file(file_path, file_content, project_path)
-
-
-def run_command_handler(command):
-    """Handles the execution of a shell command."""
-    return run_command(command, project_path)
-
-
-def preview_handler():
-    """Handles the preview of the project."""
-    return preview(project_path)
-
+        return f"Error previewing project: {str(e)}"
 
 def main():
-    """Main function to launch the Gradio interface."""
     with gr.Blocks() as demo:
-        gr.Markdown("## IDEvIII: Your Hugging Face No-Code App Builder")
+        gr.Markdown("## FragMixt: Your Hugging Face No-Code App Builder")
+
         # --- Model Selection ---
         with gr.Tab("Model"):
+            # --- Model Dropdown with Categories ---
            model_categories = gr.Dropdown(
-                choices=[
-                    "Text Generation",
-                    "Text Summarization",
-                    "Code Generation",
-                    "Translation",
-                    "Question Answering",
-                ],
+                choices=["Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
                 label="Model Category",
-                value="Text Generation",
+                value="Text Generation"
             )
             model_name = gr.Dropdown(
                 choices=[],  # Initially empty, will be populated based on category
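One bug this hunk removes: the deleted `generate_response` called `current_model.generate(...)` and read `response.text`, but `current_model` holds a `transformers` pipeline, which is invoked directly and returns a list of dicts rather than an object with a `.text` attribute. The replacement earlier in the commit calls the pipeline itself; a minimal sketch of that calling convention, with `gpt2` as a stand-in model id:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")  # stand-in model id

# A pipeline is called directly; sampling arguments are forwarded to the
# underlying model.generate(), and the result is a list of dicts.
result = generator(
    "Human: Hi\nAssistant:",
    max_new_tokens=16,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.2,
)
print(result[0]["generated_text"])
```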
@@ -291,142 +201,80 @@ def main():
             load_output = gr.Textbox(label="Output")
             model_description = gr.Markdown(label="Model Description")
 
+            # --- Function to populate model names based on category ---
+            def update_model_dropdown(category):
+                models = []
+                api = HfApi()
+                for model in api.list_models():
+                    if model.pipeline_tag == category:
+                        models.append(model.modelId)
+                return gr.Dropdown.update(choices=models)
+
+            # --- Event handler for category dropdown ---
             model_categories.change(
-                fn=update_model_dropdown, inputs=model_categories, outputs=model_name
+                fn=update_model_dropdown,
+                inputs=model_categories,
+                outputs=model_name,
             )
+
+            # --- Event handler to display model description ---
+            def display_model_description(model_name):
+                global model_descriptions
+                if model_name in model_descriptions:
+                    return model_descriptions[model_name]
+                else:
+                    return "Model description not available."
+
             model_name.change(
-                fn=display_model_description, inputs=model_name, outputs=model_description
-            )
-            load_button.click(
-                load_selected_model, inputs=model_name, outputs=load_output
+                fn=display_model_description,
+                inputs=model_name,
+                outputs=model_description,
             )
 
+            load_button.click(load_hf_model, inputs=model_name, outputs=load_output)
+
         # --- Chat Interface ---
         with gr.Tab("Chat"):
-            chatbot = gr.Chatbot(
-                show_label=False,
-                show_share_button=False,
-                show_copy_button=True,
-                likeable=True,
-            )
-            message = gr.Textbox(
-                label="Enter your message", placeholder="Ask me anything!"
-            )
-            purpose = gr.Textbox(
-                label="Purpose", placeholder="What is the purpose of this interaction?"
-            )
-            agent_name = gr.Textbox(
-                label="Agent Name", value="Generic Agent", interactive=True
-            )
-            sys_prompt = gr.Textbox(
-                label="System Prompt", max_lines=1, interactive=True
-            )
-            temperature = gr.Slider(
-                label="Temperature",
-                value=TEMPERATURE,
-                minimum=0.0,
-                maximum=1.0,
-                step=0.05,
-                interactive=True,
-                info="Higher values produce more creative text.",
-            )
-            max_new_tokens = gr.Slider(
-                label="Max new tokens",
-                value=MAX_TOKENS,
-                minimum=0,
-                maximum=1048 * 10,
-                step=64,
-                interactive=True,
-                info="The maximum number of new tokens to generate.",
-            )
-            top_p = gr.Slider(
-                label="Top-p (nucleus sampling)",
-                value=TOP_P,
-                minimum=0,
-                maximum=1,
-                step=0.05,
-                interactive=True,
-                info="Higher values sample more low-probability tokens.",
-            )
-            repetition_penalty = gr.Slider(
-                label="Repetition penalty",
-                value=REPETITION_PENALTY,
-                minimum=1.0,
-                maximum=2.0,
-                step=0.05,
-                interactive=True,
-                info="Penalize repeated tokens.",
-            )
+            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True)
+            message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+            purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+            agent_name = gr.Dropdown(label="Agents", choices=["Generic Agent"], value="Generic Agent", interactive=True)
+            sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+            temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+            max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum numbers of new tokens")
+            top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+            repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
             submit_button = gr.Button(value="Send")
             history = gr.State([])
-            submit_button.click(
-                run_chat,
-                inputs=[
-                    purpose,
-                    message,
-                    agent_name,
-                    sys_prompt,
-                    temperature,
-                    max_new_tokens,
-                    top_p,
-                    repetition_penalty,
-                    history,
-                ],
-                outputs=[chatbot, history],
-            )
+
+            def run_chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+                response = generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+                history.append((message, response))
+                return history, history
+
+            submit_button.click(run_chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
 
         # --- Project Management ---
         with gr.Tab("Project"):
-            project_name = gr.Textbox(label="Project Name")
-            create_project_button = gr.Button("Create Project")
-            create_project_output = gr.Textbox(label="Output")
-            list_files_button = gr.Button("List Files")
-            list_files_output = gr.Textbox(label="Output")
-            file_path = gr.Textbox(label="File Path")
-            read_file_button = gr.Button("Read File")
-            read_file_output = gr.Textbox(label="Output")
-            file_content = gr.Textbox(label="File Content")
-            write_file_button = gr.Button("Write File")
-            write_file_output = gr.Textbox(label="Output")
-            run_command_input = gr.Textbox(label="Command")
+            project_name = gr.Textbox(label="Project Name", placeholder="MyHuggingFaceApp")
+            create_project_button = gr.Button("Create Hugging Face Project")
+            project_output = gr.Textbox(label="Output", lines=5)
+            file_content = gr.Code(label="File Content", language="python", lines=20)
+            file_path = gr.Textbox(label="File Path (relative to project)", placeholder="src/main.py")
+            read_button = gr.Button("Read File")
+            write_button = gr.Button("Write to File")
+            command_input = gr.Textbox(label="Terminal Command", placeholder="pip install -r requirements.txt")
+            command_output = gr.Textbox(label="Command Output", lines=5)
             run_command_button = gr.Button("Run Command")
-            run_command_output = gr.Textbox(label="Output")
-            preview_button = gr.Button("Preview")
-            preview_output = gr.Textbox(label="Output")
+            preview_button = gr.Button("Preview Project")
 
-            create_project_button.click(
-                create_project_handler, inputs=project_name, outputs=create_project_output
-            )
-            list_files_button.click(
-                list_files_handler, outputs=list_files_output
-            )
-            read_file_button.click(
-                read_file_handler, inputs=file_path, outputs=read_file_output
-            )
-            write_file_button.click(
-                write_file_handler,
-                inputs=[file_path, file_content],
-                outputs=write_file_output,
-            )
-            run_command_button.click(
-                run_command_handler, inputs=run_command_input, outputs=run_command_output
-            )
-            preview_button.click(
-                preview_handler, outputs=preview_output
-            )
+            create_project_button.click(create_hf_project, inputs=[project_name], outputs=project_output)
+            read_button.click(read_file_content, inputs=file_path, outputs=file_content)
+            write_button.click(write_to_file, inputs=[file_path, file_content], outputs=project_output)
+            run_command_button.click(execute_command, inputs=command_input, outputs=command_output)
+            preview_button.click(preview_project, outputs=project_output)
 
-    # --- Custom Server Settings ---
-    server_name = "0.0.0.0"  # Listen on available network interfaces
-    server_port = 7860  # Choose an available port
-    share_gradio_link = True  # Share a public URL for the app
-
-    # --- Launch the Interface ---
-    demo.launch(
-        server_name=server_name,
-        server_port=server_port,
-        share=share_gradio_link,
-    )
+    demo.launch()
 
-    gr.load("models/mistralai/Mistral-Large-Instruct-2407").launch()
 if __name__ == "__main__":
-    main()
+    main()
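The UI wiring throughout `main()` follows the standard Gradio Blocks pattern: a component event (`.change`, `.click`) binds a callback with explicit `inputs` and `outputs` components. One caveat: `gr.Dropdown.update(...)` is the Gradio 3.x idiom; in Gradio 4.x a callback returns `gr.Dropdown(choices=...)` instead. A self-contained sketch of the pattern, assuming Gradio 3.x to match the commit:

```python
import gradio as gr

def update_choices(category):
    # Gradio 3.x idiom, matching the commit; in Gradio 4.x,
    # return gr.Dropdown(choices=...) instead.
    choices = {"Fruit": ["apple", "pear"], "Veg": ["leek", "kale"]}[category]
    return gr.Dropdown.update(choices=choices)

with gr.Blocks() as demo:
    category = gr.Dropdown(choices=["Fruit", "Veg"], label="Category", value="Fruit")
    item = gr.Dropdown(choices=[], label="Item")
    # Changing the category repopulates the dependent dropdown.
    category.change(fn=update_choices, inputs=category, outputs=item)

if __name__ == "__main__":
    demo.launch()
```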