acecalisto3 committed on
Commit 1a022bd
1 Parent(s): 9c62ad1

Update app.py

Files changed (1)
  1. app.py +258 -322
app.py CHANGED
@@ -1,344 +1,280 @@
  import os
  import subprocess
  import random
- from huggingface_hub import InferenceClient
- import gradio as gr
- from safe_search import safe_search
- from i_search import google
- from i_search import i_search as i_s
- from agent import (
-     ACTION_PROMPT,
-     ADD_PROMPT,
-     COMPRESS_HISTORY_PROMPT,
-     LOG_PROMPT,
-     LOG_RESPONSE,
-     MODIFY_PROMPT,
-     PREFIX,
-     SEARCH_QUERY,
-     READ_PROMPT,
-     TASK_PROMPT,
-     UNDERSTAND_TEST_RESULTS_PROMPT,
- )
- from utils import parse_action, parse_file_content, read_python_module_structure
  from datetime import datetime

- now = datetime.now()
- date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-Instruct-v0.1"
  )

- ############################################
-
- VERBOSE = True
- MAX_HISTORY = 100
- # MODEL = "gpt-3.5-turbo" # "gpt-4"
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
      return prompt

- def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
-     seed = random.randint(1, 1111111111111111)
-     print(seed)
-     generate_kwargs = dict(
-         temperature=1.0,
-         max_new_tokens=2096,
-         top_p=0.99,
-         repetition_penalty=1.0,
-         do_sample=True,
-         seed=seed,
-     )
-
-     content = PREFIX.format(
          date_time_str=date_time_str,
-         purpose=purpose,
-         safe_search=safe_search,
-     ) + prompt_template.format(**prompt_kwargs)
      if VERBOSE:
-         print(LOG_PROMPT.format(content))

-     # formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-     # formatted_prompt = format_prompt(f'{content}', history)

-     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     resp = ""
-     for response in stream:
-         resp += response.token.text

      if VERBOSE:
-         print(LOG_RESPONSE.format(resp))
-     return resp
-
- def compress_history(purpose, task, history, directory):
-     resp = run_gpt(
-         COMPRESS_HISTORY_PROMPT,
-         stop_tokens=["observation:", "task:", "action:", "thought:"],
-         max_tokens=512,
-         purpose=purpose,
-         task=task,
-         history=history,
-     )
-     history = "observation: {}\n".format(resp)
-     return history
-
- def call_search(purpose, task, history, directory, action_input):
-     print("CALLING SEARCH")
      try:
-         if "http" in action_input:
-             if "<" in action_input:
-                 action_input = action_input.strip("<")
-             if ">" in action_input:
-                 action_input = action_input.strip(">")
-
-             response = i_s(action_input)
-             # response = google(search_return)
-             print(response)
-             history += "observation: search result is: {}\n".format(response)
-         else:
-             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
      except Exception as e:
-         history += "observation: {}'\n".format(e)
-     return "MAIN", None, history, task
-
- def call_main(purpose, task, history, directory, action_input):
-     resp = run_gpt(
-         ACTION_PROMPT,
-         stop_tokens=["observation:", "task:", "action:","thought:"],
-         max_tokens=2096,
-         purpose=purpose,
-         task=task,
-         history=history,
-     )
-     lines = resp.strip().strip("\n").split("\n")
-     for line in lines:
-         if line == "":
-             continue
-         if line.startswith("thought: "):
-             history += "{}\n".format(line)
-         elif line.startswith("action: "):
-             action_name, action_input = parse_action(line)
-             print(f'ACTION_NAME :: {action_name}')
-             print(f'ACTION_INPUT :: {action_input}')
-
-             history += "{}\n".format(line)
-             if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                 task = "END"
-                 return action_name, action_input, history, task
-             else:
-                 return action_name, action_input, history, task
          else:
-             history += "{}\n".format(line)
-             # history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-     # return action_name, action_input, history, task
-     # assert False, "unknown action: {}".format(line)
-     return "MAIN", None, history, task
-
- def call_set_task(purpose, task, history, directory, action_input):
-     task = run_gpt(
-         TASK_PROMPT,
-         stop_tokens=[],
-         max_tokens=64,
-         purpose=purpose,
-         task=task,
-         history=history,
-     ).strip("\n")
-     history += "observation: task has been updated to: {}\n".format(task)
-     return "MAIN", None, history, task
-
- def end_fn(purpose, task, history, directory, action_input):
-     task = "END"
-     return "COMPLETE", "COMPLETE", history, task
-
- NAME_TO_FUNC = {
-     "MAIN": call_main,
-     "UPDATE-TASK": call_set_task,
-     "SEARCH": call_search,
-     "COMPLETE": end_fn,
- }
-
- def run_action(purpose, task, history, directory, action_name, action_input):
-     print(f'action_name::{action_name}')
      try:
-         if "RESPONSE" in action_name or "COMPLETE" in action_name:
-             action_name = "COMPLETE"
-             task = "END"
-             return action_name, "COMPLETE", history, task
-
-         # compress the history when it is long
-         if len(history.split("\n")) > MAX_HISTORY:
-             if VERBOSE:
-                 print("COMPRESSING HISTORY")
-             history = compress_history(purpose, task, history, directory)
-         if not action_name in NAME_TO_FUNC:
-             action_name = "MAIN"
-         if action_name == "" or action_name == None:
-             action_name = "MAIN"
-         assert action_name in NAME_TO_FUNC
-
-         print("RUN: ", action_name, action_input)
-         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
      except Exception as e:
-         history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
-         return "MAIN", None, history, task
-
- def run(purpose, history):
-     # print(purpose)
-     # print(hist)
-     task = None
-     directory = "./"
-     if history:
-         history = str(history).strip("[]")
-     if not history:
-         history = ""
-
-     action_name = "UPDATE-TASK" if task is None else "MAIN"
-     action_input = None
-     while True:
-         print("")
-         print("")
-         print("---")
-         print("purpose:", purpose)
-         print("task:", task)
-         print("---")
-         print(history)
-         print("---")
-
-         action_name, action_input, history, task = run_action(
-             purpose,
-             task,
-             history,
-             directory,
-             action_name,
-             action_input,
-         )
-         yield (history)
-         # yield ("",[(purpose,history)])
-         if task == "END":
-             return (history)
-             # return ("", [(purpose,history)])
-
- ################################################
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt

- AGENTS = [
-     "WEB_DEV",
-     "AI_SYSTEM_PROMPT",
-     "PYTHON_CODE_DEV"
- ]
-
- def generate(prompt, history, agent_name=AGENTS[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
-     seed = random.randint(1, 1111111111111111)
-
-     agent = prompts.WEB_DEV
-     if agent_name == "WEB_DEV":
-         agent = prompts.WEB_DEV
-     if agent_name == "AI_SYSTEM_PROMPT":
-         agent = prompts.AI_SYSTEM_PROMPT
-     if agent_name == "PYTHON_CODE_DEV":
-         agent = prompts.PYTHON_CODE_DEV
-     system_prompt = agent
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=seed,
-     )
-
-     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
-     return output
-
- additional_inputs = [
-     gr.Dropdown(
-         label="Agents",
-         choices=[s for s in AGENTS],
-         value=AGENTS[0],
-         interactive=True,
-     ),
-     gr.Textbox(
-         label="System Prompt",
-         max_lines=1,
-         interactive=True,
-     ),
-     gr.Slider(
-         label="Temperature",
-         value=0.9,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values generate more diverse outputs.",
-     ),
-     gr.Slider(
-         label="Max New Tokens",
-         value=2048,
-         minimum=64,
-         maximum=4096,
-         step=64,
-         interactive=True,
-         info="The maximum number of new tokens to generate.",
-     ),
-     gr.Slider(
-         label="Top-p (Nucleus Sampling)",
-         value=0.90,
-         minimum=0.0,
-         maximum=1,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens.",
-     ),
-     gr.Slider(
-         label="Repetition Penalty",
-         value=1.2,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens.",
-     )
- ]
-
- customCSS = """
- #component-7 {
-     height: 1600px;
-     flex-grow: 4;
- }
- """
-
- with gr.Blocks(theme='ParityError/Interstellar') as demo:
-     gr.ChatInterface(
-         generate,
-         additional_inputs=additional_inputs,
-     )
-
- from gradio import Blocks
-
- app = gr.Blocks(analytics_enabled=False, title="Test")
-
- app.launch(share=True,)
  import os
  import subprocess
  import random
+ import time
+ from typing import Dict, List, Tuple
  from datetime import datetime
+ import logging

+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from huggingface_hub import InferenceClient, Repository, HfApi
+ from IPython.display import display, HTML

+ # --- Configuration ---
+ VERBOSE = True
+ MAX_HISTORY = 5
+ MAX_TOKENS = 2048
+ TEMPERATURE = 0.7
+ TOP_P = 0.8
+ REPETITION_PENALTY = 1.5
+ DEFAULT_PROJECT_PATH = "./my-hf-project"  # Default project directory
+
+ # --- Logging Setup ---
+ logging.basicConfig(
+     filename="app.log",
+     level=logging.INFO,
+     format="%(asctime)s - %(levelname)s - %(message)s",
  )

+ # --- Global Variables ---
+ current_model = None  # Store the currently loaded model
+ repo = None  # Store the Hugging Face Repository object
+ model_descriptions = {}  # Store model descriptions
+
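+ # --- Prompt Templates ---
+ # PREFIX, LOG_PROMPT, and LOG_RESPONSE were previously imported from agent.py,
+ # which this commit drops; these minimal stand-ins (assumed wording, adjust to
+ # taste) keep generate_response() runnable.
+ PREFIX = "You are {agent_name}. Current date/time: {date_time_str}. Purpose: {purpose}\n"
+ LOG_PROMPT = "PROMPT: {content}"
+ LOG_RESPONSE = "RESPONSE: {resp}"
+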
+ # --- Functions ---
+ def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
+     prompt = ""
+     for user_prompt, bot_response in history[-max_history_turns:]:
+         prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
+     prompt += f"Human: {message}\nAssistant:"
      return prompt

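+ # Example: format_prompt("Hi", [("Hello", "Hi there!")]) returns
+ # "Human: Hello\nAssistant: Hi there!\nHuman: Hi\nAssistant:"
+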
+ def generate_response(
+     prompt: str,
+     history: List[Tuple[str, str]],
+     agent_name: str = "Generic Agent",
+     sys_prompt: str = "",
+     temperature: float = TEMPERATURE,
+     max_new_tokens: int = MAX_TOKENS,
+     top_p: float = TOP_P,
+     repetition_penalty: float = REPETITION_PENALTY,
+ ) -> str:
+     global current_model
+     if current_model is None:
+         return "Error: Please load a model first."
+
+     date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+     full_prompt = PREFIX.format(
          date_time_str=date_time_str,
+         purpose=sys_prompt,
+         agent_name=agent_name
+     ) + format_prompt(prompt, history)
+
      if VERBOSE:
+         logging.info(LOG_PROMPT.format(content=full_prompt))

+     response = current_model(
+         full_prompt,
+         max_new_tokens=max_new_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True
+     )[0]['generated_text']

+     assistant_response = response.split("Assistant:")[-1].strip()

      if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=assistant_response))
+
+     return assistant_response
+
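+ # Example use, assuming a model has been loaded first (e.g. via load_hf_model("gpt2")):
+ #   generate_response("Hello!", history=[])
+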
+ def load_hf_model(model_name: str):
+     """Loads a language model and fetches its description."""
+     global current_model, model_descriptions
      try:
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         current_model = pipeline(
+             "text-generation",
+             model=model_name,
+             tokenizer=tokenizer,
+             model_kwargs={"load_in_8bit": True},  # 8-bit loading requires the bitsandbytes package and a CUDA GPU
+         )
+
+         # Store the model's pipeline tag as a lightweight description
+         api = HfApi()
+         model_info = api.model_info(model_name)
+         model_descriptions[model_name] = model_info.pipeline_tag
+         return f"Successfully loaded model: {model_name}"
      except Exception as e:
+         return f"Error loading model: {str(e)}"
+
+ def execute_command(command: str, project_path: str = None) -> str:
+     """Executes a shell command and returns the output."""
+     try:
+         if project_path:
+             process = subprocess.Popen(command, shell=True, cwd=project_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
          else:
+             process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+         output, error = process.communicate()
+         if error:
+             return f"Error: {error.decode('utf-8')}"
+         return output.decode("utf-8")
+     except Exception as e:
+         return f"Error executing command: {str(e)}"
+
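+ # WARNING: execute_command runs arbitrary shell input (shell=True); only expose
+ # it in trusted environments.
+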
+ def create_hf_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
+     """Creates a new Hugging Face project."""
+     global repo
      try:
+         if os.path.exists(project_path):
+             return f"Error: Directory '{project_path}' already exists!"
+         # Create the directory and initialize it as a git repository;
+         # Repository expects an existing git checkout when clone_from is not given
+         os.makedirs(project_path)
+         subprocess.run(["git", "init"], cwd=project_path, check=True)
+         repo = Repository(local_dir=project_path)
+
+         # Add basic files (optional, you can customize this)
+         with open(os.path.join(project_path, "README.md"), "w") as f:
+             f.write(f"# {project_name}\n\nA new Hugging Face project.")
+
+         # Stage all changes
+         repo.git_add(pattern="*")
+         repo.git_commit(commit_message="Initial commit")
+
+         return f"Hugging Face project '{project_name}' created successfully at '{project_path}'"
      except Exception as e:
+         return f"Error creating Hugging Face project: {str(e)}"

+ def list_project_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
+     """Lists files in the project directory."""
+     try:
+         files = os.listdir(project_path)
+         if not files:
+             return "Project directory is empty."
+         return "\n".join(files)
+     except Exception as e:
+         return f"Error listing project files: {str(e)}"
+
+ def read_file_content(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+     """Reads and returns the content of a file in the project."""
+     try:
+         full_path = os.path.join(project_path, file_path)
+         with open(full_path, "r") as f:
+             content = f.read()
+         return content
+     except Exception as e:
+         return f"Error reading file: {str(e)}"
+
+ def write_to_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+     """Writes content to a file in the project."""
+     try:
+         full_path = os.path.join(project_path, file_path)
+         with open(full_path, "w") as f:
+             f.write(content)
+         return f"Successfully wrote to '{file_path}'"
+     except Exception as e:
+         return f"Error writing to file: {str(e)}"
+
+ def preview_project(project_path: str = DEFAULT_PROJECT_PATH):
+     """Provides a preview of the project, if applicable."""
+     # Assuming a simple HTML preview for now
+     try:
+         index_html_path = os.path.join(project_path, "index.html")
+         if os.path.exists(index_html_path):
+             with open(index_html_path, "r") as f:
+                 html_content = f.read()
+             display(HTML(html_content))
+             return "Previewing 'index.html'"
+         else:
+             return "No 'index.html' found for preview."
+     except Exception as e:
+         return f"Error previewing project: {str(e)}"
+
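+ # NOTE: display()/HTML() only render inside notebook front-ends; when the app
+ # runs as a plain script, users see just the status string returned above.
+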
+ def main():
+     with gr.Blocks() as demo:
+         gr.Markdown("## FragMixt: Your Hugging Face No-Code App Builder")
+
+         # --- Model Selection ---
+         with gr.Tab("Model"):
+             # --- Model Dropdown with Categories ---
+             model_categories = gr.Dropdown(
+                 choices=["Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
+                 label="Model Category",
+                 value="Text Generation"
+             )
+             model_name = gr.Dropdown(
+                 choices=[],  # Initially empty, will be populated based on category
+                 label="Hugging Face Model Name",
+             )
+             load_button = gr.Button("Load Model")
+             load_output = gr.Textbox(label="Output")
+             model_description = gr.Markdown(label="Model Description")
+
+             # --- Function to populate model names based on category ---
+             def update_model_dropdown(category):
+                 # Hub pipeline tags are lowercase and hyphenated, so the display
+                 # names must be mapped ("Question Answering" -> "question-answering");
+                 # summarization and code generation need explicit mappings
+                 tag = {"Text Summarization": "summarization", "Code Generation": "text-generation"}.get(
+                     category, category.lower().replace(" ", "-")
+                 )
+                 api = HfApi()
+                 models = [model.modelId for model in api.list_models(filter=tag, limit=50)]  # cap results; the unfiltered listing is huge
+                 return gr.Dropdown.update(choices=models)
+
+             # --- Event handler for category dropdown ---
+             model_categories.change(
+                 fn=update_model_dropdown,
+                 inputs=model_categories,
+                 outputs=model_name,
+             )
+
+             # --- Event handler to display model description ---
+             def display_model_description(model_name):
+                 global model_descriptions
+                 if model_name in model_descriptions:
+                     return model_descriptions[model_name]
+                 else:
+                     return "Model description not available."
+
+             model_name.change(
+                 fn=display_model_description,
+                 inputs=model_name,
+                 outputs=model_description,
+             )
+
+             load_button.click(load_hf_model, inputs=model_name, outputs=load_output)
+
+         # --- Chat Interface ---
+         with gr.Tab("Chat"):
+             chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True)
+             message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+             purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+             agent_name = gr.Dropdown(label="Agents", choices=["Generic Agent"], value="Generic Agent", interactive=True)
+             sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+             temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+             max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1024 * 10, step=64, interactive=True, info="The maximum number of new tokens")
+             top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+             repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+             submit_button = gr.Button(value="Send")
+             history = gr.State([])
+
+             def run_chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+                 response = generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+                 history.append((message, response))
+                 return history, history
+
+             submit_button.click(run_chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+         # --- Project Management ---
+         with gr.Tab("Project"):
+             project_name = gr.Textbox(label="Project Name", placeholder="MyHuggingFaceApp")
+             create_project_button = gr.Button("Create Hugging Face Project")
+             project_output = gr.Textbox(label="Output", lines=5)
+             file_content = gr.Code(label="File Content", language="python", lines=20)
+             file_path = gr.Textbox(label="File Path (relative to project)", placeholder="src/main.py")
+             read_button = gr.Button("Read File")
+             write_button = gr.Button("Write to File")
+             command_input = gr.Textbox(label="Terminal Command", placeholder="pip install -r requirements.txt")
+             command_output = gr.Textbox(label="Command Output", lines=5)
+             run_command_button = gr.Button("Run Command")
+             preview_button = gr.Button("Preview Project")
+
+             create_project_button.click(create_hf_project, inputs=[project_name], outputs=project_output)
+             read_button.click(read_file_content, inputs=file_path, outputs=file_content)
+             write_button.click(write_to_file, inputs=[file_path, file_content], outputs=project_output)
+             run_command_button.click(execute_command, inputs=command_input, outputs=command_output)
+             preview_button.click(preview_project, outputs=project_output)
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     main()
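+
+ # Running locally: `python app.py`. The imports above assume gradio,
+ # transformers, huggingface_hub, and ipython are installed; 8-bit loading
+ # additionally requires bitsandbytes.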