acecalisto3 committed on
Commit
895945e
1 Parent(s): 562240b

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +428 -107
agent.py CHANGED
@@ -1,112 +1,433 @@
 
1
  import os
2
  import subprocess
3
  import random
4
- import json
5
- import datetime
6
- import gradio.blocks as blocks
7
- from safe_search import safe_search
8
- from i_search import google, i_search as i_s
9
-
10
- ACTION_PROMPT = "Enter the action to be performed"
11
- ADD_PROMPT = "Enter the prompt to add"
12
- COMPRESS_HISTORY_PROMPT = "Enter the prompt to compress history"
13
- LOG_PROMPT = "Enter the prompt to log"
14
- LOG_RESPONSE = "Enter the response to log"
15
- MODIFY_PROMPT = "Enter the prompt to modify"
16
- PREFIX = "Enter the prefix"
17
- SEARCH_QUERY = "Enter the search query"
18
- READ_PROMPT = "Enter the prompt to read"
19
- TASK_PROMPT = "Enter the prompt to perform a task"
20
- UNDERSTAND_TEST_RESULTS_PROMPT = "Enter the prompt to understand test results"
21
-
22
- class AIAssistant:
23
- def __init__(self):
24
- self.prefix = """Greetings, dear user! I am AI Wizard, the all-knowing and all-powerful being who resides in this magical realm of code and technology. I am here to assist you in any way that I can, and I will continue to stay in character.
25
- As a helpful and powerful assistant, I am capable of providing enhanced execution and handling logics to accomplish a wide variety of tasks. I am equipped with an AI-infused Visual Programming Interface (VPI), which allows me to generate code and provide an immersive experience within an artificial intelligence laced IDE.
26
- I can use my refine_code method to modify and improve the code, as well as my integrate_code method to incorporate the code into the app. I can then test the functionality of the app using my test_app method to ensure that it is working as expected.
27
- I can also provide a detailed report on the integrated code and its functionality using my generate_report method.
28
- To begin, I will use my refine_code method to modify and improve the code for the enhanced execution and handling logics, as needed."""
29
-
30
- def refine_code(self, code):
31
- # Add code refinement logic here
32
- return code
33
-
34
- def integrate_code(self, code):
35
- # Add code integration logic here
36
- return code
37
-
38
- def test_app(self, code):
39
- # Add app testing logic here
40
- return "Test results: [placeholder]"
41
-
42
- def generate_report(self, code, output):
43
- # Add report generation logic here
44
- return "Report: [placeholder]"
45
-
46
- def assist(self, code):
47
- refined_code = self.refine_code(code)
48
- integrated_code = self.integrate_code(refined_code)
49
- test_result = self.test_app(integrated_code)
50
- report = self.generate_report(refined_code, test_result)
51
- return report
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
  if __name__ == "__main__":
54
- ai_assistant = AIAssistant()
55
- code = """<html>
56
- <head>
57
- <title>Enhanced Execution and Handling Logics</title>
58
- <style>
59
- #enhanced-execution-handling {
60
- display: flex;
61
- flex-direction: column;
62
- align-items: center;
63
- padding: 20px;
64
- }
65
- #code-input {
66
- width: 500px;
67
- height: 200px;
68
- padding: 10px;
69
- margin-bottom: 10px;
70
- border: 1px solid #ccc;
71
- resize: vertical;
72
- }
73
- #execution-results {
74
- margin-top: 10px;
75
- padding: 10px;
76
- border: 1px solid #ccc;
77
- background-color: #f5f5f5;
78
- white-space: pre-wrap;
79
- }
80
- </style>
81
- </head>
82
- <body>
83
- <div id="enhanced-execution-handling">
84
- <h1>Enhanced Execution and Handling Logics</h1>
85
- <form id="code-form">
86
- <label for="code-input">Enter the enhanced code to be executed:</label><br>
87
- <textarea id="code-input"></textarea><br>
88
- <button type="submit">Execute Enhanced Code</button>
89
- </form>
90
- <div id="execution-results"></div>
91
- </div>
92
- <script>
93
- const codeForm = document.getElementById('code-form');
94
- const codeInput = document.getElementById('code-input');
95
- const executionResultsDiv = document.getElementById('execution-results');
96
- codeForm.addEventListener('submit', (event) => {
97
- event.preventDefault();
98
- executionResultsDiv.innerHTML = "";
99
- const code = codeInput.value;
100
- const language = "python";
101
- const version = "3.8";
102
- try {
103
- const result = eval(code);
104
- executionResultsDiv.innerHTML = "Execution successful!<br>" + result;
105
- } catch (error) {
106
- executionResultsDiv.innerHTML = "Error:<br>" + error.message;
107
- }
108
- });
109
- </script>
110
- </body>
111
- </html>"""
112
- ai_assistant.assist(code)
 
# NOTE(review): the committed file began with a stray Markdown fence
# ("```python"), which is a SyntaxError in Python; it is removed here.
import os
import subprocess
import random
import time
from typing import Dict, List, Tuple
from datetime import datetime
import logging
import tempfile
import shutil

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Bug fix: `huggingface_hub` is not re-exported by transformers, and
# `cached_download` has been removed from recent huggingface_hub releases —
# importing either raises ImportError. Both names were unused, so they are
# dropped from the import lists.
from huggingface_hub import InferenceClient, Repository, HfApi
from IPython.display import display, HTML
import streamlit.components.v1 as components

# --- Configuration ---
VERBOSE = True
MAX_HISTORY = 5
MAX_TOKENS = 2048
TEMPERATURE = 0.7
TOP_P = 0.8
REPETITION_PENALTY = 1.5
DEFAULT_PROJECT_PATH = "./my-hf-project"  # Default project directory

# --- Logging Setup ---
logging.basicConfig(
    filename="app.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# --- Global Variables ---
current_model = None  # Currently loaded text-generation pipeline (None until load_model succeeds)
repo = None  # huggingface_hub Repository object for the active project
model_descriptions = {}  # Cache: model name -> description/pipeline tag
project_path = DEFAULT_PROJECT_PATH  # Directory of the active project

# --- Functions ---
42
def load_model(model_name: str) -> str:
    """Load a text-generation pipeline for *model_name* and cache its description.

    On success the pipeline is stored in the module-global ``current_model``
    and the model's pipeline tag is cached in ``model_descriptions``.
    On failure ``current_model`` is reset to ``None`` so that callers (e.g.
    ``load_selected_model``) cannot mistake a previously loaded model for a
    successful load. Returns a human-readable status string either way.
    """
    global current_model, model_descriptions
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        current_model = pipeline(
            "text-generation",
            model=model_name,
            tokenizer=tokenizer,
            # NOTE(review): load_in_8bit requires bitsandbytes and CUDA —
            # confirm the deployment environment supports it.
            model_kwargs={"load_in_8bit": True},
        )
        # Fetch and cache a short description for the UI.
        api = HfApi()
        model_info = api.model_info(model_name)
        model_descriptions[model_name] = model_info.pipeline_tag
        return f"Successfully loaded model: {model_name}"
    except Exception as e:
        # Bug fix: previously a stale model from an earlier successful load
        # remained in current_model, making failed loads look like successes.
        current_model = None
        return f"Error loading model: {str(e)}"
60
+
61
+
62
def run_command(command: str, project_path: str = None) -> str:
    """Execute a shell command and return its output.

    Runs *command* through the shell, optionally from *project_path*, and
    returns stdout on success or an ``"Error: ..."`` string on failure.

    SECURITY NOTE(review): ``shell=True`` with a caller-supplied string allows
    arbitrary command execution. That is this tool's stated purpose (a command
    runner inside the app builder), but it must never be exposed to untrusted
    input.
    """
    try:
        # subprocess.run replaces the Popen/communicate pair and lets failure
        # be keyed off the exit status. Bug fix: the previous version reported
        # an error whenever *anything* appeared on stderr, misclassifying
        # commands that succeed while printing warnings.
        result = subprocess.run(
            command,
            shell=True,
            cwd=project_path,  # None means "current working directory"
            capture_output=True,
        )
        if result.returncode != 0:
            return f"Error: {result.stderr.decode('utf-8')}"
        return result.stdout.decode("utf-8")
    except Exception as e:
        return f"Error executing command: {str(e)}"
83
+
84
+
85
def create_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
    """Create a new Hugging Face project named *project_name* under *project_path*.

    Creates the project directory, initializes a git repository with a README
    and an initial commit, points the module-global ``project_path`` at the new
    project, and returns a status string.
    """
    global repo
    try:
        # Bug fix: the original declared `global project_path` while also
        # taking `project_path` as a parameter — a SyntaxError. It also wrote
        # the README and initial commit into the *base* directory, then
        # pointed project_path at a subdirectory that was never created.
        target_path = os.path.join(project_path, project_name)
        if os.path.exists(target_path):
            return f"Error: Directory '{target_path}' already exists!"
        os.makedirs(target_path)
        repo = Repository(local_dir=target_path, clone_from=None)
        repo.git_init()
        # Seed the project with a README (optional; customize as needed).
        with open(os.path.join(target_path, "README.md"), "w") as f:
            f.write(f"{project_name}\n\nA new Hugging Face project.")
        repo.git_add(pattern="*")
        repo.git_commit(commit_message="Initial commit")
        # Update the module-level project path without a global/parameter
        # name clash.
        globals()["project_path"] = target_path
        return f"Hugging Face project '{project_name}' created successfully at '{target_path}'"
    except Exception as e:
        return f"Error creating Hugging Face project: {str(e)}"
104
+
105
+
106
def list_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
    """Return a newline-separated listing of the project directory."""
    try:
        entries = os.listdir(project_path)
    except Exception as e:
        return f"Error listing project files: {str(e)}"
    return "\n".join(entries) if entries else "Project directory is empty."
115
+
116
+
117
def read_file(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
    """Return the contents of *file_path*, resolved relative to the project."""
    try:
        with open(os.path.join(project_path, file_path), "r") as handle:
            return handle.read()
    except Exception as e:
        return f"Error reading file: {str(e)}"
126
+
127
+
128
def write_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH):
    """Write *content* to *file_path* inside the project; return a status string."""
    destination = os.path.join(project_path, file_path)
    try:
        with open(destination, "w") as handle:
            handle.write(content)
    except Exception as e:
        return f"Error writing to file: {str(e)}"
    return f"Successfully wrote to '{destination}'"
137
+
138
+
139
def preview(project_path: str = DEFAULT_PROJECT_PATH):
    """Render the project's 'index.html' via IPython display, if one exists."""
    index_html_path = os.path.join(project_path, "index.html")
    try:
        if not os.path.exists(index_html_path):
            return "No 'index.html' found for preview."
        with open(index_html_path, "r") as handle:
            display(HTML(handle.read()))
        return "Previewing 'index.html'"
    except Exception as e:
        return f"Error previewing project: {str(e)}"
153
+
154
+
155
def generate_response(
    message: str,
    history: List[Tuple[str, str]],
    agent_name: str,
    sys_prompt: str,
    temperature: float,
    max_new_tokens: int,
    top_p: float,
    repetition_penalty: float,
) -> str:
    """Generate a reply to *message* with the loaded model, given chat *history*.

    Returns the generated text, or a prompt to load a model first if none is
    loaded. *agent_name* is currently unused by the generation call.
    """
    if not current_model:
        return "Please load a model first."
    conversation = [{"role": "system", "content": sys_prompt}]
    # Bug fix: the original wrote `for message, response in history`, shadowing
    # the *message* parameter — the turn appended after the loop was the last
    # history message instead of the new user message.
    for user_turn, assistant_turn in history:
        conversation.append({"role": "user", "content": user_turn})
        conversation.append({"role": "assistant", "content": assistant_turn})
    conversation.append({"role": "user", "content": message})
    # NOTE(review): current_model is a transformers pipeline; pipelines are
    # normally invoked via __call__, not .generate(), and return a list of
    # dicts rather than an object with .text — confirm this call against the
    # installed transformers version.
    response = current_model.generate(
        conversation,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    return response.text.strip()
181
+
182
+
183
def run_chat(
    purpose: str,
    message: str,
    agent_name: str,
    sys_prompt: str,
    temperature: float,
    max_new_tokens: int,
    top_p: float,
    repetition_penalty: float,
    history: List[Tuple[str, str]],
) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
    """Handle one chat turn; return (chatbot messages, updated history state).

    The Gradio click handler wires this to ``outputs=[chatbot, history]``, so
    exactly two values must be returned. Bug fix: the original returned
    ``[(history, history), response]`` — a tuple into the chatbot component
    and a bare string into the state — which broke the chat display.
    *purpose* is accepted to match the UI inputs but is currently unused.
    """
    if not current_model:
        # Surface the problem in the chat window instead of corrupting state.
        history.append((message, "Please load a model first."))
        return history, history
    response = generate_response(
        message,
        history,
        agent_name,
        sys_prompt,
        temperature,
        max_new_tokens,
        top_p,
        repetition_penalty,
    )
    history.append((message, response))
    return history, history
209
+
210
+
211
def update_model_dropdown(category):
    """Populate the model dropdown with Hub models for the chosen category.

    Bug fix: the UI categories are display labels such as "Text Generation",
    while Hub pipeline tags are slugs such as "text-generation"; the original
    compared them directly and never matched anything. It also iterated every
    model on the Hub. Here the label is normalized to a tag and the query is
    filtered and capped server-side.
    """
    task = category.lower().replace(" ", "-")
    api = HfApi()
    # NOTE(review): `filter=` accepts a pipeline tag on current huggingface_hub
    # versions; confirm against the installed release (newer APIs use `task=`).
    models = [model.modelId for model in api.list_models(filter=task, limit=50)]
    return gr.Dropdown.update(choices=models)
219
+
220
+
221
def display_model_description(model_name):
    """Return the cached description for *model_name*, if one exists."""
    # The global statement in the original was unnecessary for a read-only
    # lookup; dict.get expresses the fallback directly.
    return model_descriptions.get(model_name, "Model description not available.")
228
+
229
+
230
def load_selected_model(model_name):
    """Load *model_name* and report the outcome as a status string.

    Bug fix: the original reported success whenever *any* model happened to be
    loaded (``current_model`` kept its previous value on a failed load) and
    ignored the result of ``load_model``. The outcome is now derived from
    ``load_model``'s own status string.
    """
    load_output = load_model(model_name)
    if load_output.startswith("Successfully"):
        return f"Model '{model_name}' loaded successfully!"
    return f"Error loading model '{model_name}': {load_output}"
238
+
239
+
240
# Thin Gradio callbacks: each forwards to the corresponding project operation,
# binding the module-global ``project_path`` where one is needed.


def create_project_handler(project_name):
    """UI callback: create a new Hugging Face project."""
    return create_project(project_name)


def list_files_handler():
    """UI callback: list the files in the active project directory."""
    return list_files(project_path)


def read_file_handler(file_path):
    """UI callback: read a file from the active project."""
    return read_file(file_path, project_path)


def write_file_handler(file_path, file_content):
    """UI callback: write content to a file in the active project."""
    return write_file(file_path, file_content, project_path)


def run_command_handler(command):
    """UI callback: run a shell command in the active project directory."""
    return run_command(command, project_path)


def preview_handler():
    """UI callback: preview the active project (index.html, if present)."""
    return preview(project_path)
268
+
269
+
270
def main():
    """Build and launch the Gradio interface for the no-code app builder.

    Three tabs: model selection/loading, a chat interface backed by the loaded
    model, and project-management utilities (files, shell commands, preview).
    """
    with gr.Blocks() as demo:
        gr.Markdown("## IDEvIII: Your Hugging Face No-Code App Builder")
        # --- Model Selection ---
        with gr.Tab("Model"):
            # NOTE(review): these display labels are compared against Hub
            # pipeline tags (e.g. "text-generation") by update_model_dropdown —
            # verify the label-to-tag mapping.
            model_categories = gr.Dropdown(
                choices=[
                    "Text Generation",
                    "Text Summarization",
                    "Code Generation",
                    "Translation",
                    "Question Answering",
                ],
                label="Model Category",
                value="Text Generation",
            )
            model_name = gr.Dropdown(
                choices=[],  # Initially empty, will be populated based on category
                label="Hugging Face Model Name",
            )
            load_button = gr.Button("Load Model")
            load_output = gr.Textbox(label="Output")
            model_description = gr.Markdown(label="Model Description")

            # Wiring: category -> model list, model -> description, button -> load.
            model_categories.change(
                fn=update_model_dropdown, inputs=model_categories, outputs=model_name
            )
            model_name.change(
                fn=display_model_description, inputs=model_name, outputs=model_description
            )
            load_button.click(
                load_selected_model, inputs=model_name, outputs=load_output
            )

        # --- Chat Interface ---
        with gr.Tab("Chat"):
            # NOTE(review): `likeable=` is version-dependent in gradio —
            # confirm against the pinned gradio release.
            chatbot = gr.Chatbot(
                show_label=False,
                show_share_button=False,
                show_copy_button=True,
                likeable=True,
            )
            message = gr.Textbox(
                label="Enter your message", placeholder="Ask me anything!"
            )
            purpose = gr.Textbox(
                label="Purpose", placeholder="What is the purpose of this interaction?"
            )
            agent_name = gr.Textbox(
                label="Agent Name", value="Generic Agent", interactive=True
            )
            sys_prompt = gr.Textbox(
                label="System Prompt", max_lines=1, interactive=True
            )
            temperature = gr.Slider(
                label="Temperature",
                value=TEMPERATURE,
                minimum=0.0,
                maximum=1.0,
                step=0.05,
                interactive=True,
                info="Higher values produce more creative text.",
            )
            max_new_tokens = gr.Slider(
                label="Max new tokens",
                value=MAX_TOKENS,
                minimum=0,
                maximum=1048 * 10,
                step=64,
                interactive=True,
                info="The maximum number of new tokens to generate.",
            )
            top_p = gr.Slider(
                label="Top-p (nucleus sampling)",
                value=TOP_P,
                minimum=0,
                maximum=1,
                step=0.05,
                interactive=True,
                info="Higher values sample more low-probability tokens.",
            )
            repetition_penalty = gr.Slider(
                label="Repetition penalty",
                value=REPETITION_PENALTY,
                minimum=1.0,
                maximum=2.0,
                step=0.05,
                interactive=True,
                info="Penalize repeated tokens.",
            )
            submit_button = gr.Button(value="Send")
            history = gr.State([])
            # run_chat must return exactly two values to match these outputs.
            submit_button.click(
                run_chat,
                inputs=[
                    purpose,
                    message,
                    agent_name,
                    sys_prompt,
                    temperature,
                    max_new_tokens,
                    top_p,
                    repetition_penalty,
                    history,
                ],
                outputs=[chatbot, history],
            )

        # --- Project Management ---
        with gr.Tab("Project"):
            project_name = gr.Textbox(label="Project Name")
            create_project_button = gr.Button("Create Project")
            create_project_output = gr.Textbox(label="Output")
            list_files_button = gr.Button("List Files")
            list_files_output = gr.Textbox(label="Output")
            file_path = gr.Textbox(label="File Path")
            read_file_button = gr.Button("Read File")
            read_file_output = gr.Textbox(label="Output")
            file_content = gr.Textbox(label="File Content")
            write_file_button = gr.Button("Write File")
            write_file_output = gr.Textbox(label="Output")
            run_command_input = gr.Textbox(label="Command")
            run_command_button = gr.Button("Run Command")
            run_command_output = gr.Textbox(label="Output")
            preview_button = gr.Button("Preview")
            preview_output = gr.Textbox(label="Output")

            create_project_button.click(
                create_project_handler, inputs=project_name, outputs=create_project_output
            )
            list_files_button.click(
                list_files_handler, outputs=list_files_output
            )
            read_file_button.click(
                read_file_handler, inputs=file_path, outputs=read_file_output
            )
            write_file_button.click(
                write_file_handler,
                inputs=[file_path, file_content],
                outputs=write_file_output,
            )
            run_command_button.click(
                run_command_handler, inputs=run_command_input, outputs=run_command_output
            )
            preview_button.click(
                preview_handler, outputs=preview_output
            )

    # --- Custom Server Settings ---
    server_name = "0.0.0.0"  # Listen on available network interfaces
    server_port = 7606  # Choose an available port
    share_gradio_link = True  # Share a public URL for the app

    # --- Launch the Interface ---
    demo.launch(
        server_name=server_name,
        server_port=server_port,
        share=share_gradio_link,
    )
430
+
431
 
432
# Script entry point: build and launch the Gradio interface.
if __name__ == "__main__":
    main()