acecalisto3 committed
Commit 6145f14 · verified · 1 Parent(s): 5762a81

Update app.py

Files changed (1)
  1. app.py +169 -246
app.py CHANGED
@@ -1,249 +1,172 @@
 
 import os
 import subprocess
- import random
- import json
- from datetime import datetime
-
- from huggingface_hub import (
- InferenceClient,
- cached_download,
- hf_hub_url
- )
- import gradio as gr
-
- from safe_search import safe_search
- from i_search import google
- from i_search import i_search as i_s
-
- from agent import (
- ACTION_PROMPT,
- ADD_PROMPT,
- COMPRESS_HISTORY_PROMPT,
-
- LOG_PROMPT,
- LOG_RESPONSE,
- MODIFY_PROMPT,
- PRE_PREFIX,
- SEARCH_QUERY,
- READ_PROMPT,
- TASK_PROMPT,
- UNDERSTAND_TEST_RESULTS_PROMPT,
- )
-
- from utils import (
- parse_action,
- parse_file_content,
- read_python_module_structure
- )
- from datetime import datetime
- import json
-
- #--- Global Variables for App State ---
- app_state = {"components": []}
-
- terminal_history = ""
- #--- Component Library ---
- components_registry = { "Button": { "properties": {"label": "Click Me", "onclick": ""}, "description": "A clickable button", "code_snippet": 'gr.Button(value="{label}", variant="primary")', }, "Text Input": { "properties": {"value": "", "placeholder": "Enter text"}, "description": "A field for entering text", "code_snippet": 'gr.Textbox(label="{placeholder}")', }, "Image": { "properties": {"src": "#", "alt": "Image"}, "description": "Displays an image", "code_snippet": 'gr.Image(label="{alt}")', }, "Dropdown": { "properties": {"choices": ["Option 1", "Option 2"], "value": ""}, "description": "A dropdown menu for selecting options", "code_snippet": 'gr.Dropdown(choices={choices}, label="Dropdown")', }, }, # Add more components here... }
-
- #--- NLP Model (Example using Hugging Face) ---
- nlp_model_name = "google/flan-t5-small"
-
- # Check if the model exists in the cache
- try: cached_download(hf_hub_url(nlp_model_name, revision="main")) nlp_model = InferenceClient(nlp_model_name) except: nlp_model = None
-
- #--- Function to get NLP model response ---
- def get_nlp_response(input_text): if nlp_model: response = nlp_model.text_generation(input_text) return response.generated_text else: return "NLP model not available."
-
- # --- Component Class ---
- class Component: def init(self, type, properties=None, id=None): self.id = id or random.randint(1000, 9999) self.type = type self.properties = properties or components_registry[type]["properties"].copy()
-
- def to_dict(self):
- return {
- "id": self.id,
- "type": self.type,
- "properties": self.properties,
- }
-
- def render(self):
- # Properly format choices for Dropdown
- if self.type == "Dropdown":
- self.properties["choices"] = (
- str(self.properties["choices"])
- .replace("[", "")
- .replace("]", "")
- .replace("'", "")
- )
- return components_registry[self.type]["code_snippet"].format(
- **self.properties
 )
- # --- Function to update the app canvas (for preview) ---
- def update_app_canvas(): components_html = "".join( [ f"<div>Component ID: {component['id']}, Type: {component['type']}, Properties: {component['properties']}</div>" for component in app_state["components"] ] ) return components_html
-
- # --- Function to handle component addition ---
- def add_component(component_type): if component_type in components_registry: new_component = Component(component_type) app_state["components"].append(new_component.to_dict()) return ( update_app_canvas(), f"System: Added component: {component_type}\n", ) else: return None, f"Error: Invalid component type: {component_type}\n"
-
- # --- Function to handle terminal input ---
- def run_terminal_command(command, history): global terminal_history output = "" try: # Basic command parsing (expand with NLP) if command.startswith("add "): component_type = command.split("add ", 1)[1].strip() _, output = add_component(component_type) elif command.startswith("set "): _, output = set_component_property(command) elif command.startswith("search "): search_query = command.split("search ", 1)[1].strip() output = i_s(search_query) elif command.startswith("deploy "): app_name = command.split("deploy ", 1)[1].strip() output = deploy_to_huggingface(app_name) else: # Attempt to execute command as Python code try: result = subprocess.check_output( command, shell=True, stderr=subprocess.STDOUT, text=True ) output = result except Exception as e: output = f"Error executing Python code: {str(e)}" except Exception as e: output = f"Error: {str(e)}" finally: terminal_history += f"User: {command}\n" terminal_history += f"{output}\n" return terminal_history
-
- def set_component_property(command): try: # Improved 'set' command parsing set_parts = command.split(" ", 2)[1:] if len(set_parts) != 2: raise ValueError("Invalid 'set' command format.")
-
- component_id = int(set_parts[0]) # Use component ID
- property_name, property_value = set_parts[1].split("=", 1)
-
- # Find component by ID
- component_found = False
- for component in app_state["components"]:
- if component["id"] == component_id:
- if property_name in component["properties"]:
- component["properties"][
- property_name.strip()
- ] = property_value.strip()
- component_found = True
- return (
- update_app_canvas(),
- f"System: Property '{property_name}' set to '{property_value}' for component {component_id}\n",
- )
- else:
- return (
- None,
- f"Error: Property '{property_name}' not found in component {component_id}\n",
- )
- if not component_found:
- return (
- None,
- f"Error: Component with ID {component_id} not found.\n",
- )
-
- except Exception as e:
- return None, f"Error: Invalid 'set' command format or error setting property: {str(e)}\n"
- #--- Function to handle chat interaction ---
- def run_chat(message, history): global terminal_history if message.startswith("!"): command = message[1:] terminal_history = run_terminal_command(command, history) return history, terminal_history else: # ... (Your regular chat response generation) return history, terminal_history
-
- # --- Code Generation ---
- def generate_python_code(app_name): code = f""" import gradio as gr
-
- Define your Gradio components here
- with gr.Blocks() as {app_name}: """ for component in app_state["components"]: code += " " + Component(**component).render() + "\n"
-
- code += f"""
- {app_name}.launch() """ return code
-
- # --- Save/Load App State ---
- def save_app_state(filename="app_state.json"): with open(filename, "w") as f: json.dump(app_state, f)
-
- # --- Hugging Face Deployment --- def deploy_to_huggingface(app_name): # Generate Python code code = generate_python_code(app_name)
- def load_app_state(filename="app_state.json"): global app_state try: with open(filename, "r") as f: app_state = json.load(f) except FileNotFoundError: print("App state file not found. Starting with a blank slate.")
-
- # Create requirements.txt
- with open("requirements.txt", "w") as f:
- f.write("gradio==3.32.0\n")
-
- # Create the app.py file
- with open("app.py", "w") as f:
- f.write(code)
-
- # Execute the deployment command
- try:
- subprocess.run(
- [
- "huggingface-cli",
- "repo",
- "create",
- "--type",
- "space",
- "--space_sdk",
- "gradio",
- app_name,
- ],
- check=True,
- )
- subprocess.run(
- ["git", "init"], cwd=f"./{app_name}", check=True
- )
- subprocess.run(
- ["git", "add", "."], cwd=f"./{app_name}", check=True
- )
- subprocess.run(
- ['git', 'commit', '-m', '"Initial commit"'], cwd=f"./{user_name}/{app_name}", check=True
- )
- subprocess.run(
- ["git", "push", "https://huggingface.co/spaces/" + app_name, "main"], cwd=f"./{app_name}", check=True
- )
- return (
- f"Successfully deployed to Hugging Face Spaces: https://huggingface.co/spaces/{app_name}"
- )
- except Exception as e:
- return f"Error deploying to Hugging Face Spaces: {e}"
- --- Gradio Interface ---
- with gr.Blocks() as iface: with gr.Row(): # --- Chat Interface --- chat_history = gr.Chatbot(label="Chat with Agent") chat_input = gr.Textbox(label="Your Message") chat_button = gr.Button("Send")
-
- chat_button.click(
- run_chat,
- inputs=[chat_input, chat_history],
- outputs=[chat_history, terminal_output],
- )
-
- with gr.Row():
- # --- App Builder Section ---
- app_canvas = gr.HTML(
- "<div>App Canvas Preview:</div>", label="App Canvas"
- )
- with gr.Column():
- component_list = gr.Dropdown(
- choices=list(components_registry.keys()), label="Components"
- )
- add_button = gr.Button("Add Component")
-
- add_button.click(
- add_component,
- inputs=component_list,
- outputs=[app_canvas, terminal_output],
- )
-
- with gr.Row():
- # --- Terminal ---
- terminal_output = gr.Textbox(
- lines=8, label="Terminal", value=terminal_history
- )
- terminal_input = gr.Textbox(label="Enter Command")
- terminal_button = gr.Button("Run")
-
- terminal_button.click(
- run_terminal_command,
- inputs=[terminal_input, terminal_output],
- outputs=terminal_output,
- )
-
- with gr.Row():
- # --- Code Generation ---
- code_output = gr.Code(
- generate_python_code("app_name"),
- language="python",
- label="Generated Code",
- )
- app_name_input = gr.Textbox(label="App Name")
- generate_code_button = gr.Button("Generate Code")
- generate_code_button.click(
- generate_python_code,
- inputs=[app_name_input],
- outputs=code_output,
- )
-
- with gr.Row():
- # --- Save/Load Buttons ---
- save_button = gr.Button("Save App State")
- load_button = gr.Button("Load App State")
-
- save_button.click(save_app_state)
- load_button.click(load_app_state)
-
- with gr.Row():
- # --- Deploy Button ---
- deploy_button = gr.Button("Deploy to Hugging Face")
- deploy_output = gr.Textbox(label="Deployment Output")
- deploy_button.click(
- deploy_to_huggingface,
- inputs=[app_name_input],
- outputs=[deploy_output],
- )
- iface.launch()
 
+ import streamlit as st
 import os
 import subprocess
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import black
+ from pylint import lint
+ from io import StringIO
+ import sys
+
+ PROJECT_ROOT = "projects"
+ AGENT_DIRECTORY = "agents"
+
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "terminal_history" not in st.session_state:
+     st.session_state.terminal_history = []
+ if "workspace_projects" not in st.session_state:
+     st.session_state.workspace_projects = {}
+ if "available_agents" not in st.session_state:
+     st.session_state.available_agents = []
+
+ class AIAgent:
+     def __init__(self, name, description, skills):
+         self.name = name
+         self.description = description
+         self.skills = skills
+
+     def create_agent_prompt(self):
+         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
+         agent_prompt = f"""
+ As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas: {skills_str}
+
+ I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter. """
+         return agent_prompt
+
+     def autonomous_build(self, chat_history, workspace_projects):
+         """
+         Autonomous build logic that continues based on the state of chat history and workspace projects.
+         """
+         # Example logic: Generate a summary of chat history and workspace state
+         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+
+         # Example: Generate the next logical step in the project
+         next_step = "Based on the current state, the next logical step is to implement the main application logic."
+
+         return summary, next_step
+
+ def save_agent_to_file(agent):
+     """Saves the agent's prompt to a file."""
+     if not os.path.exists(AGENT_DIRECTORY):
+         os.makedirs(AGENT_DIRECTORY)
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
+     with open(file_path, "w") as file:
+         file.write(agent.create_agent_prompt())
+     st.session_state.available_agents.append(agent.name)
+
+ def load_agent_prompt(agent_name):
+     """Loads an agent prompt from a file."""
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
+     if os.path.exists(file_path):
+         with open(file_path, "r") as file:
+             agent_prompt = file.read()
+         return agent_prompt
+     else:
+         return None
+
+ def create_agent_from_text(name, text):
+     skills = text.split('\n')
+     agent = AIAgent(name, "AI agent created from text input.", skills)
+     save_agent_to_file(agent)
+     return agent.create_agent_prompt()
+
+ def chat_interface_with_agent(input_text, agent_name):
+     agent_prompt = load_agent_prompt(agent_name)
+     if agent_prompt is None:
+         return f"Agent {agent_name} not found."
+
+     model_name = "gpt2"
+     try:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Combine the agent prompt with user input
+     combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
+
+     # Truncate input text to avoid exceeding the model's maximum length
+     max_input_length = 900
+     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
+     if input_ids.shape[1] > max_input_length:
+         input_ids = input_ids[:, :max_input_length]
+
+     # Generate chatbot response
+     outputs = model.generate(
+         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True
     )
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ def terminal_interface(command, project_name=None):
+     if project_name:
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
+     else:
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
+     return result.stdout
+
+ def code_editor_interface(code):
+     formatted_code = black.format_str(code, mode=black.FileMode())
+     pylint_output = lint.Run([formatted_code], do_exit=False)
+     pylint_output_str = StringIO()
+     pylint_output.linter.reporter.write_messages(pylint_output_str)
+     return formatted_code, pylint_output_str.getvalue()
+
+ def summarize_text(text):
+     summarizer = pipeline("summarization")
+     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
+     return summary[0]['summary_text']
+
+ def sentiment_analysis(text):
+     analyzer = pipeline("sentiment-analysis")
+     result = analyzer(text)
+     return result[0]['label']
+
+ def translate_code(code, source_language, target_language):
+     # Placeholder for translation logic
+     return f"Translated {source_language} code to {target_language}."
+
+ def generate_code(idea):
+     # Placeholder for code generation logic
+     return f"Generated code based on the idea: {idea}."
+
+ def workspace_interface(project_name):
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         os.makedirs(project_path)
+     st.session_state.workspace_projects[project_name] = {'files': []}
+     return f"Project '{project_name}' created successfully."
+
+ def add_code_to_workspace(project_name, code, file_name):
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         return f"Project '{project_name}' does not exist."
+
+     file_path = os.path.join(project_path, file_name)
+     with open(file_path, "w") as file:
+         file.write(code)
+     st.session_state.workspace_projects[project_name]['files'].append(file_name)
+     return f"Code added to '{file_name}' in project '{project_name}'."
+
+ def chat_interface(input_text):
+     # Placeholder for chat interface logic
+     return f"Chatbot response: {input_text}"
+
+ st.title("AI Agent Creator")
+
+ sidebar = st.sidebar
+ sidebar.title("Navigation")
+ app_mode = sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+ if app_mode == "AI Agent Creator":
+     st.header("Create an AI Agent from Text")
+
+     subheader = st.subheader
+     agent_name = subheader("Enter agent name:")
+     text_input = subheader("Enter skills (one per line):")
+
+     if st.button("Create Agent"):
+         agent_prompt = create_agent_from_text(agent_name, text_input)
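
Note on the new code_editor_interface: pylint's lint.Run takes file or module paths as its argument list, so passing the formatted source string directly is unlikely to produce a lint report. Below is a minimal sketch of one way to lint an in-memory snippet, assuming pylint is invoked as a subprocess on a temporary file; the helper name lint_code_snippet is illustrative and not part of this commit.

import subprocess
import sys
import tempfile

import black

def lint_code_snippet(code: str):
    """Format a code string with black, then lint it by writing it to a temporary file."""
    formatted_code = black.format_str(code, mode=black.FileMode())
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(formatted_code)
        tmp_path = tmp.name
    # Run pylint on the temporary file and capture its text report.
    result = subprocess.run(
        [sys.executable, "-m", "pylint", tmp_path],
        capture_output=True,
        text=True,
    )
    return formatted_code, result.stdout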
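
Note on the "AI Agent Creator" page: st.subheader renders a heading rather than collecting input, so agent_name and text_input will not hold anything the user types. A minimal sketch of the same form using Streamlit input widgets (labels are illustrative; create_agent_from_text is the function defined in app.py above):

import streamlit as st

st.header("Create an AI Agent from Text")

# Input widgets return the user's entries; st.subheader does not.
agent_name = st.text_input("Enter agent name:")
skills_text = st.text_area("Enter skills (one per line):")

if st.button("Create Agent") and agent_name:
    agent_prompt = create_agent_from_text(agent_name, skills_text)
    st.success(f"Agent '{agent_name}' created.")
    st.text_area("Agent prompt", agent_prompt)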