acecalisto3 committed on
Commit
9f232dd
1 Parent(s): 84d915d

Update app.py

Files changed (1)
  1. app.py +96 -325
app.py CHANGED
@@ -12,6 +12,7 @@ import sys
 openai.api_key = "YOUR_OPENAI_API_KEY"
 
 PROJECT_ROOT = "projects"
 
 # Global state to manage communication between Tool Box and Workspace Chat App
 if 'chat_history' not in st.session_state:
@@ -20,19 +21,69 @@ if 'terminal_history' not in st.session_state:
 st.session_state.terminal_history = []
 if 'workspace_projects' not in st.session_state:
 st.session_state.workspace_projects = {}
 
- # Define functions for each feature
-
- # 1. Chat Interface
- def chat_interface(input_text):
- """Handles user input in the chat interface.
-
- Args:
- input_text: User's input text.
-
- Returns:
- The chatbot's response.
- """
 # Load the GPT-2 model which is compatible with AutoModelForCausalLM
 model_name = "gpt2"
 try:
@@ -42,9 +93,12 @@ def chat_interface(input_text):
 except EnvironmentError as e:
 return f"Error loading model: {e}"
 
 # Truncate input text to avoid exceeding the model's maximum length
 max_input_length = 900
- input_ids = tokenizer.encode(input_text, return_tensors="pt")
 if input_ids.shape[1] > max_input_length:
 input_ids = input_ids[:, :max_input_length]
 
@@ -55,326 +109,24 @@ def chat_interface(input_text):
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 return response
 
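For reference, the full chat flow this hunk edits can be sketched end to end. The diff elides the model loading and the generate() call, so the generation parameters below are illustrative assumptions, not taken from the commit:

from transformers import AutoModelForCausalLM, AutoTokenizer

def chat_sketch(input_text, max_input_length=900):
    # Load GPT-2, which is compatible with AutoModelForCausalLM
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    # Tokenize, then truncate to the model's input budget
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, :max_input_length]

    # Greedy decoding; GPT-2 has no pad token, so EOS is reused (assumption)
    outputs = model.generate(input_ids, max_new_tokens=100,
                             pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)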
-
- # 2. Terminal
- def terminal_interface(command, project_name=None):
- """Executes commands in the terminal.
-
- Args:
- command: User's command.
- project_name: Name of the project workspace to add installed packages.
-
- Returns:
- The terminal output.
- """
- # Execute command
- try:
- process = subprocess.run(command.split(), capture_output=True, text=True)
- output = process.stdout
-
- # If the command is to install a package, update the workspace
- if "install" in command and project_name:
- requirements_path = os.path.join(PROJECT_ROOT, project_name, "requirements.txt")
- with open(requirements_path, "a") as req_file:
- package_name = command.split()[-1]
- req_file.write(f"{package_name}\n")
- except Exception as e:
- output = f"Error: {e}"
- return output
-
-
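A side note on the removed terminal helper: command.split() breaks quoted arguments apart. If this helper is ever revived, shlex is the usual fix; a minimal sketch, assuming POSIX-style quoting:

import shlex
import subprocess

def run_command(command):
    # shlex.split honors quotes, so pip install "pkg>=1.0" stays one argument
    process = subprocess.run(shlex.split(command), capture_output=True, text=True)
    return process.stdout or process.stderr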
- # 3. Code Editor
- def code_editor_interface(code):
- """Provides code completion, formatting, and linting in the code editor.
-
- Args:
- code: User's code.
-
- Returns:
- Formatted and linted code.
- """
- # Format code using black
- try:
- formatted_code = black.format_str(code, mode=black.FileMode())
- except black.InvalidInput:
- formatted_code = code # Keep original code if formatting fails
-
- # Lint code using pylint
- try:
- pylint_output = StringIO()
- sys.stdout = pylint_output
- sys.stderr = pylint_output
- lint.Run(['--from-stdin'], stdin=StringIO(formatted_code))
- sys.stdout = sys.__stdout__
- sys.stderr = sys.__stderr__
- lint_message = pylint_output.getvalue()
- except Exception as e:
- lint_message = f"Pylint error: {e}"
-
- return formatted_code, lint_message
-
-
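The stdout/stderr rebinding around lint.Run in the removed editor code is fragile inside a Streamlit process. Running pylint in a subprocess isolates it; a sketch assuming black and pylint are installed (the snippet.py name is only the label pylint requires for stdin input):

import subprocess
import black

def format_and_lint(code):
    # Format with black; fall back to the original text if it does not parse
    try:
        formatted_code = black.format_str(code, mode=black.FileMode())
    except black.InvalidInput:
        formatted_code = code

    # Lint in a child process so pylint cannot disturb this process's streams
    result = subprocess.run(
        ["pylint", "--from-stdin", "snippet.py"],
        input=formatted_code, capture_output=True, text=True,
    )
    return formatted_code, result.stdout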
- # 4. Workspace
- def workspace_interface(project_name):
- """Manages projects, files, and resources in the workspace.
-
- Args:
- project_name: Name of the new project.
-
- Returns:
- Project creation status.
- """
- project_path = os.path.join(PROJECT_ROOT, project_name)
- # Create project directory
- try:
- os.makedirs(project_path)
- requirements_path = os.path.join(project_path, "requirements.txt")
- with open(requirements_path, "w") as req_file:
- req_file.write("") # Initialize an empty requirements.txt file
- status = f'Project "{project_name}" created successfully.'
- st.session_state.workspace_projects[project_name] = {'files': []}
- except FileExistsError:
- status = f'Project "{project_name}" already exists.'
- return status
-
- def add_code_to_workspace(project_name, code, file_name):
- """Adds selected code files to the workspace.
-
- Args:
- project_name: Name of the project.
- code: Code to be added.
- file_name: Name of the file to be created.
-
- Returns:
- File creation status.
- """
- project_path = os.path.join(PROJECT_ROOT, project_name)
- file_path = os.path.join(project_path, file_name)
-
- try:
- with open(file_path, "w") as code_file:
- code_file.write(code)
- status = f'File "{file_name}" added to project "{project_name}" successfully.'
- st.session_state.workspace_projects[project_name]['files'].append(file_name)
- except Exception as e:
- status = f"Error: {e}"
- return status
-
-
- # 5. AI-Infused Tools
-
- # Define custom AI-powered tools using Hugging Face models
-
- # Example: Text summarization tool
- def summarize_text(text):
- """Summarizes a given text using a Hugging Face model.
-
- Args:
- text: Text to be summarized.
-
- Returns:
- Summarized text.
- """
- # Load the summarization model
- model_name = "facebook/bart-large-cnn"
- try:
- summarizer = pipeline("summarization", model=model_name)
- except EnvironmentError as e:
- return f"Error loading model: {e}"
-
- # Truncate input text to avoid exceeding the model's maximum length
- max_input_length = 1024
- inputs = text
- if len(text) > max_input_length:
- inputs = text[:max_input_length]
-
- # Generate summary
- summary = summarizer(inputs, max_length=100, min_length=30, do_sample=False)[0]["summary_text"]
- return summary
-
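Note that the 1024 cap above is applied in characters, while facebook/bart-large-cnn's limit is 1024 tokens, so long inputs can still overflow. A sketch that instead lets the pipeline truncate at the token level (long_text is a placeholder; other parameters as above):

from transformers import pipeline

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
# truncation=True has the tokenizer cut the input at the model's real
# token limit rather than relying on a character count.
summary = summarizer(long_text, max_length=100, min_length=30,
                     do_sample=False, truncation=True)[0]["summary_text"]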
- # Example: Sentiment analysis tool
- def sentiment_analysis(text):
- """Performs sentiment analysis on a given text using a Hugging Face model.
-
- Args:
- text: Text to be analyzed.
-
- Returns:
- Sentiment analysis result.
- """
- # Load the sentiment analysis model
- model_name = "distilbert-base-uncased-finetuned-sst-2-english"
- try:
- analyzer = pipeline("sentiment-analysis", model=model_name)
- except EnvironmentError as e:
- return f"Error loading model: {e}"
-
- # Perform sentiment analysis
- result = analyzer(text)[0]
- return result
-
- # Example: Text translation tool (code translation)
- def translate_code(code, source_language, target_language):
- """Translates code from one programming language to another using OpenAI Codex.
-
- Args:
- code: Code to be translated.
- source_language: The source programming language.
- target_language: The target programming language.
-
- Returns:
- Translated code.
- """
- prompt = f"Translate the following {source_language} code to {target_language}:\n\n{code}"
- try:
- response = openai.Completion.create(
- engine="code-davinci-002",
- prompt=prompt,
- max_tokens=1024,
- temperature=0.3,
- top_p=1,
- n=1,
- stop=None
- )
- translated_code = response.choices[0].text.strip()
- except Exception as e:
- translated_code = f"Error: {e}"
- return translated_code
-
-
- # 6. Code Generation
- def generate_code(idea):
- """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model.
- Args:
- idea: The idea for the code to be generated.
- Returns:
- The generated code as a string.
- """
-
- # Load the code generation model
- model_name = "EleutherAI/gpt-neo-2.7B"
- try:
- model = AutoModelForCausalLM.from_pretrained(model_name)
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- except EnvironmentError as e:
- return f"Error loading model: {e}"
-
- # Generate the code
- input_text = f"""
- # Idea: {idea}
- # Code:
- """
- input_ids = tokenizer.encode(input_text, return_tensors="pt")
- output_sequences = model.generate(
- input_ids=input_ids,
- max_length=1024,
- num_return_sequences=1,
- no_repeat_ngram_size=2,
- early_stopping=True,
- temperature=0.7, # Adjust temperature for creativity
- top_k=50, # Adjust top_k for diversity
- )
- generated_code = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
-
- # Remove the prompt and formatting
- parts = generated_code.split("\n# Code:")
- if len(parts) > 1:
- generated_code = parts[1].strip()
- else:
- generated_code = generated_code.strip()
-
- return generated_code
-
-
- # 7. AI Personas Creator
- def create_persona_from_text(text):
- """Creates an AI persona from the given text.
-
- Args:
- text: Text to be used for creating the persona.
-
- Returns:
- Persona prompt.
- """
- persona_prompt = f"""
- As an elite expert developer with the highest level of proficiency in Streamlit, Gradio, and Hugging Face, I possess a comprehensive understanding of these technologies and their applications in web development and deployment. My expertise encompasses the following areas:
-
- Streamlit:
- * In-depth knowledge of Streamlit's architecture, components, and customization options.
- * Expertise in creating interactive and user-friendly dashboards and applications.
- * Proficiency in integrating Streamlit with various data sources and machine learning models.
-
- Gradio:
- * Thorough understanding of Gradio's capabilities for building and deploying machine learning interfaces.
- * Expertise in creating custom Gradio components and integrating them with Streamlit applications.
- * Proficiency in using Gradio to deploy models from Hugging Face and other frameworks.
-
- Hugging Face:
- * Comprehensive knowledge of Hugging Face's model hub and Transformers library.
- * Expertise in fine-tuning and deploying Hugging Face models for various NLP and computer vision tasks.
- * Proficiency in using Hugging Face's Spaces platform for model deployment and sharing.
-
- Deployment:
- * In-depth understanding of best practices for deploying Streamlit and Gradio applications.
- * Expertise in deploying models on cloud platforms such as AWS, Azure, and GCP.
- * Proficiency in optimizing deployment configurations for performance and scalability.
-
- Additional Skills:
- * Strong programming skills in Python and JavaScript.
- * Familiarity with Docker and containerization technologies.
- * Excellent communication and problem-solving abilities.
-
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications using Streamlit, Gradio, and Hugging Face. Please feel free to ask any questions or present any challenges you may encounter.
-
- Example:
-
- Task:
- Develop a Streamlit application that allows users to generate text using a Hugging Face model. The application should include a Gradio component for user input and model prediction.
-
- Solution:
-
- import streamlit as st
- import gradio as gr
- from transformers import pipeline
-
- # Create a Hugging Face pipeline
- huggingface_model = pipeline("text-generation")
-
- # Create a Streamlit app
- st.title("Hugging Face Text Generation App")
-
- # Define a Gradio component
- demo = gr.Interface(
- fn=huggingface_model,
- inputs=gr.Textbox(lines=2),
- outputs=gr.Textbox(lines=1),
- )
-
- # Display the Gradio component in the Streamlit app
- st.write(demo)
- """
- return persona_prompt
-
-
 # Streamlit App
- st.title("AI Personas Creator")
 
 # Sidebar navigation
 st.sidebar.title("Navigation")
- app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Personas Creator", "Tool Box", "Workspace Chat App"])
 
- if app_mode == "AI Personas Creator":
- # AI Personas Creator
- st.header("Create the System Prompt of an AI Persona from YouTube or Text")
 
 st.subheader("From Text")
- text_input = st.text_area("Enter text to create an AI persona:")
- if st.button("Create Persona"):
- persona_prompt = create_persona_from_text(text_input)
- st.subheader("Persona Prompt")
- st.text_area("You may now copy the text below and use it as Custom prompt!", value=persona_prompt, height=300)
 
 elif app_mode == "Tool Box":
 # Tool Box
@@ -485,4 +237,23 @@ elif app_mode == "Workspace Chat App":
 for project, details in st.session_state.workspace_projects.items():
 st.write(f"Project: {project}")
 for file in details['files']:
- st.write(f" - {file}")
 openai.api_key = "YOUR_OPENAI_API_KEY"
 
 PROJECT_ROOT = "projects"
+ AGENT_DIRECTORY = "agents"
 
 # Global state to manage communication between Tool Box and Workspace Chat App
 if 'chat_history' not in st.session_state:
 
 st.session_state.terminal_history = []
 if 'workspace_projects' not in st.session_state:
 st.session_state.workspace_projects = {}
+ if 'available_agents' not in st.session_state:
+ st.session_state.available_agents = []
+
+ class AIAgent:
+ def __init__(self, name, description, skills):
+ self.name = name
+ self.description = description
+ self.skills = skills
+
+ def create_agent_prompt(self):
+ skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
+ agent_prompt = f"""
+ As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
+ {skills_str}
+
+ I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
+ """
+ return agent_prompt
+
+ def autonomous_build(self, chat_history, workspace_projects):
+ """
+ Autonomous build logic that continues based on the state of chat history and workspace projects.
+ """
+ # Example logic: Generate a summary of chat history and workspace state
+ summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+ summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+
+ # Example: Generate the next logical step in the project
+ next_step = "Based on the current state, the next logical step is to implement the main application logic."
+
+ return summary, next_step
+
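For a quick sense of how the new class behaves, a usage sketch (the agent name, skills, and sample state are made-up examples):

agent = AIAgent("CodeHelper", "Demo agent", ["Python", "Streamlit"])
print(agent.create_agent_prompt())

summary, next_step = agent.autonomous_build(
    chat_history=[("Build a todo app", "Sure, let's scaffold it.")],
    workspace_projects={"todo_app": {"files": ["app.py"]}},
)
print(summary)
print(next_step)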
+ def save_agent_to_file(agent):
+ """Saves the agent's prompt to a file."""
+ if not os.path.exists(AGENT_DIRECTORY):
+ os.makedirs(AGENT_DIRECTORY)
+ file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
+ with open(file_path, "w") as file:
+ file.write(agent.create_agent_prompt())
+ st.session_state.available_agents.append(agent.name)
+
+ def load_agent_prompt(agent_name):
+ """Loads an agent prompt from a file."""
+ file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
+ if os.path.exists(file_path):
+ with open(file_path, "r") as file:
+ agent_prompt = file.read()
+ return agent_prompt
+ else:
+ return None
 
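One caveat with these helpers: agent.name flows straight into the file path, so a name like "../x" escapes AGENT_DIRECTORY. A minimal sanitizer sketch (the function name is mine, not from the commit):

import re

def safe_agent_filename(agent_name):
    # Keep only word characters and hyphens so a user-supplied name
    # cannot write outside AGENT_DIRECTORY.
    return re.sub(r"[^\w-]", "_", agent_name) + ".txt"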
+ def create_agent_from_text(name, text):
+ skills = text.split('\n')
+ agent = AIAgent(name, "AI agent created from text input.", skills)
+ save_agent_to_file(agent)
+ return agent.create_agent_prompt()
 
+ # Chat interface using a selected agent
+ def chat_interface_with_agent(input_text, agent_name):
+ agent_prompt = load_agent_prompt(agent_name)
+ if agent_prompt is None:
+ return f"Agent {agent_name} not found."
 
 # Load the GPT-2 model which is compatible with AutoModelForCausalLM
 model_name = "gpt2"
 try:
 
 except EnvironmentError as e:
 return f"Error loading model: {e}"
 
+ # Combine the agent prompt with user input
+ combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
+
 # Truncate input text to avoid exceeding the model's maximum length
 max_input_length = 900
+ input_ids = tokenizer.encode(combined_input, return_tensors="pt")
 if input_ids.shape[1] > max_input_length:
 input_ids = input_ids[:, :max_input_length]
 
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 return response
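Note that tokenizer.decode(outputs[0], ...) returns the prompt plus the continuation, so the reply echoes the whole agent persona back to the user. A trimming sketch, assuming outputs came from model.generate(input_ids, ...) on the truncated ids above:

# outputs[0] starts with the prompt tokens; slice them off before
# decoding so only the newly generated reply is returned.
prompt_length = input_ids.shape[1]
response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)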
 # Streamlit App
+ st.title("AI Agent Creator")
 
 # Sidebar navigation
 st.sidebar.title("Navigation")
+ app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
 
+ if app_mode == "AI Agent Creator":
+ # AI Agent Creator
+ st.header("Create an AI Agent from Text")
 
 st.subheader("From Text")
+ agent_name = st.text_input("Enter agent name:")
+ text_input = st.text_area("Enter skills (one per line):")
+ if st.button("Create Agent"):
+ agent_prompt = create_agent_from_text(agent_name, text_input)
+ st.success(f"Agent '{agent_name}' created and saved successfully.")
+ st.session_state.available_agents.append(agent_name)
 
 elif app_mode == "Tool Box":
 # Tool Box
 
 for project, details in st.session_state.workspace_projects.items():
 st.write(f"Project: {project}")
 for file in details['files']:
+ st.write(f" - {file}")
+
+ # Chat with AI Agents
+ st.subheader("Chat with AI Agents")
+ selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
+ agent_chat_input = st.text_area("Enter your message for the agent:")
+ if st.button("Send to Agent"):
+ agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
+ st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
+ st.write(f"{selected_agent}: {agent_chat_response}")
+
+ # Automate Build Process
+ st.subheader("Automate Build Process")
+ if st.button("Automate"):
+ agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
+ summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
+ st.write("Autonomous Build Summary:")
+ st.write(summary)
+ st.write("Next Step:")
+ st.write(next_step)
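A design note on the Automate branch: it reconstructs the agent with an empty description and skill list, so autonomous_build never sees the saved persona. A sketch of restoring the stored prompt first, reusing load_agent_prompt from this commit (variable names are mine):

# Rebuild the agent from its saved prompt instead of an empty shell.
saved_prompt = load_agent_prompt(selected_agent)
skills = [line.lstrip("* ").strip()
          for line in (saved_prompt or "").splitlines()
          if line.strip().startswith("*")]
agent = AIAgent(selected_agent, "Restored from saved prompt.", skills)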