whackthejacker committed on
Commit
d877c0b
·
verified ·
1 Parent(s): 895715d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -109
app.py CHANGED
@@ -5,36 +5,44 @@ import streamlit as st
5
  import black
6
  from pylint import lint
7
  from io import StringIO
8
- import requests
9
  import logging
10
  import atexit
11
  import time
12
  from datetime import datetime
 
 
13
 
14
  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
15
  PROJECT_ROOT = "projects"
16
  AGENT_DIRECTORY = "agents"
17
 
18
- # Global state to manage communication between Tool Box and Workspace Chat App
19
- if 'chat_history' not in st.session_state:
20
- st.session_state.chat_history = []
21
- if 'terminal_history' not in st.session_state:
22
- st.session_state.terminal_history = []
23
- if 'workspace_projects' not in st.session_state:
24
- st.session_state.workspace_projects = {}
25
- if 'available_agents' not in st.session_state:
26
- st.session_state.available_agents = []
27
- if 'current_state' not in st.session_state:
28
- st.session_state.current_state = {
29
- 'toolbox': {},
30
- 'workspace_chat': {}
31
- }
 
 
 
32
 
33
  class InstructModel:
34
  def __init__(self):
35
  """Initialize the Mixtral-8x7B-Instruct model"""
 
 
 
 
 
36
  try:
37
- self.model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
38
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
39
  self.model = AutoModelForCausalLM.from_pretrained(
40
  self.model_name,
@@ -46,33 +54,26 @@ class InstructModel:
46
 
47
  def generate_response(self, prompt: str) -> str:
48
  """Generate a response using the Mixtral model"""
49
- try:
50
- # Format the prompt according to Mixtral's expected format
51
- formatted_prompt = f"<s>[INST] {prompt} [/INST]"
52
-
53
- # Tokenize input
54
- inputs = self.tokenizer(formatted_prompt, return_tensors="pt").to(self.model.device)
55
-
56
- # Generate response
57
- outputs = self.model.generate(
58
- inputs.input_ids,
59
- max_new_tokens=512,
60
- temperature=0.7,
61
- top_p=0.95,
62
- do_sample=True,
63
- pad_token_id=self.tokenizer.eos_token_id
64
- )
65
-
66
- # Decode and clean up response
67
- response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
68
-
69
- # Remove the prompt from the response
70
- response = response.replace(formatted_prompt, "").strip()
71
-
72
- return response
73
-
74
- except Exception as e:
75
- raise Exception(f"Error generating response: {str(e)}")
76
 
77
  def __del__(self):
78
  """Cleanup when the model is no longer needed"""
@@ -83,8 +84,6 @@ class InstructModel:
83
  except:
84
  pass
85
 
86
-
87
-
88
  class AIAgent:
89
  def __init__(self, name, description, skills):
90
  self.name = name
@@ -93,22 +92,26 @@ class AIAgent:
93
 
94
  def create_agent_prompt(self):
95
  skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
96
- agent_prompt = f"""
97
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
98
- {skills_str}
99
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
100
- """
101
- return agent_prompt
102
 
103
  def autonomous_build(self, chat_history, workspace_projects):
104
- summary = "Chat History:\n" + "\n".join([f":User {u}\nAgent: {a}" for u, a in chat_history])
105
- summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
106
  next_step = "Based on the current state, the next logical step is to implement the main application logic."
107
  return summary, next_step
108
 
 
 
 
 
 
 
109
  def save_agent_to_file(agent):
110
- if not os.path.exists(AGENT_DIRECTORY):
111
- os.makedirs(AGENT_DIRECTORY)
112
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
113
  config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
114
  with open(file_path, "w") as file:
@@ -122,10 +125,8 @@ def load_agent_prompt(agent_name):
122
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
123
  if os.path.exists(file_path):
124
  with open(file_path, "r") as file:
125
- agent_prompt = file.read()
126
- return agent_prompt
127
- else:
128
- return None
129
 
130
  def create_agent_from_text(name, text):
131
  skills = text.split('\n')
@@ -135,39 +136,27 @@ def create_agent_from_text(name, text):
135
 
136
  def chat_interface(input_text):
137
  """Handles chat interactions without a specific agent."""
138
- try:
139
- model = InstructModel() # Initialize the Mixtral Instruct model
140
- response = model.generate_response(f":User {input_text}\nAI:")
141
- return response
142
- except EnvironmentError as e:
143
- return f"Error communicating with AI: {e}"
144
 
145
  def chat_interface_with_agent(input_text, agent_name):
146
  agent_prompt = load_agent_prompt(agent_name)
147
  if agent_prompt is None:
148
  return f"Agent {agent_name} not found."
149
-
150
- try:
151
- model = InstructModel() # Initialize Mixtral Instruct model
152
- except EnvironmentError as e:
153
- return f"Error loading model: {e}"
154
-
155
  combined_input = f"{agent_prompt}\n\n:User {input_text}\nAgent:"
156
- response = model.generate_response(combined_input)
157
- return response
158
 
159
  def workspace_interface(project_name):
160
  project_path = os.path.join(PROJECT_ROOT, project_name)
161
- if not os.path.exists(PROJECT_ROOT):
162
- os.makedirs(PROJECT_ROOT)
163
  if not os.path.exists(project_path):
164
  os.makedirs(project_path)
165
  st.session_state.workspace_projects[project_name] = {"files": []}
166
  st.session_state.current_state['workspace_chat']['project_name'] = project_name
167
  commit_and_push_changes(f"Create project {project_name}")
168
  return f"Project {project_name} created successfully."
169
- else:
170
- return f"Project {project_name} already exists."
171
 
172
  def add_code_to_workspace(project_name, code, file_name):
173
  project_path = os.path.join(PROJECT_ROOT, project_name)
@@ -179,8 +168,7 @@ def add_code_to_workspace(project_name, code, file_name):
179
  st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
180
  commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
181
  return f"Code added to {file_name} in project {project_name} successfully."
182
- else:
183
- return f"Project {project_name} does not exist."
184
 
185
  def terminal_interface(command, project_name=None):
186
  if project_name:
@@ -190,58 +178,50 @@ def terminal_interface(command, project_name=None):
190
  result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
191
  else:
192
  result = subprocess.run(command, shell=True, capture_output=True, text=True)
193
- if result.returncode == 0:
194
- st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
195
- return result.stdout
196
- else:
197
- st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
198
- return result.stderr
199
 
200
  def code_editor_interface(code):
 
 
 
 
 
201
  try:
202
- formatted_code = black.format_str(code, mode=black.FileMode())
203
  except black.NothingChanged:
204
- formatted_code = code
205
  except Exception as e:
206
  return None, f"Error formatting code with black: {e}"
207
 
 
208
  result = StringIO()
209
  sys.stdout = result
210
  sys.stderr = result
211
  try:
212
  (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
213
- lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
214
  except Exception as e:
215
  return None, f"Error linting code with pylint: {e}"
216
  finally:
217
  sys.stdout = sys.__stdout__
218
  sys.stderr = sys.__stderr__
219
- return formatted_code, lint_message
220
 
221
  def translate_code(code, input_language, output_language):
222
- try:
223
- model = InstructModel()
224
- prompt = f"Translate the following {input_language} code to {output_language}:\n\n{code}"
225
- translated_code = model.generate_response(prompt)
226
- return translated_code
227
- except EnvironmentError as e:
228
- return f"Error loading model or translating code: {e}"
229
- except Exception as e:
230
- return f"An unexpected error occurred during code translation: {e}"
231
 
232
  def generate_code(code_idea):
233
- try:
234
- model = InstructModel() # Initialize Mixtral Instruct model
235
- except EnvironmentError as e:
236
- return f"Error loading model: {e}"
237
-
238
  prompt = f"Generate code for the following idea:\n\n{code_idea}"
239
  generated_code = model.generate_response(prompt)
240
  st.session_state.current_state['toolbox']['generated_code'] = generated_code
241
  return generated_code
242
 
243
  def commit_and_push_changes(commit_message):
244
- """Commits and pushes changes to the Hugging Face repository (needs improvement)."""
245
  try:
246
  subprocess.run(["git", "add", "."], check=True, capture_output=True, text=True)
247
  subprocess.run(["git", "commit", "-m", commit_message], check=True, capture_output=True, text=True)
@@ -259,10 +239,7 @@ st.sidebar.title("Navigation")
259
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
260
 
261
  if app_mode == "AI Agent Creator":
262
- # AI Agent Creator
263
  st.header("Create an AI Agent from Text")
264
-
265
- st.subheader("From Text")
266
  agent_name = st.text_input("Enter agent name:")
267
  text_input = st.text_area("Enter skills (one per line):")
268
  if st.button("Create Agent"):
@@ -271,7 +248,6 @@ if app_mode == "AI Agent Creator":
271
  st.session_state.available_agents.append(agent_name)
272
 
273
  elif app_mode == "Tool Box":
274
- # Tool Box
275
  st.header("AI-Powered Tools")
276
 
277
  # Chat Interface
@@ -320,7 +296,6 @@ elif app_mode == "Tool Box":
320
  st.code(generated_code, language="python")
321
 
322
  elif app_mode == "Workspace Chat App":
323
- # Workspace Chat App
324
  st.header("Workspace Chat App")
325
 
326
  # Project Workspace Creation
 
5
  import black
6
  from pylint import lint
7
  from io import StringIO
 
8
  import logging
9
  import atexit
10
  import time
11
  from datetime import datetime
12
+ from transformers import AutoTokenizer, AutoModelForCausalLM
13
+ import torch
14
 
15
  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
16
  PROJECT_ROOT = "projects"
17
  AGENT_DIRECTORY = "agents"
18
 
19
# Initialize session state
def initialize_session_state():
    """Seed st.session_state with every key this app reads, if missing."""
    defaults = {
        'chat_history': [],
        'terminal_history': [],
        'workspace_projects': {},
        'available_agents': [],
        'current_state': {
            'toolbox': {},
            'workspace_chat': {}
        },
    }
    for key, default in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = default

initialize_session_state()
36
 
37
  class InstructModel:
38
def __init__(self):
    """Initialize the Mixtral-8x7B-Instruct model wrapper.

    Stores the checkpoint name and immediately loads weights/tokenizer.
    """
    self.model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    self.load_model()
42
+
43
+ def load_model(self):
44
+ """Load the model and tokenizer"""
45
  try:
 
46
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
47
  self.model = AutoModelForCausalLM.from_pretrained(
48
  self.model_name,
 
54
 
55
def generate_response(self, prompt: str) -> str:
    """Generate a reply from the Mixtral model for *prompt*.

    Formats the prompt, tokenizes it onto the model's device, samples up
    to 512 new tokens, and strips the echoed prompt from the decoding.
    """
    formatted = self.format_prompt(prompt)
    encoded = self.tokenizer(formatted, return_tensors="pt").to(self.model.device)
    generated = self.model.generate(
        encoded.input_ids,
        max_new_tokens=512,
        temperature=0.7,
        top_p=0.95,
        do_sample=True,
        pad_token_id=self.tokenizer.eos_token_id,
    )
    return self.clean_response(generated, formatted)
68
+
69
def format_prompt(self, prompt: str) -> str:
    """Wrap *prompt* in Mixtral's ``[INST]`` instruction markers."""
    return "<s>[INST] " + prompt + " [/INST]"
72
+
73
def clean_response(self, outputs, formatted_prompt: str) -> str:
    """Decode the first generated sequence and strip the echoed prompt."""
    decoded = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
    without_prompt = decoded.replace(formatted_prompt, "")
    return without_prompt.strip()
 
 
 
 
 
 
 
77
 
78
  def __del__(self):
79
  """Cleanup when the model is no longer needed"""
 
84
  except:
85
  pass
86
 
 
 
87
  class AIAgent:
88
  def __init__(self, name, description, skills):
89
  self.name = name
 
92
 
93
def create_agent_prompt(self):
    """Build the system prompt describing this agent and its skills."""
    bullet_lines = [f"* {skill}" for skill in self.skills]
    skills_str = '\n'.join(bullet_lines)
    # NOTE(review): the diff rendering strips leading whitespace inside this
    # triple-quoted string, so the exact internal indentation is unverified.
    return f"""
    As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
    {skills_str}
    I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
    """
 
100
 
101
def autonomous_build(self, chat_history, workspace_projects):
    """Summarize the session state and suggest the next build step."""
    summary = self.summarize_chat_history(chat_history)
    summary += "\n\nWorkspace Projects:\n" + self.summarize_workspace_projects(workspace_projects)
    next_step = "Based on the current state, the next logical step is to implement the main application logic."
    return summary, next_step

def summarize_chat_history(self, chat_history):
    """Render (user, agent) message pairs as a readable transcript."""
    turns = [f":User {user}\nAgent: {agent}" for user, agent in chat_history]
    return "Chat History:\n" + "\n".join(turns)

def summarize_workspace_projects(self, workspace_projects):
    """Render each project and its details, one project per line."""
    rendered = (f"{project}: {details}" for project, details in workspace_projects.items())
    return "\n".join(rendered)
112
+
113
  def save_agent_to_file(agent):
114
+ os.makedirs(AGENT_DIRECTORY, exist_ok=True)
 
115
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
116
  config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
117
  with open(file_path, "w") as file:
 
125
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
126
  if os.path.exists(file_path):
127
  with open(file_path, "r") as file:
128
+ return file.read()
129
+ return None
 
 
130
 
131
  def create_agent_from_text(name, text):
132
  skills = text.split('\n')
 
136
 
137
def chat_interface(input_text):
    """Handle a chat message when no specific agent is selected.

    Args:
        input_text: The raw user message.

    Returns:
        The model's reply, or an error string when the model cannot be
        loaded.  Restores the EnvironmentError fallback the refactor
        dropped, so a load failure is reported to the UI instead of raised.
    """
    try:
        model = InstructModel()
    except EnvironmentError as e:
        return f"Error communicating with AI: {e}"
    return model.generate_response(f":User {input_text}\nAI:")
 
 
 
 
141
 
142
def chat_interface_with_agent(input_text, agent_name):
    """Handle a chat message routed through a named saved agent.

    Looks up the agent's stored prompt, prepends it to the user input,
    and asks the model for a reply.

    Returns:
        The model's reply, or an error string when the agent is unknown
        or the model cannot be loaded (fallback restored from the
        pre-refactor version).
    """
    agent_prompt = load_agent_prompt(agent_name)
    if agent_prompt is None:
        return f"Agent {agent_name} not found."
    try:
        model = InstructModel()
    except EnvironmentError as e:
        return f"Error loading model: {e}"
    combined_input = f"{agent_prompt}\n\n:User {input_text}\nAgent:"
    return model.generate_response(combined_input)
 
149
 
150
def workspace_interface(project_name):
    """Create a new project directory under PROJECT_ROOT.

    Registers the project in session state and commits the change.

    Returns:
        A human-readable status string in both the created and
        already-exists cases.
    """
    project_path = os.path.join(PROJECT_ROOT, project_name)
    if os.path.exists(project_path):
        return f"Project {project_name} already exists."
    # makedirs creates PROJECT_ROOT as a parent, so the separate
    # os.makedirs(PROJECT_ROOT, ...) call was redundant.
    os.makedirs(project_path, exist_ok=True)
    st.session_state.workspace_projects[project_name] = {"files": []}
    st.session_state.current_state['workspace_chat']['project_name'] = project_name
    commit_and_push_changes(f"Create project {project_name}")
    return f"Project {project_name} created successfully."
 
160
 
161
  def add_code_to_workspace(project_name, code, file_name):
162
  project_path = os.path.join(PROJECT_ROOT, project_name)
 
168
  st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
169
  commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
170
  return f"Code added to {file_name} in project {project_name} successfully."
171
+ return f"Project {project_name} does not exist."
 
172
 
173
  def terminal_interface(command, project_name=None):
174
  if project_name:
 
178
  result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
179
  else:
180
  result = subprocess.run(command, shell=True, capture_output=True, text=True)
181
+ output = result.stdout if result.returncode == 0 else result.stderr
182
+ st.session_state.current_state['toolbox']['terminal_output'] = output
183
+ return output
 
 
 
184
 
185
def code_editor_interface(code):
    """Format *code* with black, then lint the formatted result with pylint.

    Returns:
        (formatted_code, lint_message) on success, or (None, error_message)
        when formatting fails.  Bug fix: format_code signals failure as a
        (None, message) tuple, which the previous version passed straight
        into lint_code; that case is now short-circuited.
    """
    formatted_code = format_code(code)
    if isinstance(formatted_code, tuple):
        # Failure tuple from format_code — propagate instead of linting it.
        return formatted_code
    lint_message = lint_code(formatted_code)
    return formatted_code, lint_message

def format_code(code):
    """Return *code* reformatted by black.

    Returns the input unchanged when black reports NothingChanged, and
    (None, error_message) on any other failure.
    """
    try:
        return black.format_str(code, mode=black.FileMode())
    except black.NothingChanged:
        return code
    except Exception as e:
        return None, f"Error formatting code with black: {e}"

def lint_code(code):
    """Run pylint over *code* and return its combined stdout/stderr text.

    sys.stdout/sys.stderr are temporarily redirected to capture output;
    the finally block guarantees they are restored.  Returns
    (None, error_message) on failure.
    """
    result = StringIO()
    sys.stdout = result
    sys.stderr = result
    try:
        pylint_stdout, pylint_stderr = lint.py_run(code, return_std=True)
        return pylint_stdout.getvalue() + pylint_stderr.getvalue()
    except Exception as e:
        return None, f"Error linting code with pylint: {e}"
    finally:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
 
210
 
211
def translate_code(code, input_language, output_language):
    """Translate *code* from one programming language to another via the model.

    Returns:
        The translated code, or an error string on failure.  Restores the
        error fallbacks the refactor dropped so UI callers get a message
        instead of an exception.
    """
    try:
        model = InstructModel()
        prompt = f"Translate the following {input_language} code to {output_language}:\n\n{code}"
        return model.generate_response(prompt)
    except EnvironmentError as e:
        return f"Error loading model or translating code: {e}"
    except Exception as e:
        return f"An unexpected error occurred during code translation: {e}"
 
 
 
 
 
 
215
 
216
def generate_code(code_idea):
    """Generate code for *code_idea*, store it in session state, and return it.

    Returns:
        The generated code, or an error string when the model cannot be
        loaded (EnvironmentError fallback restored from the pre-refactor
        version instead of raising into the Streamlit UI).
    """
    try:
        model = InstructModel()
    except EnvironmentError as e:
        return f"Error loading model: {e}"
    prompt = f"Generate code for the following idea:\n\n{code_idea}"
    generated_code = model.generate_response(prompt)
    st.session_state.current_state['toolbox']['generated_code'] = generated_code
    return generated_code
222
 
223
  def commit_and_push_changes(commit_message):
224
+ """Commits and pushes changes to the Hugging Face repository."""
225
  try:
226
  subprocess.run(["git", "add", "."], check=True, capture_output=True, text=True)
227
  subprocess.run(["git", "commit", "-m", commit_message], check=True, capture_output=True, text=True)
 
239
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
240
 
241
  if app_mode == "AI Agent Creator":
 
242
  st.header("Create an AI Agent from Text")
 
 
243
  agent_name = st.text_input("Enter agent name:")
244
  text_input = st.text_area("Enter skills (one per line):")
245
  if st.button("Create Agent"):
 
248
  st.session_state.available_agents.append(agent_name)
249
 
250
  elif app_mode == "Tool Box":
 
251
  st.header("AI-Powered Tools")
252
 
253
  # Chat Interface
 
296
  st.code(generated_code, language="python")
297
 
298
  elif app_mode == "Workspace Chat App":
 
299
  st.header("Workspace Chat App")
300
 
301
  # Project Workspace Creation