acecalisto3 committed on
Commit
035579e
·
verified ·
1 Parent(s): c422b39

Update app.py

Files changed (1)
  1. app.py +461 -1
app.py CHANGED
@@ -161,7 +161,467 @@ def chat_interface_with_agent(input_text: str, agent_name: str) -> str:
161
  if agent_prompt is None:
162
  return f"Agent {agent_name} not found."
163
 
164
- model_name = "MaziyarPanahi/Codestral-22B-v0.1-GGUF"
164
+ import os
165
+ import subprocess
166
+ import streamlit as st
167
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
168
+ import black
169
+ from pylint import lint
170
+ from io import StringIO
+ import requests  # used by interact_with_web_interface
+ import openai  # used by translate_code and generate_code
171
+
172
+ HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
173
+ PROJECT_ROOT = "projects"
174
+ AGENT_DIRECTORY = "agents"
175
+
176
+ # Global state to manage communication between Tool Box and Workspace Chat App
177
+ if 'chat_history' not in st.session_state:
178
+ st.session_state.chat_history = []
179
+ if 'terminal_history' not in st.session_state:
180
+ st.session_state.terminal_history = []
181
+ if 'workspace_projects' not in st.session_state:
182
+ st.session_state.workspace_projects = {}
183
+ if 'available_agents' not in st.session_state:
184
+ st.session_state.available_agents = []
185
+ if 'current_state' not in st.session_state:
186
+ st.session_state.current_state = {
187
+ 'toolbox': {},
188
+ 'workspace_chat': {}
189
+ }
190
+
191
+ class AIAgent:
192
+ def __init__(self, name, description, skills):
193
+ self.name = name
194
+ self.description = description
195
+ self.skills = skills
196
+
197
+ def create_agent_prompt(self):
198
+ skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
199
+ agent_prompt = f"""
200
+ As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
201
+ {skills_str}
202
+
203
+ I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
204
+ """
205
+ return agent_prompt
206
+
207
+ def autonomous_build(self, chat_history, workspace_projects):
208
+ """
209
+ Autonomous build logic that continues based on the state of chat history and workspace projects.
210
+ """
211
+ summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
212
+ summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
213
+
214
+ next_step = "Based on the current state, the next logical step is to implement the main application logic."
215
+
216
+ return summary, next_step
217
+
218
+ def save_agent_to_file(agent):
219
+ """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
220
+ if not os.path.exists(AGENT_DIRECTORY):
221
+ os.makedirs(AGENT_DIRECTORY)
222
+ file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
223
+ config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
224
+ with open(file_path, "w") as file:
225
+ file.write(agent.create_agent_prompt())
226
+ with open(config_path, "w") as file:
227
+ file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
228
+ st.session_state.available_agents.append(agent.name)
229
+
230
+ commit_and_push_changes(f"Add agent {agent.name}")
231
+
232
+ def load_agent_prompt(agent_name):
233
+ """Loads an agent prompt from a file."""
234
+ file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
235
+ if os.path.exists(file_path):
236
+ with open(file_path, "r") as file:
237
+ agent_prompt = file.read()
238
+ return agent_prompt
239
+ else:
240
+ return None
241
+
242
+ def create_agent_from_text(name, text):
243
+ skills = text.split('\n')
244
+ agent = AIAgent(name, "AI agent created from text input.", skills)
245
+ save_agent_to_file(agent)
246
+ return agent.create_agent_prompt()
247
+
248
+ # Chat interface using a selected agent
249
+ def chat_interface_with_agent(input_text, agent_name):
250
+ agent_prompt = load_agent_prompt(agent_name)
251
+ if agent_prompt is None:
252
+ return f"Agent {agent_name} not found."
253
+
254
+ # Load the GPT-2 model which is compatible with AutoModelForCausalLM
255
+ model_name = "gpt2"
256
+ try:
257
+ model = AutoModelForCausalLM.from_pretrained(model_name)
258
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
259
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
260
+ except EnvironmentError as e:
261
+ return f"Error loading model: {e}"
262
+
263
+ # Combine the agent prompt with user input
264
+ combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
265
+
266
+ # Truncate input text to avoid exceeding the model's maximum length
267
+ max_input_length = 900
268
+ input_ids = tokenizer.encode(combined_input, return_tensors="pt")
269
+ if input_ids.shape[1] > max_input_length:
270
+ input_ids = input_ids[:, :max_input_length]
271
+
272
+ # Generate chatbot response
273
+ outputs = model.generate(
274
+ input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
275
+ )
276
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
277
+ return response
278
+
279
+ def workspace_interface(project_name):
280
+ project_path = os.path.join(PROJECT_ROOT, project_name)
281
+ if not os.path.exists(PROJECT_ROOT):
282
+ os.makedirs(PROJECT_ROOT)
283
+ if not os.path.exists(project_path):
284
+ os.makedirs(project_path)
285
+ st.session_state.workspace_projects[project_name] = {"files": []}
286
+ st.session_state.current_state['workspace_chat']['project_name'] = project_name
287
+ commit_and_push_changes(f"Create project {project_name}")
288
+ return f"Project {project_name} created successfully."
289
+ else:
290
+ return f"Project {project_name} already exists."
291
+
292
+ def add_code_to_workspace(project_name, code, file_name):
293
+ project_path = os.path.join(PROJECT_ROOT, project_name)
294
+ if os.path.exists(project_path):
295
+ file_path = os.path.join(project_path, file_name)
296
+ with open(file_path, "w") as file:
297
+ file.write(code)
298
+ st.session_state.workspace_projects[project_name]["files"].append(file_name)
299
+ st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
300
+ commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
301
+ return f"Code added to {file_name} in project {project_name} successfully."
302
+ else:
303
+ return f"Project {project_name} does not exist."
304
+
305
+ def terminal_interface(command, project_name=None):
306
+ if project_name:
307
+ project_path = os.path.join(PROJECT_ROOT, project_name)
308
+ if not os.path.exists(project_path):
309
+ return f"Project {project_name} does not exist."
310
+ result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
311
+ else:
312
+ result = subprocess.run(command, shell=True, capture_output=True, text=True)
313
+ if result.returncode == 0:
314
+ st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
315
+ return result.stdout
316
+ else:
317
+ st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
318
+ return result.stderr
319
+
320
+ def summarize_text(text):
321
+ summarizer = pipeline("summarization")
322
+ summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
323
+ st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
324
+ return summary[0]['summary_text']
325
+
326
+ def sentiment_analysis(text):
327
+ analyzer = pipeline("sentiment-analysis")
328
+ sentiment = analyzer(text)
329
+ st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
330
+ return sentiment[0]
331
+
332
+ # ... [rest of the translate_code function, but remove the OpenAI API call and replace it with your own logic] ...
333
+
334
+ def generate_code(code_idea):
335
+ # Replace this with a call to a Hugging Face model or your own logic
336
+ # For example, using a text-generation pipeline:
337
+ generator = pipeline('text-generation', model='gpt2')  # placeholder open model; 'gpt4o' is not a valid Hugging Face model id
337
+ generated_code = generator(code_idea, max_length=1024, num_return_sequences=1)[0]['generated_text']
343
+ st.session_state.current_state['toolbox']['generated_code'] = generated_code
344
+
345
+ return generated_code
346
+
347
+ def translate_code(code, input_language, output_language):
348
+ # Define a dictionary to map programming languages to their corresponding file extensions
349
+ language_extensions = {
350
+ "Python": "py",
351
+ "JavaScript": "js",
352
+ "Java": "java",
353
+ "C++": "cpp",
354
+ "C#": "cs",
355
+ "Ruby": "rb",
356
+ "Go": "go",
357
+ "PHP": "php",
358
+ "Swift": "swift",
359
+ "TypeScript": "ts",
360
+ }
361
+
362
+ # Add code to handle edge cases such as invalid input and unsupported programming languages
363
+ if input_language not in language_extensions:
364
+ raise ValueError(f"Invalid input language: {input_language}")
365
+ if output_language not in language_extensions:
366
+ raise ValueError(f"Invalid output language: {output_language}")
367
+
368
+ # Use the dictionary to map the input and output languages to their corresponding file extensions
369
+ input_extension = language_extensions[input_language]
370
+ output_extension = language_extensions[output_language]
371
+
372
+ # Translate the code using the OpenAI API
373
+ prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
374
+ response = openai.ChatCompletion.create(
375
+ model="gpt-4",
376
+ messages=[
377
+ {"role": "system", "content": "You are an expert software developer."},
378
+ {"role": "user", "content": prompt}
379
+ ]
380
+ )
383
+ # Return the translated code
384
+ translated_code = response.choices[0].message['content'].strip()
385
+ st.session_state.current_state['toolbox']['translated_code'] = translated_code
386
+ return translated_code
387
+
388
+ def generate_code(code_idea):
389
+ response = openai.ChatCompletion.create(
390
+ model="gpt-4",
391
+ messages=[
392
+ {"role": "system", "content": "You are an expert software developer."},
393
+ {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
394
+ ]
395
+ )
396
+ generated_code = response.choices[0].message['content'].strip()
397
+ st.session_state.current_state['toolbox']['generated_code'] = generated_code
398
+ return generated_code
399
+
400
+ def commit_and_push_changes(commit_message):
401
+ """Commits and pushes changes to the Hugging Face repository."""
402
+ commands = [
403
+ "git add .",
404
+ f"git commit -m '{commit_message}'",
405
+ "git push"
406
+ ]
407
+ for command in commands:
408
+ result = subprocess.run(command, shell=True, capture_output=True, text=True)
409
+ if result.returncode != 0:
410
+ st.error(f"Error executing command '{command}': {result.stderr}")
411
+ break
412
+
413
+ def interact_with_web_interface(agent, api_key, url, payload):
414
+ """
415
+ Interacts with a web interface using the provided API key and payload.
416
+
417
+ Args:
418
+ agent: The AIAgent instance.
419
+ api_key: The API key for the web interface.
420
+ url: The URL of the web interface.
421
+ payload: The payload to send to the web interface.
422
+
423
+ Returns:
424
+ The response from the web interface.
425
+ """
426
+
427
+ # Use the agent's knowledge to determine the appropriate HTTP method and headers.
428
+ http_method = get_http_method(url)
429
+ headers = get_headers(url)
430
+
431
+ # Add the API key to the headers.
432
+ headers["Authorization"] = f"Bearer {api_key}"
433
+
434
+ # Send the request to the web interface.
435
+ response = requests.request(http_method, url, headers=headers, json=payload)
436
+
437
+ # Return the response.
438
+ return response
439
+
440
+ def get_http_method(url):
441
+ """
442
+ Determines the appropriate HTTP method for the given URL.
443
+
444
+ Args:
445
+ url: The URL of the web interface.
446
+
447
+ Returns:
448
+ The HTTP method (e.g., "GET", "POST", "PUT", "DELETE").
449
+ """
450
+
451
+ # Use the agent's knowledge to determine the HTTP method.
452
+ # For example, the agent might know that the URL is for a REST API endpoint that supports CRUD operations.
453
+
454
+ return "GET"
455
+
456
+ def get_headers(url):
457
+ """
458
+ Determines the appropriate headers for the given URL.
459
+
460
+ Args:
461
+ url: The URL of the web interface.
462
+
463
+ Returns:
464
+ A dictionary of headers.
465
+ """
466
+
467
+ # Use the agent's knowledge to determine the headers.
468
+ # For example, the agent might know that the web interface requires an "Authorization" header with an API key.
469
+
470
+ return {"Content-Type": "application/json"}
471
+
472
+ # ... (rest of the code)
473
+
475
+
476
+ # Streamlit App
477
+ st.title("AI Agent Creator")
478
+
479
+ # Sidebar navigation
480
+ st.sidebar.title("Navigation")
481
+ app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
482
+
483
+ if app_mode == "AI Agent Creator":
484
+ # AI Agent Creator
485
+ st.header("Create an AI Agent from Text")
486
+
487
+ st.subheader("From Text")
488
+ agent_name = st.text_input("Enter agent name:")
489
+ text_input = st.text_area("Enter skills (one per line):")
490
+ if st.button("Create Agent"):
491
+ agent_prompt = create_agent_from_text(agent_name, text_input)
492
+ st.success(f"Agent '{agent_name}' created and saved successfully.")
493
+ st.session_state.available_agents.append(agent_name)
494
+
495
+ elif app_mode == "Tool Box":
496
+ # Tool Box
497
+ st.header("AI-Powered Tools")
498
+
499
+ # Chat Interface
500
+ st.subheader("Chat with CodeCraft")
501
+ chat_input = st.text_area("Enter your message:")
502
+ if st.button("Send"):
503
+ if chat_input.startswith("@"):
504
+ agent_name = chat_input.split(" ")[0][1:] # Extract agent_name from @agent_name
505
+ chat_input = " ".join(chat_input.split(" ")[1:]) # Remove agent_name from input
506
+ chat_response = chat_interface_with_agent(chat_input, agent_name)
507
+ else:
508
+ chat_response = chat_interface(chat_input)
509
+ st.session_state.chat_history.append((chat_input, chat_response))
510
+ st.write(f"CodeCraft: {chat_response}")
511
+
512
+ # Terminal Interface
513
+ st.subheader("Terminal")
514
+ terminal_input = st.text_input("Enter a command:")
515
+ if st.button("Run"):
516
+ terminal_output = terminal_interface(terminal_input)
517
+ st.session_state.terminal_history.append((terminal_input, terminal_output))
518
+ st.code(terminal_output, language="bash")
519
+
520
+ # Code Editor Interface
521
+ st.subheader("Code Editor")
522
+ code_editor = st.text_area("Write your code:", height=300)
523
+ if st.button("Format & Lint"):
524
+ formatted_code, lint_message = code_editor_interface(code_editor)
525
+ st.code(formatted_code, language="python")
526
+ st.info(lint_message)
527
+
528
+ # Text Summarization Tool
529
+ st.subheader("Summarize Text")
530
+ text_to_summarize = st.text_area("Enter text to summarize:")
531
+ if st.button("Summarize"):
532
+ summary = summarize_text(text_to_summarize)
533
+ st.write(f"Summary: {summary}")
534
+
535
+ # Sentiment Analysis Tool
536
+ st.subheader("Sentiment Analysis")
537
+ sentiment_text = st.text_area("Enter text for sentiment analysis:")
538
+ if st.button("Analyze Sentiment"):
539
+ sentiment = sentiment_analysis(sentiment_text)
540
+ st.write(f"Sentiment: {sentiment}")
541
+
542
+ # Text Translation Tool (Code Translation)
543
+ st.subheader("Translate Code")
544
+ code_to_translate = st.text_area("Enter code to translate:")
545
+ input_language = st.text_input("Enter input language (e.g. 'Python'):")
546
+ output_language = st.text_input("Enter output language (e.g. 'JavaScript'):")
547
+ if st.button("Translate Code"):
548
+ translated_code = translate_code(code_to_translate, input_language, output_language)
549
+ st.code(translated_code, language=output_language.lower())
550
+
551
+ # Code Generation
552
+ st.subheader("Code Generation")
553
+ code_idea = st.text_input("Enter your code idea:")
554
+ if st.button("Generate Code"):
555
+ generated_code = generate_code(code_idea)
556
+ st.code(generated_code, language="python")
557
+
558
+ # Display Preset Commands
559
+ st.subheader("Preset Commands")
560
+ preset_commands = {
561
+ "Create a new project": "create_project('project_name')",
562
+ "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
563
+ "Run terminal command": "terminal_interface('command', 'project_name')",
564
+ "Generate code": "generate_code('code_idea')",
565
+ "Summarize text": "summarize_text('text')",
566
+ "Analyze sentiment": "sentiment_analysis('text')",
567
+ "Translate code": "translate_code('code', 'source_language', 'target_language')",
568
+ }
569
+ for command_name, command in preset_commands.items():
570
+ st.write(f"{command_name}: `{command}`")
571
+
572
+ elif app_mode == "Workspace Chat App":
573
+ # Workspace Chat App
574
+ st.header("Workspace Chat App")
575
+
576
+ # Project Workspace Creation
577
+ st.subheader("Create a New Project")
578
+ project_name = st.text_input("Enter project name:")
579
+ if st.button("Create Project"):
580
+ workspace_status = workspace_interface(project_name)
581
+ st.success(workspace_status)
582
+
583
+ # Add Code to Workspace
584
+ st.subheader("Add Code to Workspace")
585
+ code_to_add = st.text_area("Enter code to add to workspace:")
586
+ file_name = st.text_input("Enter file name (e.g. 'app.py'):")
587
+ if st.button("Add Code"):
588
+ add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
589
+ st.success(add_code_status)
590
+
591
+ # Terminal Interface with Project Context
592
+ st.subheader("Terminal (Workspace Context)")
593
+ terminal_input = st.text_input("Enter a command within the workspace:")
594
+ if st.button("Run Command"):
595
+ terminal_output = terminal_interface(terminal_input, project_name)
596
+ st.code(terminal_output, language="bash")
597
+
598
+ # Chat Interface for Guidance
599
+ st.subheader("Chat with CodeCraft for Guidance")
600
+ chat_input = st.text_area("Enter your message for guidance:")
601
+ if st.button("Get Guidance"):
602
+ chat_response = chat_interface(chat_input)
603
+ st.session_state.chat_history.append((chat_input, chat_response))
604
+ st.write(f"CodeCraft: {chat_response}")
605
+
606
+ # Display Chat History
607
+ st.subheader("Chat History")
608
+ for user_input, response in st.session_state.chat_history:
609
+ st.write(f"User: {user_input}")
610
+ st.write(f"CodeCraft: {response}")
611
+
612
+ # Display Terminal History
613
+ st.subheader("Terminal History")
614
+ for command, output in st.session_state.terminal_history:
615
+ st.write(f"Command: {command}")
616
+ st.code(output, language="bash")
617
+
618
+ # Display Projects and Files
619
+ st.subheader("Workspace Projects")
620
+ for project, details in st.session_state.workspace_projects.items():
621
+ st.write(f"Project: {project}")
622
+ st.write("Files:")
623
+ for file in details["files"]:
624
+ st.write(f"- {file}")
625
  try:
626
  generator = pipeline("text-generation", model=model_name)
627
  generator.tokenizer.pad_token = generator.tokenizer.eos_token