acecalisto3 committed
Commit 1115ab9
1 Parent(s): 835d56a

Update app.py

Files changed (1): app.py (+444, -269)

app.py CHANGED
@@ -1,281 +1,456 @@
 import os
 import subprocess
-import streamlit as st
-from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
-from langchain_community.llms import HuggingFaceHub
-from langchain_community.embeddings import HuggingFaceHubEmbeddings
-from langchain_community.document_loaders import PyPDFLoader
-from langchain_community.vectorstores import FAISS
-from langchain.chains import ConversationalRetrievalChain
-from langchain.chains.question_answering import load_qa_chain
-from llama_cpp import Llama, LlamaCppPythonProvider, LlamaCppAgent
-from llama_cpp.llama_cpp_agent import get_messages_formatter_type, get_context_by_model
-from io import StringIO
-import tempfile
-import importlib
-import time
-import sys
-from flask import Flask
-
-# --- Global Variables ---
-CURRENT_PROJECT = {}  # Store project data (code, packages, etc.)
-MODEL_OPTIONS = {
-    "CodeQwen": "Qwen/CodeQwen1.5-7B-Chat-GGUF",
-    "Codestral": "bartowski/Codestral-22B-v0.1-GGUF",
-    "AutoCoder": "bartowski/AutoCoder-GGUF",
-}
-MODEL_FILENAMES = {
-    "CodeQwen": "codeqwen-1_5-7b-chat-q6_k.gguf",
-    "Codestral": "Codestral-22B-v0.1-Q6_K.gguf",
-    "AutoCoder": "AutoCoder-Q6_K.gguf",
-}
-HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
-PROJECT_ROOT = "projects"
-AGENT_DIRECTORY = "agents"
-
-# Global state to manage communication between Tool Box and Workspace Chat App
-if 'chat_history' not in st.session_state:
-    st.session_state.chat_history = []
-if 'terminal_history' not in st.session_state:
-    st.session_state.terminal_history = []
-if 'workspace_projects' not in st.session_state:
-    st.session_state.workspace_projects = {}
-if 'available_agents' not in st.session_state:
-    st.session_state.available_agents = []
-if 'current_state' not in st.session_state:
-    st.session_state.current_state = {
-        'toolbox': {},
-        'workspace_chat': {}
-    }
-
-# --- Load NLP Pipelines ---
-classifier = pipeline("text-classification", model="facebook/bart-large-mnli")
-
-# --- Load the model and tokenizer ---
-model = AutoModelForCausalLM.from_pretrained(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    use_auth_token=os.environ.get("huggingface_token")
 )
-tokenizer = AutoTokenizer.from_pretrained(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    use_auth_token=os.environ.get("huggingface_token")
 )
 
-# --- Utility Functions ---
-def install_and_import(package_name):
-    """Installs a package using pip and imports it."""
-    subprocess.check_call(["pip", "install", package_name])
-    return importlib.import_module(package_name)
-
-def extract_package_name(input_str):
-    """Extracts the package name from a PyPI URL or pip command."""
-    if input_str.startswith("https://pypi.org/project/"):
-        return input_str.split("/")[-2]
-    elif input_str.startswith("pip install "):
-        return input_str.split(" ")[2]
-    else:
-        return input_str
-
-def create_interface_from_input(input_str):
-    """Creates a Gradio interface with buttons for functions from a package."""
     try:
-        package_name = extract_package_name(input_str)
-        module = install_and_import(package_name)
-
-        # Handle Flask application context if needed
-        if 'flask' in sys.modules or 'flask_restful' in sys.modules:
-            app = Flask(__name__)
-            with app.app_context():
-                functions = [getattr(module, name) for name in dir(module) if callable(getattr(module, name))]
         else:
-            functions = [getattr(module, name) for name in dir(module) if callable(getattr(module, name))]
 
-        function_list = [(func.__name__, func) for func in functions if not func.__name__.startswith("_")]
-        return function_list, f"Interface for `{package_name}` created."
     except Exception as e:
-        return [], str(e)
 
-def execute_pip_command(command, add_message):
-    """Executes a pip command and streams the output."""
-    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
     while True:
-        output = process.stdout.readline()
-        if output == '' and process.poll() is not None:
-            break
-        if output:
-            add_message("System", f"{output.strip()}")
-            time.sleep(0.1)  # Simulate delay for more realistic streaming
-    rc = process.poll()
-    return rc
-
-def generate_text(input_text):
-    """Generates text using the loaded language model."""
-    inputs = tokenizer(input_text, return_tensors="pt")
-    output = model.generate(**inputs, max_length=500, num_return_sequences=1)
-    return tokenizer.decode(output[0], skip_special_tokens=True)
-
-# --- AI Agent Functions ---
-def analyze_user_intent(user_input):
-    """Classifies the user's intent based on their input."""
-    classification = classifier(user_input)
-    return classification[0]['label']
-
-def generate_mini_app_ideas(theme):
-    """Generates mini-app ideas based on the user's theme."""
-    if theme.lower() == "productivity":
-        return [
-            "Idea-to-Codebase Generator",
-            "Automated GitHub Repo Manager",
-            "AI-Powered IDE"
-        ]
-    elif theme.lower() == "creativity":
-        return [
-            "Brainstorming Assistant",
-            "Mood Board Generator",
-            "Writing Assistant"
-        ]
-    elif theme.lower() == "well-being":
-        return [
-            "Meditation Guide",
-            "Mood Tracker",
-            "Sleep Tracker"
-        ]
-    else:
-        return ["No matching mini-apps found. Try a different theme."]
-
-def generate_app_code(app_name, app_description, model_name, history):
-    """Generates code for the selected mini-app using the specified GGUF model."""
-    prompt = f"Write a Python script for a {app_description} named {app_name} using Gradio and Streamlit:"
-    agent = get_agent(model_name)
-    generated_code = agent.chat(prompt, history)
-    return generated_code
-
-def execute_terminal_command(command):
-    """Executes a terminal command and returns the output."""
-    try:
-        result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT, universal_newlines=True)
-        return result.strip(), None
-    except subprocess.CalledProcessError as e:
-        return e.output.strip(), str(e)
-
-def install_package(package_name):
-    """Installs a package using pip."""
-    output, error = execute_terminal_command(f"pip install {package_name}")
-    if error:
-        return f"Error installing package: {error}"
-    else:
-        return f"Package `{package_name}` installed successfully."
-
-def get_project_data():
-    """Returns the current project data."""
-    return CURRENT_PROJECT
-
-def update_project_data(key, value):
-    """Updates the project data."""
-    CURRENT_PROJECT[key] = value
-
-def handle_chat(input_text, history):
-    """Handles user input in the chat interface."""
-    def add_message(sender, message):
-        history.append((sender, message))
-    add_message("User", input_text)
-    if input_text.startswith("pip install ") or input_text.startswith("https://pypi.org/project/"):
-        package_name = extract_package_name(input_text)
-        add_message("System", f"Installing `{package_name}`...")
-        result = install_package(package_name)
-        add_message("System", result)
-        update_project_data("packages", CURRENT_PROJECT.get("packages", []) + [package_name])
-        return history, dynamic_functions
-    # --- AI Agent Interaction ---
-    if USER_INTENT is None:
-        add_message("System", analyze_user_intent(input_text))
-        add_message("System", "What kind of mini-app do you have in mind?")
-    elif not MINI_APPS:
-        add_message("System", "Here are some ideas:")
-        for idea in generate_mini_app_ideas(input_text):
-            add_message("System", f"- {idea}")
-        add_message("System", "Which one would you like to build?")
-    elif CURRENT_APP["name"] is None:
-        selected_app = input_text
-        app_description = next((app for app in MINI_APPS if selected_app in app), None)
-        if app_description:
-            add_message("System", f"Generating code for {app_description}...")
-            code = generate_app_code(selected_app, app_description, "CodeQwen", history)  # Use CodeQwen by default
-            add_message("System", f"```python\n{code}\n```")
-            add_message("System", "Code generated! What else can I do for you?")
-            update_project_data("code", code)
-            update_project_data("app_name", selected_app)
-            update_project_data("app_description", app_description)
-        else:
-            add_message("System", "Please choose from the provided mini-app ideas.")
-    else:
-        add_message("System", "You already have an app in progress. Do you want to start over?")
-    return history, dynamic_functions
-
-# --- Prebuilt Tools ---
-def generate_code_tool(input_text, history):
-    """Prebuilt tool for code generation."""
-    code = generate_app_code("MyTool", "A tool to do something", "CodeQwen", history)  # Use CodeQwen by default
-    return f"```python\n{code}\n```"
-
-def analyze_code_tool(input_text, history):
-    """Prebuilt tool for code analysis."""
-    agent = get_agent("Codestral")
-    analysis = agent.chat(input_text, history)
-    return analysis
-
-# --- Streamlit Interface ---
-st.title("AI4ME: Your Personal AI App Workshop")
-st.markdown("## Let's build your dream app together! 🤖")
-
-# --- Hugging Face Token Input ---
-huggingface_token = st.text_input("Enter your Hugging Face Token", type="password", key="huggingface_token")
-os.environ["huggingface_token"] = huggingface_token
-
-# --- Chat Interface ---
-chat_history = []
-chat_input = st.text_input("Tell me your idea...", key="chat_input")
-if chat_input:
-    chat_history, dynamic_functions = handle_chat(chat_input, chat_history)
-    for sender, message in chat_history:
-        st.markdown(f"**{sender}:** {message}")
-
-# --- Code Execution and Deployment ---
-if CURRENT_APP["code"]:
-    st.markdown("## Your App Code:")
-    code_area = st.text_area("Your App Code", value=CURRENT_APP["code"], key="code_area")
-    st.markdown("## Deploy Your App (Coming Soon!)")
-    # Add deployment functionality here using Streamlit's deployment features.
-    # For example, you could use Streamlit's `st.button` to trigger deployment.
-
-    # --- Code Execution ---
-    st.markdown("## Run Your App:")
-    if st.button("Execute Code"):
-        try:
-            # Use Hugging Face's text-generation pipeline for code execution
-            inputs = tokenizer(code_area, return_tensors="pt")
-            output = model.generate(**inputs, max_length=500, num_return_sequences=1)
-            output = tokenizer.decode(output[0], skip_special_tokens=True)
-            st.success(f"Code executed successfully!\n{output}")
-        except Exception as e:
-            st.error(f"Error executing code: {e}")
-
-    # --- Code Editing ---
-    st.markdown("## Edit Your Code:")
-    if st.button("Edit Code"):
-        try:
-            # Use Hugging Face's text-generation pipeline for code editing
-            prompt = f"Improve the following Python code:\n```python\n{code_area}\n```"
-            inputs = tokenizer(prompt, return_tensors="pt")
-            output = model.generate(**inputs, max_length=500, num_return_sequences=1)
-            edited_code = tokenizer.decode(output[0], skip_special_tokens=True).split("```python\n")[1].split("\n```")[0]
-            st.success(f"Code edited successfully!\n{edited_code}")
-            update_project_data("code", edited_code)
-            code_area.value = edited_code
-        except Exception as e:
-            st.error(f"Error editing code: {e}")
-
-# --- Prebuilt Tools ---
-st.markdown("## Prebuilt Tools:")
-with st.expander("Generate Code"):
-    code_input = st.text_area("Enter your code request:", key="code_input")
-    if st.button("Generate"):
-        code_output = generate_code_tool(code_input, chat_history)
-        st.markdown(code_output)
-with st.expander("Analyze Code"):
-    code_input = st.text_area("Enter your code:", key="analyze_code_input")
-    if st.button("Analyze"):
-        analysis_output = analyze_code_tool(code_input, chat_history)
-        st.markdown(analysis_output)
-
-# --- Additional Features ---
-# Add features like:
-# - Code editing
-# - Integration with external APIs
-# - Advanced AI agents for more complex tasks
-# - User account management
 import os
 import subprocess
+import random
+from huggingface_hub import InferenceClient
+import gradio as gr
+from safe_search import safe_search
+from i_search import google
+from i_search import i_search as i_s
+from agent import (
+    ACTION_PROMPT,
+    ADD_PROMPT,
+    COMPRESS_HISTORY_PROMPT,
+    LOG_PROMPT,
+    LOG_RESPONSE,
+    MODIFY_PROMPT,
+    PREFIX,
+    SEARCH_QUERY,
+    READ_PROMPT,
+    TASK_PROMPT,
+    UNDERSTAND_TEST_RESULTS_PROMPT,
 )
+from utils import parse_action, parse_file_content, read_python_module_structure
+import prompts  # assumed local module defining WEB_DEV, AI_SYSTEM_PROMPT, PYTHON_CODE_DEV (used by generate() below)
+from datetime import datetime
+
+now = datetime.now()
+date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+
+client = InferenceClient(
+    "mistralai/Mixtral-8x7B-Instruct-v0.1"
 )
 
+############################################
+
+VERBOSE = True
+MAX_HISTORY = 100
+# MODEL = "gpt-3.5-turbo"  # "gpt-4"
+
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+
+def run_gpt(
+    prompt_template,
+    stop_tokens,
+    max_tokens,
+    purpose,
+    **prompt_kwargs,
+):
+    seed = random.randint(1, 1111111111111111)
+    print(seed)
+    generate_kwargs = dict(
+        temperature=1.0,
+        max_new_tokens=2096,
+        top_p=0.99,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=seed,
+    )
+
+    content = PREFIX.format(
+        date_time_str=date_time_str,
+        purpose=purpose,
+        safe_search=safe_search,
+    ) + prompt_template.format(**prompt_kwargs)
+    if VERBOSE:
+        print(LOG_PROMPT.format(content))
+
+    # formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    # formatted_prompt = format_prompt(f'{content}', history)
+
+    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
+
+    if VERBOSE:
+        print(LOG_RESPONSE.format(resp))
+    return resp
+
+
+def compress_history(purpose, task, history, directory):
+    resp = run_gpt(
+        COMPRESS_HISTORY_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=512,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    history = "observation: {}\n".format(resp)
+    return history
+
+
+def call_search(purpose, task, history, directory, action_input):
+    print("CALLING SEARCH")
     try:
+        if "http" in action_input:
+            if "<" in action_input:
+                action_input = action_input.strip("<")
+            if ">" in action_input:
+                action_input = action_input.strip(">")
+
+            response = i_s(action_input)
+            # response = google(search_return)
+            print(response)
+            history += "observation: search result is: {}\n".format(response)
         else:
+            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
+    except Exception as e:
+        history += "observation: {}\n".format(e)
+    return "MAIN", None, history, task
+
+
+def call_main(purpose, task, history, directory, action_input):
+    resp = run_gpt(
+        ACTION_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=2096,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    lines = resp.strip().strip("\n").split("\n")
+    for line in lines:
+        if line == "":
+            continue
+        if line.startswith("thought: "):
+            history += "{}\n".format(line)
+        elif line.startswith("action: "):
+            action_name, action_input = parse_action(line)
+            print(f"ACTION_NAME :: {action_name}")
+            print(f"ACTION_INPUT :: {action_input}")
+
+            history += "{}\n".format(line)
+            if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                task = "END"
+                return action_name, action_input, history, task
+            else:
+                return action_name, action_input, history, task
+        else:
+            history += "{}\n".format(line)
+            # history += "observation: the following command did not produce any useful output: '{}', I need to check the command's syntax, or use a different command\n".format(line)
+
+    # return action_name, action_input, history, task
+    # assert False, "unknown action: {}".format(line)
+    return "MAIN", None, history, task
+
+
+def call_set_task(purpose, task, history, directory, action_input):
+    task = run_gpt(
+        TASK_PROMPT,
+        stop_tokens=[],
+        max_tokens=64,
+        purpose=purpose,
+        task=task,
+        history=history,
+    ).strip("\n")
+    history += "observation: task has been updated to: {}\n".format(task)
+    return "MAIN", None, history, task
+
+
+def end_fn(purpose, task, history, directory, action_input):
+    task = "END"
+    return "COMPLETE", "COMPLETE", history, task
+
+
+NAME_TO_FUNC = {
+    "MAIN": call_main,
+    "UPDATE-TASK": call_set_task,
+    "SEARCH": call_search,
+    "COMPLETE": end_fn,
+}
+
+
+def run_action(purpose, task, history, directory, action_name, action_input):
+    print(f"action_name::{action_name}")
+    try:
+        if "RESPONSE" in action_name or "COMPLETE" in action_name:
+            action_name = "COMPLETE"
+            task = "END"
+            return action_name, "COMPLETE", history, task
+
+        # compress the history when it is long
+        if len(history.split("\n")) > MAX_HISTORY:
+            if VERBOSE:
+                print("COMPRESSING HISTORY")
+            history = compress_history(purpose, task, history, directory)
+        if action_name not in NAME_TO_FUNC:
+            action_name = "MAIN"
+        if action_name == "" or action_name is None:
+            action_name = "MAIN"
+        assert action_name in NAME_TO_FUNC
+
+        print("RUN: ", action_name, action_input)
+        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
     except Exception as e:
+        history += "observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n"
+        return "MAIN", None, history, task
+
+
+def run(purpose, history):
+    # print(purpose)
+    # print(hist)
+    task = None
+    directory = "./"
+    if history:
+        history = str(history).strip("[]")
+    if not history:
+        history = ""
+
+    action_name = "UPDATE-TASK" if task is None else "MAIN"
+    action_input = None
     while True:
+        print("")
+        print("")
+        print("---")
+        print("purpose:", purpose)
+        print("task:", task)
+        print("---")
+        print(history)
+        print("---")
+
+        action_name, action_input, history, task = run_action(
+            purpose,
+            task,
+            history,
+            directory,
+            action_name,
+            action_input,
+        )
+        yield history
+        # yield ("", [(purpose, history)])
+        if task == "END":
+            return history
+            # return ("", [(purpose, history)])
+
+
+################################################
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+
+agents = [
+    "WEB_DEV",
+    "AI_SYSTEM_PROMPT",
+    "PYTHON_CODE_DEV",
+]
+
+
+def generate(
+    prompt,
+    history,
+    agent_name=agents[0],
+    sys_prompt="",
+    temperature=0.9,
+    max_new_tokens=256,
+    top_p=0.95,
+    repetition_penalty=1.0,
+):
+    seed = random.randint(1, 1111111111111111)
+
+    agent = prompts.WEB_DEV
+    if agent_name == "WEB_DEV":
+        agent = prompts.WEB_DEV
+    if agent_name == "AI_SYSTEM_PROMPT":
+        agent = prompts.AI_SYSTEM_PROMPT
+    if agent_name == "PYTHON_CODE_DEV":
+        agent = prompts.PYTHON_CODE_DEV
+    system_prompt = agent
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=seed,
+    )
+
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
+
+
+additional_inputs = [
+    gr.Dropdown(
+        label="Agents",
+        choices=[s for s in agents],
+        value=agents[0],
+        interactive=True,
+    ),
+    gr.Textbox(
+        label="System Prompt",
+        max_lines=1,
+        interactive=True,
+    ),
+    gr.Slider(
+        label="Temperature",
+        value=0.9,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        interactive=True,
+        info="Higher values produce more diverse outputs",
+    ),
+    gr.Slider(
+        label="Max new tokens",
+        value=1048 * 10,
+        minimum=0,
+        maximum=1048 * 10,
+        step=64,
+        interactive=True,
+        info="The maximum number of new tokens",
+    ),
+    gr.Slider(
+        label="Top-p (nucleus sampling)",
+        value=0.90,
+        minimum=0.0,
+        maximum=1,
+        step=0.05,
+        interactive=True,
+        info="Higher values sample more low-probability tokens",
+    ),
+    gr.Slider(
+        label="Repetition penalty",
+        value=1.2,
+        minimum=1.0,
+        maximum=2.0,
+        step=0.05,
+        interactive=True,
+        info="Penalize repeated tokens",
+    ),
+]
+
+examples = [
+    ["Create a basic Python web app using Flask.", None, None, None, None, None],
+    ["Build a simple Streamlit app to display a data visualization.", None, None, None, None, None],
+    ["I need a Gradio interface for a machine learning model that takes an image as input and outputs a classification.", None, None, None, None, None],
+    ["Generate a Python script to scrape data from a website.", None, None, None, None, None],
+    ["I'm building a React app. How can I use Axios to make API calls?", None, None, None, None, None],
+    ["Write a Python function to read data from a CSV file.", None, None, None, None, None],
+    ["I want to deploy my Flask app to Heroku.", None, None, None, None, None],
+    ["Explain the difference between Git and GitHub.", None, None, None, None, None],
+    ["How can I use Docker to containerize my Python app?", None, None, None, None, None],
+    ["I need a simple API endpoint for my web app using Flask.", None, None, None, None, None],
+    ["Create a function in Python to calculate the factorial of a number.", None, None, None, None, None],
+]
+
+'''
+gr.ChatInterface(
+    fn=run,
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
+    examples=examples,
+    concurrency_limit=20,
+).launch()
+with gr.Blocks() as ifacea:
+    gr.HTML("""TEST""")
+ifacea.launch()
+with gr.Blocks() as iface:
+    # chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    with gr.Row():
+        submit_b = gr.Button()
+        clear = gr.ClearButton([msg, chatbot])
+    submit_b.click(run, [msg, chatbot], [msg, chatbot])
+    msg.submit(run, [msg, chatbot], [msg, chatbot])
+iface.launch()
+'''
+
+gr.ChatInterface(
+    fn=run,
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
+    examples=examples,
+    concurrency_limit=20,
+).launch(show_api=False)
+
+Implementation of Next Steps:
+
+Terminal Integration:
+
+Install Libraries: Install either streamlit-terminal or gradio-terminal, depending on your chosen framework.
+Integrate the Terminal: Use the library's functions to embed a terminal component within your Streamlit or Gradio app.
+Capture Input: Capture the user's input from the terminal and pass it to your command execution function.
+Display Output: Display the output of the terminal commands, including both standard output and errors (a sketch follows this list).
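
A minimal sketch of the capture-and-stream half, assuming plain `subprocess` rather than a dedicated terminal component (`run_terminal_command` is a hypothetical helper name):

```python
import subprocess

def run_terminal_command(command: str):
    """Run a shell command and yield its combined stdout/stderr line by line."""
    process = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge errors into the same stream
        text=True,
    )
    for line in process.stdout:
        yield line.rstrip("\n")
    process.wait()
    yield f"[exit code: {process.returncode}]"
```

In Gradio this generator can back a streaming output component; in Streamlit, each yielded line can be appended to `st.session_state.terminal_history` and re-rendered.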
+Code Generation:
+
+LLM Selection: Choose a Hugging Face Transformers model that is suitable for code generation (e.g., google/flan-t5-xl, Salesforce/codet5-base, microsoft/CodeGPT-small).
+Prompt Engineering: Develop effective prompts for the LLM to generate code from natural-language instructions.
+Code Translation Function: Create a function that takes natural-language input, passes it to the LLM with the appropriate prompt, and returns the generated code.
+Code Correction: Explore ways to automatically correct code errors, perhaps combining syntax checking with LLM assistance. A sketch of the translation function and a cheap syntax check follows this list.
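
A sketch under these assumptions: the `transformers` pipeline API, `google/flan-t5-xl` as the chosen model, and placeholder prompt wording and token budget:

```python
from transformers import pipeline

# Any of the suggested models can be swapped in; flan-t5-xl is shown here.
generator = pipeline("text2text-generation", model="google/flan-t5-xl")

def translate_to_code(instruction: str, language: str = "Python") -> str:
    """Turn a natural-language instruction into code via a prompted LLM."""
    prompt = f"Write {language} code for the following task:\n{instruction}\nCode:"
    return generator(prompt, max_new_tokens=256)[0]["generated_text"]

def is_valid_python(code: str) -> bool:
    """Cheap syntax check before showing or running generated code."""
    try:
        compile(code, "<generated>", "exec")
        return True
    except SyntaxError:
        return False
```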
+Workspace Explorer:
+
+Streamlit or Gradio Filesystem Access: Use Streamlit's st.file_uploader or Gradio's gr.File component to let users upload files.
+File Management: Implement functions to create, edit, and delete files and directories within the workspace.
+Display Files: Use Streamlit's st.code or Gradio's gr.Code component to display the contents of files in the workspace.
+Directory Structure: Display the workspace's directory structure as a tree-like representation (an explorer sketch follows this list).
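
A minimal Gradio sketch, assuming the Gradio 4.x event API; `WORKSPACE` and `save_upload` are hypothetical names:

```python
import os
import gradio as gr

WORKSPACE = "workspace"  # assumed workspace directory
os.makedirs(WORKSPACE, exist_ok=True)

def save_upload(file):
    """Copy an uploaded file into the workspace and return its text contents."""
    dest = os.path.join(WORKSPACE, os.path.basename(file.name))
    with open(file.name, "rb") as src, open(dest, "wb") as out:
        out.write(src.read())
    with open(dest, "r", errors="replace") as f:
        return f.read()

with gr.Blocks() as explorer:
    upload = gr.File(label="Add a file to the workspace")
    contents = gr.Code(label="File contents", language="python")
    upload.upload(save_upload, upload, contents)
```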
+Dependency Management:
+
+Package Installation: Create a function that takes a package name as input, installs it with pip, and updates the requirements.txt file.
+Workspace Population: Develop a function to create files and directories in the workspace based on installed packages. An install-and-record sketch follows this list.
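
A sketch of the install-and-record function, assuming pip is invoked through the current interpreter and that `requirements.txt` lists bare package names:

```python
import subprocess
import sys

def install_and_record(package_name: str, requirements_path: str = "requirements.txt") -> str:
    """Install a package with pip and append it to requirements.txt if new."""
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", package_name],
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        return f"Installation failed:\n{result.stderr}"
    try:
        with open(requirements_path) as f:
            existing = {line.strip() for line in f}
    except FileNotFoundError:
        existing = set()
    if package_name not in existing:
        with open(requirements_path, "a") as f:
            f.write(package_name + "\n")
    return f"Installed `{package_name}` and updated {requirements_path}."
```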
+Application Build and Launch:
+
+Build Logic: Develop a function to build the web app from the user's code and dependencies.
+Launch Functionality: Implement a mechanism to launch the built app.
+Error Correction: Identify and correct errors during the build and launch process.
+Automated Assistance: Provide automated assistance during build and launch, with a slider to adjust how much the user can override (a minimal build-and-launch sketch follows).
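
A minimal sketch of the launch half, without the error-correction loop; it simply writes the generated code to `app.py` and starts it as a child process:

```python
import os
import subprocess
import sys

def build_and_launch(code: str, workspace: str = "workspace") -> subprocess.Popen:
    """Write the generated code to app.py and launch it as a child process."""
    os.makedirs(workspace, exist_ok=True)
    app_path = os.path.join(workspace, "app.py")
    with open(app_path, "w") as f:
        f.write(code)
    # The caller can poll() the handle, surface stderr, or terminate() on rebuild.
    return subprocess.Popen([sys.executable, app_path])
```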
+
+Recommendations, Enhancements, Optimizations, and Workflow:
+
+1. LLM Selection for Code Generation:
+   * **Google/Flan-T5-XL:** Excellent for code generation, particularly for Python.
+   * **Salesforce/CodeT5-Base:** Strong for code generation, with a focus on code summarization and translation.
+   * **Microsoft/CodeGPT-Small:** A smaller model that is suitable for code-generation tasks, especially if you have limited computational resources. A loader sketch follows this list.
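
A small loader sketch for switching between these models, assuming the `transformers` pipeline API; note that T5-style models use the text2text task while GPT-style models use plain text generation:

```python
from transformers import pipeline

# T5-style models are sequence-to-sequence; GPT-style models are causal.
SEQ2SEQ_MODELS = {"google/flan-t5-xl", "Salesforce/codet5-base"}

def load_code_model(model_id: str):
    """Load a code-generation pipeline with the task matching the architecture."""
    task = "text2text-generation" if model_id in SEQ2SEQ_MODELS else "text-generation"
    return pipeline(task, model=model_id)
```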
+
+2. Prompt Engineering for Code Generation:
+   * **Contextual Prompts:** Give the LLM as much context as possible, including the target programming language, libraries, and any specific requirements.
+   * **Code Snippets:** Where possible, include code snippets in the prompt to guide the LLM's generation.
+   * **Iterative Refinement:** Use iterative prompting to refine the generated code: start with a basic prompt, then feed the output back to the LLM with corrective feedback (a refinement-loop sketch follows this list).
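
A sketch of such a refinement loop; `generate_fn` is a stand-in for whatever generation call the app uses (e.g., a `transformers` pipeline or the `run_gpt` helper above):

```python
def refine_code(generate_fn, instruction: str, rounds: int = 2) -> str:
    """Iteratively ask the model to improve its own output."""
    code = generate_fn(f"Write Python code for: {instruction}")
    for _ in range(rounds):
        code = generate_fn(
            "Improve the following Python code. Fix bugs, add error handling, "
            f"and keep the same behavior:\n```python\n{code}\n```"
        )
    return code
```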
+
+3. Workspace Exploration:
+   * **Tree-Like View:** Use a tree-like representation to display the workspace's directory structure (a rendering sketch follows this list).
+   * **Search Functionality:** Implement a search bar so users can quickly find specific files or directories.
+   * **Code Highlighting:** Provide syntax highlighting for files in the workspace to improve readability.
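
A sketch of the tree renderer using only the standard library:

```python
import os

def render_tree(root: str, prefix: str = "") -> str:
    """Return an ASCII tree of a directory."""
    lines = []
    entries = sorted(os.listdir(root))
    for i, name in enumerate(entries):
        last = i == len(entries) - 1
        lines.append(prefix + ("└── " if last else "├── ") + name)
        path = os.path.join(root, name)
        if os.path.isdir(path):
            subtree = render_tree(path, prefix + ("    " if last else "│   "))
            if subtree:
                lines.append(subtree)
    return "\n".join(lines)
```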
+
+4. Dependency Management:
+   * **Virtual Environments:** Use virtual environments to isolate project dependencies and prevent conflicts.
+   * **Automatic Updates:** Implement a mechanism to update dependencies when new versions are available.
+   * **Dependency Locking:** Use tools like `pip-tools` or `poetry` to pin dependencies to specific versions, ensuring reproducible builds (see the sketch below).
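
A sketch of both steps, assuming `pip-tools` is installed and a `requirements.in` file exists:

```python
import subprocess
import sys

def create_virtualenv(path: str = ".venv") -> None:
    """Create an isolated environment for the project's dependencies."""
    subprocess.run([sys.executable, "-m", "venv", path], check=True)

def lock_dependencies() -> None:
    """Pin every transitive dependency with pip-tools (requires `pip install pip-tools`)."""
    subprocess.run(
        ["pip-compile", "requirements.in", "--output-file", "requirements.txt"],
        check=True,
    )
```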
+
+5. Application Build and Launch:
+   * **Build Tool Integration:** Consider integrating a build tool like `poetry` or `pipenv` into your workflow to automate the build process.
+   * **Containerization:** Containerize the app with Docker to ensure consistent deployments across environments.
+   * **Deployment Automation:** Explore platforms like Heroku, AWS Elastic Beanstalk, or Google App Engine to automate deployment.
+
+6. Automated Assistance:
+   * **Error Detection and Correction:** Implement a system that detects common coding errors and suggests corrections (a syntax-check sketch follows this list).
+   * **Code Completion:** Use an LLM to provide code-completion suggestions as the user types.
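
A sketch of the detection half using the standard-library `ast` module; handing the error back to an LLM for a suggested fix is left as a comment since the generation call is app-specific:

```python
import ast

def check_syntax(code: str):
    """Return None if the code parses, else a human-readable error description."""
    try:
        ast.parse(code)
        return None
    except SyntaxError as e:
        return f"Line {e.lineno}: {e.msg}"

# A detected error can then be handed back to the LLM for a suggested fix, e.g.:
# error = check_syntax(code)
# if error:
#     fixed = generate_fn(f"Fix this Python error ({error}) in:\n{code}")
```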