# DevToolKit — Streamlit Space: AI-agent creation, tool box, and workspace chat.
import logging
import os
import subprocess
from datetime import datetime

import streamlit as st
import torch
from dotenv import load_dotenv
from huggingface_hub import HfApi, cached_download, hf_hub_url
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    RagRetriever,
    pipeline,
)
# --- Constants ---
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
PROJECT_ROOT = "projects"    # local directory holding all workspace projects
AGENT_DIRECTORY = "agents"   # reserved for saved agent definitions
AVAILABLE_CODE_GENERATIVE_MODELS = [
    "bigcode/starcoder",             # popular and powerful
    "Salesforce/codegen-350M-mono",  # smaller, good for quick tasks
    "microsoft/CodeGPT-small",       # smaller, good for quick tasks
    "google/flan-t5-xl",             # powerful, good for complex tasks
    "facebook/bart-large-cnn",       # good for text-to-code tasks
]

# Load environment variables (expects HUGGING_FACE_API_KEY in .env or the environment).
load_dotenv()
HF_TOKEN = os.getenv("HUGGING_FACE_API_KEY")

# Initialize module-wide logging.
logging.basicConfig(level=logging.INFO)
# Global state shared between the Tool Box and the Workspace Chat App.
# Each key is seeded only on the first script run; Streamlit reruns keep
# whatever values are already in session_state.
_SESSION_DEFAULTS = {
    'chat_history': [],
    'terminal_history': [],
    'workspace_projects': {},
    'available_agents': [],
    'current_state': {
        'toolbox': {},
        'workspace_chat': {}
    },
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# --- Pre-trained models (loaded once at import time) ---

# RAG retriever for context lookup. NOTE(review): facebook/rag-token-base
# requires an index configuration; the dummy dataset keeps startup cheap —
# confirm against deployment needs before relying on retrieval quality.
rag_retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
)

# BUG FIX: DialoGPT is a decoder-only (causal) checkpoint, so it must be
# loaded with AutoModelForCausalLM — AutoModelForSeq2SeqLM cannot load it.
chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# Tokenizer matching the chat model.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
def process_input(user_input):
    """Generate a chat response for *user_input* with the DialoGPT chat model.

    The original implementation was non-functional: it tokenized twice, called
    the RAG retriever as if it were a generator, read attributes that do not
    exist on its output, and returned a raw tensor. This version performs a
    single tokenize -> generate -> decode pipeline.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        The model's decoded response as a string.
    """
    # Tokenize once; DialoGPT expects the EOS token appended to the prompt.
    encoded = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt")
    # Autoregressive generation — a plain forward pass does not produce text.
    with torch.no_grad():
        output_ids = chat_model.generate(
            encoded.input_ids,
            attention_mask=encoded.attention_mask,
            max_new_tokens=256,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens (everything after the prompt).
    return tokenizer.decode(
        output_ids[0][encoded.input_ids.shape[-1]:], skip_special_tokens=True
    )
def workspace_interface(project_name):
    """Create a new project directory under PROJECT_ROOT and register it.

    Args:
        project_name: Name of the project folder to create.

    Returns:
        A human-readable status message.
    """
    project_path = os.path.join(PROJECT_ROOT, project_name)
    try:
        # EAFP: let the filesystem arbitrate instead of an exists() check,
        # which is race-prone between the check and the create.
        os.makedirs(project_path)
    except FileExistsError:
        return f"Project '{project_name}' already exists."
    st.session_state.workspace_projects[project_name] = {'files': []}
    return f"Project '{project_name}' created successfully."
def add_code_to_workspace(project_name, code, file_name):
    """Write *code* to *file_name* inside an existing project and register it.

    Args:
        project_name: Project that must already exist under PROJECT_ROOT.
        code: Source text to write.
        file_name: Target file name (e.g. 'app.py').

    Returns:
        A human-readable status message (errors are logged, not raised).
    """
    project_path = os.path.join(PROJECT_ROOT, project_name)
    if not os.path.exists(project_path):
        return f"Project '{project_name}' does not exist."
    file_path = os.path.join(project_path, file_name)
    try:
        # Explicit encoding so written source is platform-independent.
        with open(file_path, "w", encoding="utf-8") as fh:
            fh.write(code)
        st.session_state.workspace_projects[project_name]['files'].append(file_name)
        return f"Code added to '{file_name}' in project '{project_name}'."
    except Exception as e:
        logging.error(f"Error adding code: {file_name}: {e}")
        return f"Error adding code: {file_name}"
def run_code(command, project_name=None):
    """Run a shell command, optionally inside a project's directory.

    SECURITY NOTE: shell=True executes arbitrary user input; acceptable here
    only because this backs an intentional terminal feature of the app.

    Args:
        command: Shell command line to execute.
        project_name: Optional project whose directory becomes the cwd.

    Returns:
        The command's stdout; when the command fails, stderr is appended so
        errors are visible instead of silently dropped (the original returned
        stdout only).
    """
    cwd = os.path.join(PROJECT_ROOT, project_name) if project_name else None
    result = subprocess.run(
        command, shell=True, capture_output=True, text=True, cwd=cwd
    )
    if result.returncode != 0 and result.stderr:
        return result.stdout + result.stderr
    return result.stdout
def display_chat_history(history):
    """Format (user_input, response) pairs into a readable transcript."""
    return "".join(
        f"User: {question}\nAgent: {answer}\n\n" for question, answer in history
    )
def display_workspace_projects(projects):
    """Format each project and its file list into a readable summary."""
    chunks = []
    for name, details in projects.items():
        chunks.append(f"Project: {name}\nFiles:\n")
        chunks.extend(f"  - {entry}\n" for entry in details['files'])
    return "".join(chunks)
def download_models():
    """Pre-fetch each available code-generation model's config into the cache.

    BUG FIX: cached_download() expects a full URL, not a bare repo id — the
    original passed the model id and always failed. NOTE(review): this fetches
    only config.json per repo; full weights need
    huggingface_hub.snapshot_download — confirm intent.
    """
    for model in AVAILABLE_CODE_GENERATIVE_MODELS:
        try:
            url = hf_hub_url(model, filename="config.json")
            cached_download(url)
            logging.info(f"Downloaded model '{model}' successfully.")
        except Exception as e:
            logging.error(f"Error downloading model '{model}': {e}")
def deploy_space_to_hf(project_name, hf_token):
    """Commit the built Space files to git and publish them to the Hub."""
    # Timestamp suffix keeps repeated deployments from colliding on the name.
    repository_name = f"my-awesome-space_{datetime.now().timestamp()}"
    files = get_built_space_files()
    committed = deploy_to_git(project_name, repository_name, files)
    if not committed:
        return "Failed to commit changes to Space."
    publish_space(repository_name, hf_token)
    return f"Space '{repository_name}' deployed successfully."
def get_built_space_files():
    """Read every registered workspace file and return their raw contents.

    Returns:
        A list of bytes objects, one per file across all projects.
    """
    files = []
    # BUG FIX: project dicts have no 'project_name' key — the name is the
    # dict key itself, so iterate items() rather than values().
    for project_name, details in st.session_state.workspace_projects.items():
        for file_name in details['files']:
            file_path = os.path.join(PROJECT_ROOT, project_name, file_name)
            with open(file_path, "rb") as fh:  # don't shadow the loop variable
                files.append(fh.read())
    return files
def deploy_to_git(project_name, repository_name, files):
    """Initialise a git repo in the project directory and commit its contents.

    Args:
        project_name: Project directory (under PROJECT_ROOT) to commit from.
        repository_name: Target repository name (not used by the local commit).
        files: Raw file contents (bytes) to stage alongside the project tree.

    Returns:
        True when the final commit succeeded, False otherwise.
    """
    project_path = os.path.join(PROJECT_ROOT, project_name)

    def _git(*args):
        # One place for the git invocation: argument list (no shell),
        # captured output, always run from the project directory.
        return subprocess.run(["git", *args], capture_output=True, cwd=project_path)

    _git("init")
    _git("add", "-A")
    # BUG FIX: the temp file must live inside project_path — the original
    # wrote temp.txt to the process cwd, so `git add` (cwd=project_path)
    # could never see it. Also dropped hf_hub_url(repository_name), which
    # was unused and called with a missing required filename argument.
    temp_path = os.path.join(project_path, "temp.txt")
    for content in files:
        with open(temp_path, "wb") as temp_file:
            temp_file.write(content)
        _git("add", "temp.txt")
    if os.path.exists(temp_path):
        os.remove(temp_path)
    commit = _git("commit", "-m", "Initial commit")
    return commit.returncode == 0
def publish_space(repository_name, hf_token):
    """Create the Space repository on the Hugging Face Hub.

    BUG FIX: HfApi has no create_model() method; create_repo() with
    repo_type="space" is the supported call. exist_ok avoids failing when
    the repo was created by an earlier attempt.
    """
    api = HfApi(token=hf_token)
    api.create_repo(repo_id=repository_name, repo_type="space", exist_ok=True)
def handle_autonomous_build():
    """Kick off an automated build of the first project with the first agent."""
    if not st.session_state.workspace_projects or not st.session_state.available_agents:
        st.error("No projects or agents available to build.")
        return
    # BUG FIX: dict_keys is not subscriptable — .keys()[0] raised TypeError.
    project_name = next(iter(st.session_state.workspace_projects))
    selected_agent = st.session_state.available_agents[0]
    # .get() so missing state keys surface as "no model selected" rather
    # than an uncaught KeyError outside the try block below.
    code_idea = st.session_state.current_state["workspace_chat"].get("user_input", "")
    selected_models = st.session_state.current_state["toolbox"].get("selected_models", [])
    code_generative_model = next(
        (model for model in AVAILABLE_CODE_GENERATIVE_MODELS if model in selected_models),
        None,
    )
    if not code_generative_model:
        st.error("No code-generative model selected.")
        return
    logging.info(
        f"Building project '{project_name}' with agent '{selected_agent}' "
        f"and model '{code_generative_model}'."
    )
    try:
        # TODO: replace the stub with the real build (generate code, run it,
        # update workspace projects, and reflect the result in the UI).
        summary, next_step = build_project(
            project_name, selected_agent, code_idea, code_generative_model
        )
        st.write(f"Build summary: {summary}")
        st.write(f"Next step: {next_step}")
        if next_step == "Deploy to Hugging Face Hub":
            deploy_response = deploy_space_to_hf(project_name, HF_TOKEN)
            st.write(deploy_response)
    except Exception as e:
        logging.error(f"Error during build process: {e}")
        st.error("Error during build process.")
def build_project(project_name, agent, code_idea, code_generative_model):
    """Placeholder build step: report success and no follow-up action.

    TODO: generate code with the selected model, run it, and update the
    workspace projects before returning real results.

    Returns:
        A (summary, next_step) pair of strings.
    """
    return "Project built successfully.", ""
def main():
    """Entry point: render the Streamlit UI for the three app modes."""
    st.title("AI Agent Creator")

    # Sidebar navigation
    st.sidebar.title("Navigation")
    app_mode = st.sidebar.selectbox(
        "Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"]
    )

    if app_mode == "AI Agent Creator":
        _render_agent_creator()
    elif app_mode == "Tool Box":
        _render_tool_box()
    elif app_mode == "Workspace Chat App":
        _render_workspace_chat()


def _render_agent_creator():
    """AI Agent Creator mode: build an agent from free-text skills."""
    st.header("Create an AI Agent from Text")
    st.subheader("From Text")
    agent_name = st.text_input("Enter agent name:")
    text_input = st.text_area("Enter skills (one per line):")
    if st.button("Create Agent"):
        # Drop blank lines so trailing newlines don't become empty skills.
        skills = [line.strip() for line in text_input.split('\n') if line.strip()]
        try:
            # NOTE(review): AIAgent is not defined or imported anywhere in
            # this file — this raises NameError until the class is provided.
            agent = AIAgent(agent_name, "AI agent created from text input", skills)
            st.session_state.available_agents.append(agent_name)
            st.success(f"Agent '{agent_name}' created and saved successfully.")
        except Exception as e:
            st.error(f"Error creating agent: {e}")


def _render_tool_box():
    """Tool Box mode: chat, terminal, project management, and model tools."""
    st.header("AI-Powered Tools")

    # Chat Interface
    st.subheader("Chat with CodeCraft")
    chat_input = st.text_area("Enter your message:")
    if st.button("Send"):
        response = process_input(chat_input)
        st.session_state.chat_history.append((chat_input, response))
        st.write(f"CodeCraft: {response}")

    # Terminal Interface
    st.subheader("Terminal")
    terminal_input = st.text_input("Enter a command:")
    if st.button("Run"):
        output = run_code(terminal_input)
        st.session_state.terminal_history.append((terminal_input, output))
        st.code(output, language="bash")

    # Project Management
    st.subheader("Project Management")
    project_name_input = st.text_input("Enter Project Name:")
    if st.button("Create Project"):
        st.write(workspace_interface(project_name_input))
    code_to_add = st.text_area("Enter Code to Add to Workspace:", height=150)
    file_name_input = st.text_input("Enter File Name (e.g., 'app.py'):")
    if st.button("Add Code"):
        st.write(add_code_to_workspace(project_name_input, code_to_add, file_name_input))

    # Display Chat History
    st.subheader("Chat History")
    chat_history = display_chat_history(st.session_state.chat_history)
    st.text_area("Chat History", value=chat_history, height=200)

    # Display Workspace Projects
    st.subheader("Workspace Projects")
    workspace_projects = display_workspace_projects(st.session_state.workspace_projects)
    st.text_area("Workspace Projects", value=workspace_projects, height=200)

    # Download and deploy models
    if st.button("Download and Deploy Models"):
        download_models()
        st.info("Models downloaded and deployed.")


def _render_workspace_chat():
    """Workspace Chat App mode: agent chat, code generation, autonomous build."""
    st.header("Workspace Chat App")

    # Chat Interface with AI Agents
    st.subheader("Chat with AI Agents")
    selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
    agent_chat_input = st.text_area("Enter your message for the agent:")
    if st.button("Send to Agent"):
        response = process_input(agent_chat_input)
        st.session_state.chat_history.append((agent_chat_input, response))
        st.write(f"{selected_agent}: {response}")

    # Code Generation
    st.subheader("Code Generation")
    code_idea = st.text_input("Enter your code idea:")
    selected_model = st.selectbox("Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
    if st.button("Generate Code"):
        # NOTE(review): this executes the idea as a shell command instead of
        # invoking selected_model — confirm intended behavior before shipping.
        generated_code = run_code(code_idea)
        st.code(generated_code, language="python")

    # Autonomous build process
    if st.button("Automate Build Process"):
        handle_autonomous_build()
# Script entry guard: run the app only when executed directly.
if __name__ == "__main__":
    main()