import subprocess

import gradio as gr
from transformers import pipeline

# Load your Hugging Face model
model_name = "Canstralian/RedTeamAI"  # Replace with your Hugging Face model path
chatbot = pipeline("text-generation", model=model_name)


def generate_response(prompt):
    """Generate a response using the Hugging Face model."""
    try:
        response = chatbot(prompt, max_length=150, num_return_sequences=1)
        return response[0]["generated_text"]
    except Exception as e:
        return f"Model Error: {str(e)}"


def execute_bash(command):
    """Execute a Bash command and return its output (stdout, or stderr on failure)."""
    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return result.stdout or result.stderr
    except Exception as e:
        return f"Execution Error: {str(e)}"


def execute_python(script):
    """Execute a Python script dynamically in an isolated globals namespace."""
    try:
        exec_globals = {}
        exec(script, exec_globals)
        return "Script executed successfully!"
    except Exception as e:
        return f"Python Execution Error: {str(e)}"


def chatbot_interface(user_input, execution_mode, code=None):
    """Main interface logic for guiding through and executing scripts."""
    if execution_mode == "Guide":
        return generate_response(user_input)
    elif execution_mode == "Execute Bash":
        return execute_bash(user_input)
    elif execution_mode == "Execute Python":
        return execute_python(code)
    else:
        return "Invalid mode selected!"


# Gradio interface
interface = gr.Interface(
    fn=chatbot_interface,
    inputs=[
        gr.Textbox(label="Enter your query or script"),
        gr.Radio(["Guide", "Execute Bash", "Execute Python"], label="Mode"),
        gr.Textbox(label="Python Script (if applicable)", lines=10),
    ],
    outputs="text",
    title="RedTeamAI Script Assistant",
    description="A GPT-powered chatbot that guides you through and executes Bash and Python scripts.",
)

if __name__ == "__main__":
    interface.launch()