# RAM-P-App / app.py
# Source: Hugging Face Space by Harshit0414 ("Update app.py", commit a570d68, verified)
"""
Frontend Gradio App for RAM-P (PUBLIC)
This is the public-facing UI that communicates with the private backend via API.
Deploy this as a PUBLIC Hugging Face Space.
"""
import gradio as gr
import os
from gradio_client import Client
# --- Backend configuration -------------------------------------------------
# The private backend Space URL and an HF access token are supplied through
# the Space's secrets / environment settings.
BACKEND_URL = os.getenv("BACKEND_URL", "")  # e.g., "https://username-backend.hf.space"
HF_TOKEN = os.getenv("HF_TOKEN", "")  # Hugging Face token for authentication

if not BACKEND_URL:
    raise ValueError("BACKEND_URL environment variable must be set!")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable must be set!")

# Initialize backend client.
# gradio_client automatically authenticates against private Spaces using the
# HF_TOKEN environment variable, so exporting it is sufficient.  HF_TOKEN is
# guaranteed non-empty here (validated above), so no conditional is needed.
try:
    os.environ["HF_TOKEN"] = HF_TOKEN
    backend_client = Client(BACKEND_URL)
    print(f"Connected to backend at {BACKEND_URL}")
    # Debug aid only: list the API endpoints the backend exposes.
    try:
        api_info = backend_client.view_api()
        print(f"Available API endpoints: {list(api_info.keys()) if isinstance(api_info, dict) else 'Could not list APIs'}")
    except Exception:
        # Purely diagnostic; never let endpoint listing break startup.
        pass
except Exception as e:
    # Degrade gracefully: the UI still renders, and each handler reports
    # the failure when backend_client is None.
    print(f"Warning: Could not connect to backend: {e}")
    print("Make sure BACKEND_URL and HF_TOKEN are set correctly.")
    print(f"BACKEND_URL: {BACKEND_URL}")
    print(f"HF_TOKEN set: {bool(HF_TOKEN)}")
    backend_client = None
def add_sentences_ui(sentences_text):
    """UI handler: push user sentences to the backend and report vocab stats.

    Returns a (status, vocab_markdown) pair for the Textbox/Markdown outputs.
    """
    if not backend_client:
        return ("❌ Backend not available. Please check configuration.",
                "**Error:** Backend connection failed.")
    try:
        response = backend_client.predict(
            sentences_text,
            api_name="/api_add_sentences",
        )
        if not isinstance(response, dict):
            # Anything other than a dict means the backend API changed shape.
            return str(response), "**Error:** Unexpected response format."
        info = response.get("vocab_info", {})
        summary = (
            f"**Current Vocabulary:** {info.get('vocab_size', 0)} words\n"
            f"**Corpus:** {info.get('corpus_size', 0)} sentences\n"
            f"**Trained:** {info.get('trained_size', 0)} sentences"
        )
        return response.get("status", "Unknown status"), summary
    except Exception as exc:
        return f"❌ Error: {str(exc)}", "**Error:** Could not connect to backend."
def train_brain_ui(epochs, progress=gr.Progress()):
    """UI handler for training the brain.

    Generator: yields an in-progress message immediately, then the final
    status once /api_train returns, so the Markdown output updates live.

    Args:
        epochs: Number of passes over the corpus (slider value; coerced to int).
        progress: Gradio progress tracker (not used directly; its presence
            enables the built-in progress UI for this event).

    Yields:
        Human-readable status strings.
    """
    if not backend_client:
        yield "❌ Backend not available. Please check configuration."
        return
    try:
        # Give immediate feedback; training can take a while.
        # (Fixed: this emoji was mojibake-garbled as "πŸ”„" in the source.)
        yield "🔄 Training in progress... Please wait..."
        result = backend_client.predict(
            int(epochs),
            api_name="/api_train"
        )
        # The backend may answer with a plain string, a dict, or other data.
        if isinstance(result, str):
            yield result
        elif isinstance(result, dict):
            yield result.get("status", "Training completed.")
        else:
            yield str(result)
    except Exception as e:
        yield f"❌ Error: {str(e)}"
def run_stream_ui(seed_word, steps, coupling_gain, transmission_threshold):
    """UI handler: run the stream-of-consciousness simulation on the backend.

    Returns an (image, text) pair for the Image/Markdown output components.
    """
    if not backend_client:
        return None, "❌ Backend not available. Please check configuration."
    try:
        response = backend_client.predict(
            seed_word,
            int(steps),
            float(coupling_gain),
            float(transmission_threshold),
            api_name="/api_run_stream",
        )
        # Multi-output Gradio endpoints come back as a sequence; dict
        # responses are unpacked by key; anything else is surfaced verbatim.
        if isinstance(response, (list, tuple)) and len(response) >= 2:
            image, narrative = response[0], response[1]
            return image, narrative
        if isinstance(response, dict):
            return response.get("image"), response.get("text", "")
        return None, f"Unexpected response: {response}"
    except Exception as exc:
        return None, f"❌ Error: {str(exc)}"
def clear_brain_ui():
    """UI handler: reset the backend brain and refresh the vocab summary.

    Returns a (status, vocab_markdown) pair for the Markdown outputs.
    """
    if not backend_client:
        return ("❌ Backend not available. Please check configuration.",
                "**Error:** Backend connection failed.")
    try:
        response = backend_client.predict(api_name="/api_clear_brain")
        if not isinstance(response, dict):
            return str(response), "**Error:** Unexpected response format."
        info = response.get("vocab_info", {})
        summary = (
            f"**Current Vocabulary:** {info.get('vocab_size', 0)} words\n"
            f"**Corpus:** {info.get('corpus_size', 0)} sentences\n"
            f"**Trained:** {info.get('trained_size', 0)} sentences"
        )
        return response.get("status", "Cleared."), summary
    except Exception as exc:
        return f"❌ Error: {str(exc)}", "**Error:** Could not connect to backend."
# ---------------------------------------------------------------------------
# Create Frontend Interface: three tabs (teach / train / simulate) wired to
# the backend handler functions defined above. Built at import time; launched
# from the __main__ guard below.
# ---------------------------------------------------------------------------
with gr.Blocks(title="RAM-P - Interactive Learning") as frontend_app:
    # Intro / usage banner shown above the tabs.
    gr.Markdown("""
# 🧠 RAM-P - Interactive Learning
**Start with a blank brain and teach it by adding sentences!**
### How to use:
1. **Add Sentences**: Input sentences (one per line) to build vocabulary and corpus
2. **Train Brain**: Click "Train Brain" to let it learn associations from your sentences
3. **Run Stream**: Enter a seed word and watch the stream of consciousness flow!
""")
    with gr.Tabs():
        # --- Tab 1: corpus entry ------------------------------------------
        with gr.Tab("1. Add Sentences"):
            gr.Markdown("### Add sentences to teach the brain")
            gr.Markdown("Enter sentences (one per line). The brain will extract vocabulary from these sentences.")
            sentences_input = gr.Textbox(
                label="Sentences",
                placeholder="the monkey ate a banana\nprogrammer wrote code\nastronomer saw stars",
                lines=10,
                info="Enter sentences, one per line"
            )
            add_btn = gr.Button("Add Sentences", variant="primary")
            add_output = gr.Textbox(label="Status", interactive=False)
            vocab_display = gr.Markdown(label="Vocabulary Info")
            # Sends the raw textbox contents to /api_add_sentences.
            add_btn.click(
                fn=add_sentences_ui,
                inputs=sentences_input,
                outputs=[add_output, vocab_display]
            )
        # --- Tab 2: training and brain management -------------------------
        with gr.Tab("2. Train Brain"):
            gr.Markdown("### Train the brain on your corpus")
            gr.Markdown("The brain will learn associations between words that appear together in sentences. **Incremental learning**: Adding new sentences expands the brain without losing previous knowledge.")
            with gr.Row():
                with gr.Column(scale=2):
                    epochs_slider = gr.Slider(
                        label="Training Epochs",
                        minimum=1,
                        maximum=10,
                        value=2,
                        step=1,
                        info="Number of times to go through the corpus"
                    )
                    train_btn = gr.Button("Train Brain", variant="primary", size="lg")
                    train_output = gr.Markdown(label="Training Status", value="Ready to train. Click 'Train Brain' to start.")
                    # train_brain_ui is a generator, so status text streams
                    # into the Markdown output as it is yielded.
                    train_btn.click(
                        fn=train_brain_ui,
                        inputs=epochs_slider,
                        outputs=train_output
                    )
                with gr.Column(scale=1):
                    gr.Markdown("### Brain Management")
                    clear_btn = gr.Button("Clear Brain", variant="stop", size="lg")
                    clear_output = gr.Markdown(label="Clear Status")
                    clear_vocab_display = gr.Markdown(label="Vocabulary Info")
                    clear_btn.click(
                        fn=clear_brain_ui,
                        inputs=None,
                        outputs=[clear_output, clear_vocab_display]
                    )
        # --- Tab 3: stream-of-consciousness simulation --------------------
        with gr.Tab("3. Stream of Consciousness"):
            gr.Markdown("### Run stream of consciousness simulation")
            gr.Markdown("Enter a seed word and watch how the brain's thoughts flow and associate.")
            with gr.Row():
                with gr.Column(scale=1):
                    seed_word_input = gr.Textbox(
                        label="Seed Word",
                        value="",
                        placeholder="Enter a word from your vocabulary...",
                        info="The initial concept to inject"
                    )
                    steps_slider = gr.Slider(
                        label="Simulation Steps",
                        minimum=100,
                        maximum=1000,
                        value=400,
                        step=50
                    )
                    coupling_slider = gr.Slider(
                        label="Coupling Gain",
                        minimum=0.0,
                        maximum=200.0,
                        value=80.0,
                        step=5.0,
                        info="How strongly thoughts pull on each other"
                    )
                    threshold_slider = gr.Slider(
                        label="Transmission Threshold",
                        minimum=0.01,
                        maximum=0.5,
                        value=0.05,
                        step=0.01,
                        info="Minimum activation for influence"
                    )
                    stream_btn = gr.Button("Run Stream", variant="primary", size="lg")
                with gr.Column(scale=2):
                    # type="filepath": presumably the backend returns an image
                    # file path rather than raw pixel data — TODO confirm.
                    stream_image = gr.Image(
                        label="Stream of Consciousness Visualization",
                        type="filepath"
                    )
                    stream_text = gr.Markdown(label="Narrative Chain")
            stream_btn.click(
                fn=run_stream_ui,
                inputs=[seed_word_input, steps_slider, coupling_slider, threshold_slider],
                outputs=[stream_image, stream_text]
            )
    # Footer tips shown below the tabs.
    gr.Markdown("""
---
**Tips:**
- Add diverse sentences to build a rich vocabulary
- More training epochs = stronger associations
- Try different seed words to see different thought patterns
""")
# Script entry point: serve the Gradio app.
if __name__ == "__main__":
    # Let Gradio use default port (7860) or GRADIO_SERVER_PORT env var
    # Don't hardcode port - let Hugging Face Spaces handle it
    frontend_app.launch()