import os
import gradio as gr
from checks.status_check import is_endpoint_healthy
from checks.endpoint_utils import wake_endpoint
class ContentAgentUI:
"""
Gradio UI that:
- shows a minimal control panel first (status + Start button),
- auto-initializes the agent on load if the endpoint is already healthy,
- otherwise lets the user 'Start Agent' (wake -> health -> init),
- reveals the main chat panel (with header, guidance, examples, footer) after init.
"""
    def __init__(self, endpoint_uri: str, is_healthy: bool, health_message: str, agent_initializer, agent_type: str, compute: str):
self.endpoint_uri = endpoint_uri
self.is_healthy = bool(is_healthy)
self.health_message = health_message or ""
self.agent_initializer = agent_initializer # callable: (uri) -> CodeAgent
self.agent_type = agent_type or ""
self.compute = compute or ""
# set in build()
self.app: gr.Blocks | None = None
self.status_box = None
self.control_panel = None
self.main_panel = None
self.prompt = None
self.reply = None
self.agent_state = None
self.examples_radio = None
# ---------- helpers ----------
def _create_user_guidance(self):
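        """Render the static user guidance and technology notes as Markdown (called inside the Blocks context)."""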
gr.Markdown("""
Please enter text below to get started. Content Agent will try to determine how polite the language is, using the following classification:
- `polite`
- `somewhat polite`
- `neutral`
- `impolite`
Classification scores:
- Scores range from 0 to 1
""")
gr.Markdown(f"""
Technology:
- App runs the `{self.agent_type}` text generation model.
- Agent uses Intel's Polite Guard NLP model as a tool.
- Compute: {self.compute}
- Content Agent's LLM runs on demand rather than consuming resources 24/7.
""")
def _initial_status_text(self) -> str:
# neutral; on_load will set real status and maybe auto-init
return "Checking endpoint status…"
def _load_examples(self) -> list[str]:
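        """Load example prompts from the adjacent 'examples' directory, one .txt file per example."""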
print("load examples")
ex_dir = os.path.join(os.path.dirname(__file__), "examples")
out: list[str] = []
if os.path.isdir(ex_dir):
for name in sorted(os.listdir(ex_dir)):
if name.lower().endswith(".txt"):
p = os.path.join(ex_dir, name)
print(p)
try:
with open(p, "r", encoding="utf-8", errors="ignore") as f:
out.append(f.read())
except Exception:
pass
return out
# ---------- agent call ----------
@staticmethod
def _call_agent(text: str, agent) -> str:
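        """Run the agent on the submitted text, or return a friendly notice if the agent is not initialized yet."""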
try:
if agent is None:
return "Content Agent's LLM is sleeping and will need to be started. Click 'Start Agent'."
return str(agent.run(text)) # smolagents.CodeAgent API
except Exception as e:
return f"Error: {e}"
# ---------- UI build ----------
def build(self) -> gr.Blocks:
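        """Build the Gradio Blocks app once, wiring load, start, and submit events, and cache it on self.app."""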
if self.app is not None:
return self.app
examples = self._load_examples()
with gr.Blocks() as demo:
# global header (always visible)
gr.Markdown("# Content Agent")
# Control panel (shown first; may auto-hide on load)
with gr.Group(visible=True) as self.control_panel:
self.status_box = gr.Textbox(
label="Status",
value=self._initial_status_text(),
lines=8,
interactive=False,
)
start_btn = gr.Button("Start Agent")
gr.Markdown("It may take up to 5 minutes to wake up the agent")
# Main panel (hidden until agent is initialized)
with gr.Group(visible=False) as self.main_panel:
# English only
strInput = "Content Input"
                strPlaceholder = "Copy and paste your content for evaluation here..."
strSubmit = "Submit"
strOutput = "Content feedback"
# Guidance / about
self._create_user_guidance()
# Chat controls
self.agent_state = gr.State(None)
self.prompt = gr.Textbox(label=strInput, placeholder=strPlaceholder)
self.reply = gr.Textbox(label=strOutput, interactive=False, lines=12, max_lines=20)
submit_btn = gr.Button(strSubmit)
# Use bound methods to submit content
submit_btn.click(self._call_agent, inputs=[self.prompt, self.agent_state], outputs=self.reply)
self.prompt.submit(self._call_agent, inputs=[self.prompt, self.agent_state], outputs=self.reply)
# Examples (optional)
gr.Markdown("### Try one of these examples")
if examples:
gr.Markdown("examples found")
self.examples_radio = gr.Radio(choices=examples, label="Examples")
# fill the prompt when an example is picked
self.examples_radio.change(lambda ex: ex, inputs=self.examples_radio, outputs=self.prompt)
else:
gr.Markdown("*No examples found.*")
# Footer
gr.Markdown("<div id='footer'>Thanks for trying it out!</div>")
# --- AUTO INIT ON LOAD IF HEALTHY ---
def on_load():
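                # Returns a 4-tuple matching outputs=[status_box, control_panel, main_panel, agent_state].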
healthy, msg = is_endpoint_healthy(self.endpoint_uri)
if healthy:
try:
agent = self.agent_initializer(self.endpoint_uri)
return (
f"Endpoint healthy ✅ — {msg}. Agent initialized.",
gr.update(visible=False), # hide control panel
gr.update(visible=True), # show main panel
agent,
)
except Exception as e:
return (
f"Agent init failed: {e}",
gr.update(visible=True),
gr.update(visible=False),
None,
)
# not healthy → keep Start button path
return (
f"The AI LLM is sleeping due to inactivity: {msg}\nClick 'Start Agent' to wake and initialize.",
gr.update(visible=True),
gr.update(visible=False),
None,
)
demo.load(
on_load,
inputs=None,
outputs=[self.status_box, self.control_panel, self.main_panel, self.agent_state],
)
# --- MANUAL START (wake → health → init) ---
def on_start():
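                # Generator: each yield streams a 4-tuple (status text, control-panel update,
                # main-panel update, agent) to the same outputs wired to start_btn.click below.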
lines: list[str] = []
def push(s: str):
lines.append(s)
return ("\n".join(lines), gr.update(), gr.update(), None)
# Wake with progress
yield push("Waking endpoint… (this can take several minutes for cold starts)")
ok, err = wake_endpoint(self.endpoint_uri, max_wait=600, poll_every=5.0, log=lines.append)
yield ("\n".join(lines), gr.update(), gr.update(), None) # flush all logs
if not ok:
yield push(f"[Server message] {err or 'wake failed'}")
return
# Health → init
yield push("Endpoint awake ✅. Checking health…")
healthy, msg = is_endpoint_healthy(self.endpoint_uri)
if not healthy:
yield push(f"[Server message] {msg}")
return
yield push("Initializing agent…")
try:
agent = self.agent_initializer(self.endpoint_uri)
except Exception as e:
yield push(f"Agent init failed: {e}")
return
yield ("Agent initialized ✅", gr.update(visible=False), gr.update(visible=True), agent)
start_btn.click(
on_start,
inputs=None,
outputs=[self.status_box, self.control_panel, self.main_panel, self.agent_state],
)
self.app = demo
return self.app
# ---------- public API ----------
def launch(self, **kwargs):
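        """Build the UI if necessary and launch it, forwarding keyword arguments to gr.Blocks.launch()."""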
return self.build().launch(**kwargs)
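
# Minimal usage sketch (illustrative only): the endpoint URI, agent initializer, and
# constructor arguments below are hypothetical placeholders, not values from this repo.
# A real entry point would supply its own inference endpoint and a callable that builds
# a smolagents CodeAgent for that endpoint.
if __name__ == "__main__":
    def _example_initializer(uri: str):
        # Placeholder: replace with code that returns a configured agent for `uri`.
        raise NotImplementedError("Provide a real agent initializer")

    ui = ContentAgentUI(
        endpoint_uri="https://example-endpoint.invalid",  # hypothetical endpoint URI
        is_healthy=False,                 # on_load re-checks health, so a stale value is fine
        health_message="not checked yet",
        agent_initializer=_example_initializer,
        agent_type="text-generation",     # assumed label shown in the guidance panel
        compute="CPU",                    # assumed compute description
    )
    ui.launch()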