Update app.py

app.py CHANGED
@@ -52,6 +52,14 @@ def render_profile(profile: Optional[gr.OAuthProfile]) -> str:
     display_name = getattr(profile, "name", None) or getattr(profile, "username", "Hugging Face user")
     return f"Signed in as **{display_name}**."
 
+def refresh_auth_info(
+    profile: Optional[gr.OAuthProfile],
+    oauth_token: Optional[gr.OAuthToken],
+):
+    """Capture OAuth credentials for downstream callbacks."""
+    return oauth_token, render_profile(profile)
+
+
 def respond(
     message,
     image_files, # Changed parameter name and structure
@@ -75,18 +83,22 @@ def respond(
     print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
     print(f"Selected model (custom_model): {custom_model}")
     print(f"Model search term: {model_search_term}")
-    print(f"Selected model from radio: {selected_model}")
 
     api_token: Optional[str] = None
+    token_source = "anonymous"
 
     if oauth_token is not None and getattr(oauth_token, "token", None):
         api_token = oauth_token.token
+        token_source = "user"
         print("Using OAuth token from signed-in user for inference.")
     elif HF_READ_TOKEN:
         api_token = HF_READ_TOKEN
+        token_source = "space"
         print("Using server-configured Hugging Face token for inference.")
+    else:
+        print("No default token configured; relying on user sign-in or anonymous access.")
 
-    # Initialize the Inference Client with
+    # Initialize the Inference Client with appropriate credentials
     client_kwargs = {}
     if api_token is not None:
         client_kwargs["token"] = api_token
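Note: the selection above gives a signed-in user's OAuth token precedence over the Space-level `HF_READ_TOKEN` secret, falling back to anonymous access. The same precedence can be factored into a pure, testable helper; a minimal sketch (`resolve_token` is hypothetical, not part of app.py):

```python
from typing import Optional, Tuple

def resolve_token(
    user_token: Optional[str],
    space_token: Optional[str],
) -> Tuple[Optional[str], str]:
    """Mirror respond()'s precedence: user OAuth token, then Space secret,
    then anonymous access. Returns (api_token, token_source)."""
    if user_token:
        return user_token, "user"
    if space_token:
        return space_token, "space"
    return None, "anonymous"

# Quick checks of the precedence order.
assert resolve_token("hf_user_tok", "hf_space_tok") == ("hf_user_tok", "user")
assert resolve_token(None, "hf_space_tok") == ("hf_space_tok", "space")
assert resolve_token(None, None) == (None, "anonymous")
```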
@@ -224,6 +236,24 @@ def respond(
     except HfHubHTTPError as e:
         status = getattr(e.response, "status_code", None)
         if status in (401, 403):
+            if token_source == "user":
+                raise gr.Error(
+                    (
+                        "Failed to generate response: {}\n\n"
+                        "Your Hugging Face session must grant the **inference-api** (Make calls to Inference Providers) "
+                        "permission. Sign out, then sign back in and approve the requested scopes, or update the Space "
+                        "metadata to request that scope."
+                    ).format(e)
+                ) from e
+            if token_source == "space":
+                raise gr.Error(
+                    (
+                        "Failed to generate response: {}\n\n"
+                        "The Space-level token lacks Inference Provider access. Update the `HF_READ_TOKEN` secret with "
+                        "a token that has the `Make calls to Inference Providers` permission, or have users sign in with "
+                        "their own accounts."
+                    ).format(e)
+                ) from e
             raise gr.Error(
                 "Failed to generate response: {}. Sign in with your Hugging Face account and retry."
                 .format(e)
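Note: recording `token_source` when credentials are chosen is what lets this 401/403 handler tailor its advice to whoever supplied the rejected token. The same branching could be collapsed into a table-driven helper; a hedged sketch (`raise_auth_error` is hypothetical):

```python
import gradio as gr
from huggingface_hub.utils import HfHubHTTPError

def raise_auth_error(e: HfHubHTTPError, token_source: str) -> None:
    """Map a 401/403 from the Hub to a user-facing gr.Error whose hint
    matches the origin of the failing token."""
    hints = {
        "user": "Sign out, sign back in, and approve the inference-api scope.",
        "space": "Update the HF_READ_TOKEN secret with a token that can call Inference Providers.",
        "anonymous": "Sign in with your Hugging Face account and retry.",
    }
    hint = hints.get(token_source, hints["anonymous"])
    # `from e` keeps the original HTTP error chained in the Space logs.
    raise gr.Error(f"Failed to generate response: {e}\n\n{hint}") from e
```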
@@ -372,6 +402,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
     # Chat history state
     chat_history = gr.State([])
+    oauth_token_state = gr.State(None)
 
     # Function to filter models
     def filter_models(search_term):
@@ -441,7 +472,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         custom_model,
         search_term,
         selected_model,
-
+        oauth_token_obj: Optional[gr.OAuthToken] = None,
     ):
         # Check if history is valid
         if not history or len(history) == 0:
@@ -492,7 +523,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             custom_model,
             search_term,
             selected_model,
-            oauth_token
+            oauth_token=oauth_token_obj,
         ):
             history[-1][1] = response
             yield history
@@ -511,7 +542,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             custom_model,
             search_term,
             selected_model,
-            oauth_token
+            oauth_token=oauth_token_obj,
         ):
             history[-1][1] = response
             yield history
@@ -526,7 +557,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         bot,
         [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
          frequency_penalty_slider, seed_slider, custom_model_box,
-         model_search_box, featured_model_radio],
+         model_search_box, featured_model_radio, oauth_token_state],
         [chatbot]
     ).then(
         lambda: {"text": "", "files": []}, # Clear inputs after submission
@@ -550,7 +581,13 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     )
     print("Featured model radio button change event linked.")
 
-
+    login_button.click(
+        refresh_auth_info,
+        inputs=None,
+        outputs=[oauth_token_state, auth_status],
+    )
+
+    demo.load(refresh_auth_info, inputs=None, outputs=[oauth_token_state, auth_status])
 
 print("Gradio interface initialized.")
 
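Taken together, these changes wire Gradio's OAuth support end to end: Gradio injects `gr.OAuthProfile` and `gr.OAuthToken` into any handler whose parameters carry those annotations (they are never listed as component inputs), and the captured token is parked in a `gr.State` so later event chains can pass it along like any other value. A minimal self-contained sketch of the pattern, assuming a Space with `hf_oauth: true` in its README metadata; the component names here are illustrative:

```python
from typing import Optional
import gradio as gr

def refresh_auth_info(
    profile: Optional[gr.OAuthProfile],    # injected by Gradio via the annotation
    oauth_token: Optional[gr.OAuthToken],  # likewise injected; None when signed out
):
    if profile is None:
        return None, "Not signed in."
    return oauth_token, f"Signed in as **{profile.name or profile.username}**."

with gr.Blocks() as demo:
    login_button = gr.LoginButton()
    auth_status = gr.Markdown("Not signed in.")
    oauth_token_state = gr.State(None)

    # Capture credentials on page load and again after the login round-trip;
    # inputs=None because the OAuth parameters are filled in automatically.
    demo.load(refresh_auth_info, inputs=None, outputs=[oauth_token_state, auth_status])
    login_button.click(refresh_auth_info, inputs=None, outputs=[oauth_token_state, auth_status])

demo.launch()
```

Keeping the token in `gr.State` means handlers with already long `inputs` lists, like `bot` above, receive it as an ordinary value instead of re-declaring the OAuth annotations in every callback.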