Romi Nur Ismanto Claude Opus 4.6 (1M context) committed on
Commit
551c5e0
·
1 Parent(s): 8192600

Fix variable shadowing and add login check

Browse files

- Rename loop variable 'message' to 'chunk' to avoid shadowing the function parameter
- Add None check for hf_token when user is not logged in

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -14,6 +14,10 @@ def respond(
14
  """
15
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
  """
 
 
 
 
17
  client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
 
19
  messages = [{"role": "system", "content": system_message}]
@@ -24,14 +28,14 @@ def respond(
24
 
25
  response = ""
26
 
27
- for message in client.chat_completion(
28
  messages,
29
  max_tokens=max_tokens,
30
  stream=True,
31
  temperature=temperature,
32
  top_p=top_p,
33
  ):
34
- choices = message.choices
35
  token = ""
36
  if len(choices) and choices[0].delta.content:
37
  token = choices[0].delta.content
 
14
  """
15
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
  """
17
+ if hf_token is None:
18
+ yield "⚠️ Silakan login dulu dengan tombol Login di sidebar."
19
+ return
20
+
21
  client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
22
 
23
  messages = [{"role": "system", "content": system_message}]
 
28
 
29
  response = ""
30
 
31
+ for chunk in client.chat_completion(
32
  messages,
33
  max_tokens=max_tokens,
34
  stream=True,
35
  temperature=temperature,
36
  top_p=top_p,
37
  ):
38
+ choices = chunk.choices
39
  token = ""
40
  if len(choices) and choices[0].delta.content:
41
  token = choices[0].delta.content