Kai Izumoto committed: Create Debug.py

Debug.py ADDED
@@ -0,0 +1,29 @@
+# hf_diag.py
+from huggingface_hub import InferenceClient
+import os, json
+
+TOK = os.getenv("HF_TOKEN") or "paste-your-token-here"
+MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"  # try both your primary and fallback model names
+client = InferenceClient(token=TOK)
+
+def try_chat():
+    messages = [{"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "Say hello and return two short code files in JSON: files mapping with main.py and tests/test_main.py"}]
+    try:
+        print("Calling chat_completion (non-stream)...")
+        resp = client.chat_completion(messages=messages, model=MODEL, stream=False, max_tokens=256)  # chat_completion takes max_tokens; max_new_tokens belongs to text_generation
+        print("TYPE:", type(resp))
+        try:
+            print("As repr:", repr(resp)[:1000])
+        except Exception:
+            print("Couldn't repr resp")
+        # try to JSON-dump
+        try:
+            print("JSON-able?", json.dumps(resp)[:1000])
+        except Exception as e:
+            print("Not JSON serializable:", e)
+    except Exception as e:
+        print("chat_completion failed:", e)
+
+if __name__ == "__main__":
+    try_chat()
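For reference: when the call succeeds, recent versions of huggingface_hub return a ChatCompletionOutput-style object rather than a plain dict, which is why the json.dumps probe above usually prints "Not JSON serializable". A minimal follow-up sketch, assuming the output mirrors the OpenAI chat schema (.choices[0].message.content) and is a dataclass; both are assumptions worth verifying against your installed version, and the file name inspect_resp.py is illustrative:

    # inspect_resp.py -- hypothetical companion to hf_diag.py
    import dataclasses, json, os
    from huggingface_hub import InferenceClient

    client = InferenceClient(token=os.getenv("HF_TOKEN"))
    resp = client.chat_completion(
        messages=[{"role": "user", "content": "Say hello"}],
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        max_tokens=64,
    )

    # The assistant text usually lives here (assumed layout, mirrors the OpenAI schema):
    print(resp.choices[0].message.content)

    # If resp is a dataclass (true in recent huggingface_hub releases), asdict()
    # yields a JSON-serializable view where a bare json.dumps(resp) fails:
    if dataclasses.is_dataclass(resp):
        print(json.dumps(dataclasses.asdict(resp), indent=2)[:1000])

Run either script with the token supplied via the environment (e.g. HF_TOKEN=... python hf_diag.py) so the "paste-your-token-here" fallback never ends up committed with a real token.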