# Smoke test of the project's ask_lm helper against the configured endpoint.
import json
from pathlib import Path

from ooc_f import ask_lm

# k.json holds the endpoint credentials: expected keys are "url" and "key".
k = json.loads(Path("k.json").read_text())
model = 'openai/gpt-4o-mini'
s = 'hi'
# ask_lm returns (answer, thinking); thinking may be empty for non-reasoning models — TODO confirm in ooc_f
o, o_thk = ask_lm(url=k['url'], model=model, k=k['key'], q=s)


# --------------------------------------------------
from openai import AsyncOpenAI
import json, datetime
k = json.loads(Path("k.json").read_text())
c = AsyncOpenAI(base_url=k['url'], api_key=k['key'])
# 2. /chat api
model = 'openai/gpt-4o-mini'
s = 'hi'
r = await c.chat.completions.create(model=model, messages=[{"role": "user", "content": s}])
r
'''
ChatCompletion(id='gen-1749630987-TpNDWzWUIDKV8tMr9qeB',
               choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello! How can I assist you today?', refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None, reasoning=None), native_finish_reason='stop')], created=1749630987, model='openai/gpt-4o-mini', object='chat.completion', service_tier=None, system_fingerprint='fp_57db37749c', usage=CompletionUsage(completion_tokens=10, prompt_tokens=8, total_tokens=18, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None), prompt_tokens_details=PromptTokensDetails(audio_tokens=None, cached_tokens=0)), provider='Azure')
'''
# 🦜 : no reasoning...

s = 'hi'
r = await c.chat.completions.create(model=model,
                                    messages=[{"role": "user", "content": s}])
r


# 🦜 : a mock run
# --------------------------------------------------
# l = L.new(model="hi", url="https://www.openai.com/api/", note="Hi, OpenAI API!", q="hi")
# l.A = "yo"
# l.Tk = "thinking..."
# l.dump(Path.cwd().parent / "ixc-qa")


# --------------------------------------------------
from openai import AsyncOpenAI
from pathlib import Path
import json

k = json.loads(Path("k.json").read_text())
c = AsyncOpenAI(base_url=k['url'], api_key=k['key'])

# model = 'openai/gpt-4o-mini'
s = 'list all countries whose name doesn\'t contain the letter E'
model = 'openai/o1-pro'

r = await c.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": s}],
    reasoning_effort='medium',
    max_tokens=1e3
)

# You can inspect the response like this
print(json.dumps(r.model_dump(), indent=2))
print(r.choices[0].message.content)
print(r.choices[0].message.reasoning)




# --------------------------------------------------
 # Call streaming completion (failed)
stream = await c.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": s}],
    reasoning_effort='medium',
    max_tokens=1000,
    stream=True,  # Enable streaming
)

print("\n--- Response ---")
full_content = ""
async for chunk in stream:
    delta = chunk.choices[0].delta
    content_part = delta.content or ""
    print(content_part, end="", flush=True)
    full_content += content_part

