import os

from huggingface_hub import InferenceClient

# Authenticate against the Hugging Face Inference API using a token from the environment.
client = InferenceClient(api_key=os.environ["HF_API_TOKEN"])

# Send a raw text-generation request to the model and print the completion.
response = client.text_generation(
    model="microsoft/Phi-4-mini-flash-reasoning",
    prompt="User: What is the capital of France?\nAssistant:",
    max_new_tokens=50,
    temperature=0.7,
)
print(response)
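
# A minimal alternative sketch (an addition, not from the original snippet): recent
# huggingface_hub releases also expose chat_completion(), which applies the model's
# chat template for you instead of hand-building a "User:/Assistant:" prompt.
# This assumes the model is served behind a chat-capable endpoint.
chat_response = client.chat_completion(
    model="microsoft/Phi-4-mini-flash-reasoning",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=50,
    temperature=0.7,
)
print(chat_response.choices[0].message.content)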