import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Optional: only needed if the model repo is gated.
hf_token = os.environ.get("HF_TOKEN")

# Tokenizers take no device/dtype/quantization arguments; those belong on the model.
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-reasoning", token=hf_token)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-reasoning",
    device_map="auto",  # required for efficient memory use
    torch_dtype=torch.float16,  # or "auto"
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    token=hf_token,
)
# device_map="auto" already places the weights, so no explicit .cuda() call is needed.


def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=16384)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Ganit-R1-14B Demo",
).launch()
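

# Optional streaming variant (a sketch, not part of the original demo): with
# max_new_tokens=16384 a single generate() call can block the UI for minutes.
# transformers' TextIteratorStreamer yields decoded text as tokens arrive, and
# Gradio treats a generator function as a streaming output. Define this before
# the gr.Interface call and pass fn=generate_response_streaming instead.
from threading import Thread

from transformers import TextIteratorStreamer


def generate_response_streaming(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # Run generation in a background thread; the streamer is fed from inside
    # model.generate() and iterated here on the main thread.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=16384),
    )
    thread.start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # Gradio re-renders the output textbox on each yield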