File size: 756 Bytes
afd4327
a5b1bd1
1d2566f
65a787e
afd4327
a5b1bd1
 
50aa497
 
 
 
 
6cd4107
afd4327
65a787e
 
 
 
afd4327
65a787e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import gradio as gr
import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face access token (needed for gated/private models); None if unset.
hf_token = os.environ.get("HF_TOKEN")

# NOTE: the tokenizer takes no device/dtype/quantization kwargs — those were
# previously (incorrectly) passed here and silently did nothing. They belong
# on the model load below.
tokenizer = AutoTokenizer.from_pretrained(
    "microsoft/Phi-4-reasoning",
    token=hf_token,
)

# Load the model once with memory-efficient settings:
# - device_map="auto" places layers across available accelerators (replaces
#   the manual .cuda() call, which loaded the full fp32 model first),
# - torch_dtype=torch.float16 halves the memory footprint vs the fp32 default.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-reasoning",
    device_map="auto",
    torch_dtype=torch.float16,
    token=hf_token,
)

def generate_response(prompt):
    """Tokenize *prompt*, run generation on the loaded model, and return the decoded text."""
    # Move the encoded inputs onto whatever device the model lives on.
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(**encoded, max_new_tokens=16384)
    # Decode the first (only) sequence, stripping special tokens.
    text = tokenizer.decode(generated[0], skip_special_tokens=True)
    return text

# Minimal Gradio UI: one text box in, generated text out.
# NOTE(review): the title says "Ganit-R1-14B" but the model loaded above is
# microsoft/Phi-4-reasoning — confirm which is intended.
gr.Interface(fn=generate_response, inputs="text", outputs="text", title="Ganit-R1-14B Demo").launch()