from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
# Run on GPU when one is visible to torch; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Local checkpoint directory for CodeGeeX4; trust_remote_code is required
# because the repo ships custom modeling code.
_MODEL_PATH = "./codegeex4-all-9b"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_PATH, trust_remote_code=True)

# bfloat16 + low_cpu_mem_usage keeps the 9B load footprint manageable.
model = AutoModelForCausalLM.from_pretrained(
    _MODEL_PATH,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
)
model = model.to(device).eval()


def codegee_inference(text):
    """Generate a CodeGeeX4 completion for a single user message.

    Args:
        text: The user's prompt, sent as one chat turn.

    Returns:
        The newly generated text (prompt tokens stripped, special tokens
        removed).
    """
    # Format the single-turn chat and tokenize directly to tensors on the
    # model's device.
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "content": text}],
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,
    ).to(device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=2048)
        # generate() returns prompt + completion; slice off the prompt so
        # only the new tokens are decoded.
        outputs = outputs[:, inputs["input_ids"].shape[1]:]
    # Fix: the original had an unreachable `return "error"` after this point
    # (the `with` block always returned first) — dead code removed.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Build the Gradio UI: one text box in, one text box out, wired to the
# inference function. (Title/description are user-facing and kept verbatim.)
iface = gr.Interface(
    codegee_inference,
    "text",
    "text",
    title="CodegeeX4模型调用",
    description="输入文字，模型调用并显示结果。",
)

# Serve on all network interfaces at port 7860 (blocks until stopped).
iface.launch(server_name="0.0.0.0", server_port=7860)