import torch
import gradio as gr
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_path = "vinai/PhoGPT-7B5-Instruct"

config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, token='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')
config.init_device = "cuda"
# config.attn_config['attn_impl'] = 'triton'  # Enable if "triton" is installed!

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    config=config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    token='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'
)
# If your GPU does not support bfloat16:
# model = AutoModelForCausalLM.from_pretrained(model_path, config=config, torch_dtype=torch.float16, trust_remote_code=True)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, token='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')

# PhoGPT-Instruct expects the instruction wrapped in this prompt template;
# without it, the "### Trả lời:" split below would fail on raw user input.
PROMPT_TEMPLATE = "### Câu hỏi: {instruction}\n### Trả lời:"


def answer(instruction):
    # Wrap the user's raw question in the instruction template before tokenizing.
    input_prompt = PROMPT_TEMPLATE.format(instruction=instruction)
    input_ids = tokenizer(input_prompt, return_tensors="pt")

    outputs = model.generate(
        inputs=input_ids["input_ids"].to("cuda"),
        attention_mask=input_ids["attention_mask"].to("cuda"),
        do_sample=True,
        temperature=1.0,
        top_k=50,
        top_p=0.9,
        max_new_tokens=1024,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id
    )

    # Keep only the generated answer, i.e. the text after "### Trả lời:".
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    response = response.split("### Trả lời:")[1]
    return response


# The Gradio handler is `answer` (the original referenced an undefined `greet`).
iface = gr.Interface(fn=answer, inputs="text", outputs="text")
iface.launch()