import os

import gradio as gr
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_path = "vinai/PhoGPT-7B5-Instruct"

# Read the Hugging Face access token from the environment rather than
# hard-coding it in the source; never commit tokens to a repository.
# (token=None also works here, since the model is publicly available.)
hf_token = os.environ.get("HF_TOKEN")

config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, token=hf_token)
config.init_device = "cuda"
# config.attn_config['attn_impl'] = 'triton'  # Enable if "triton" is installed!

model = AutoModelForCausalLM.from_pretrained(
    model_path, config=config, torch_dtype=torch.bfloat16, trust_remote_code=True, token=hf_token
)
# If your GPU does not support bfloat16:
# model = AutoModelForCausalLM.from_pretrained(model_path, config=config, torch_dtype=torch.float16, trust_remote_code=True, token=hf_token)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, token=hf_token)


# Instruction template from the PhoGPT-7B5-Instruct model card; the
# "### Trả lời:" ("### Answer:") marker lets us split off the answer below.
PROMPT_TEMPLATE = "### Câu hỏi: {instruction}\n### Trả lời:"


def answer(instruction):
    # Wrap the raw user input in the template so the split on
    # "### Trả lời:" below always finds the marker.
    input_prompt = PROMPT_TEMPLATE.format_map({"instruction": instruction})
    input_ids = tokenizer(input_prompt, return_tensors="pt")

    outputs = model.generate(
        inputs=input_ids["input_ids"].to("cuda"),
        attention_mask=input_ids["attention_mask"].to("cuda"),
        do_sample=True,
        temperature=1.0,
        top_k=50,
        top_p=0.9,
        max_new_tokens=1024,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )

    # Decode, then keep only the generated answer (the prompt is echoed back).
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    response = response.split("### Trả lời:")[1].strip()
    return response
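
# Quick sanity check without the UI (hypothetical example question,
# "Hello, who are you?" in Vietnamese):
#   print(answer("Xin chào, bạn là ai?"))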

iface = gr.Interface(fn=answer, inputs="text", outputs="text")
iface.launch()
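
# Gradio serves the app locally by default (http://127.0.0.1:7860);
# pass share=True to launch() if you need a temporary public link.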