import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the adapter config, then the 8-bit quantized base model and its tokenizer
peft_model_id = "danielsteinigen/GenerAd-AI"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the LoRA adapter weights on top of the base model
model = PeftModel.from_pretrained(model, peft_model_id)


def make_inference(utterance):
    # Build the prompt: user utterance, candidate domains, and a "Domain:" cue for the model to complete
    batch = tokenizer(
        f"User: {utterance}\n### What did the user talk about?\nOptions: hotel, restaurant, taxi, attraction, hospital, police, train\n\n### Domain:\n",
        return_tensors="pt",
    )
    # Move the tokenized inputs to the model's device before generation
    batch = batch.to(model.device)

    # Generate only a couple of tokens -- enough for the predicted domain label
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=2)

    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)


if __name__ == "__main__":
    # Expose the model through a simple Gradio web interface
    import gradio as gr

    gr.Interface(
        fn=make_inference,
        inputs=gr.Textbox(lines=2, label="Utterance"),
        outputs=gr.Textbox(label="Domain"),
        title="Domain Detection",
        description="Generative model that detects domains of utterances.",
    ).launch()