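# Minimal Gradio demo: wraps the facebook/galactica-1.3b causal language model
# in a Transformers text-generation pipeline and renders the model's continuation
# of the user's prompt as highlighted HTML.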
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the Galactica 1.3B checkpoint and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("facebook/galactica-1.3b")
model = AutoModelForCausalLM.from_pretrained("facebook/galactica-1.3b")

# Wrap the model and tokenizer in a text-generation pipeline.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, num_workers=2)

def predict(text):
    text = text.strip()
    out_text = text_generator(
        text,
        max_length=128,
        temperature=0.7,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )[0]["generated_text"]
    # Highlight the generated continuation (everything after the prompt),
    # then wrap the whole output in a paragraph so the HTML stays well formed.
    out_text = out_text.replace(text, text + "<b><span style='background-color: #ffffcc;'>", 1)
    out_text = out_text + "</span></b>"
    out_text = "<p>" + out_text + "</p>"
    out_text = out_text.replace("\n", "<br>")
    return out_text

iface = gr.Interface(
    fn=predict, 
    inputs=gr.Textbox(lines=10),
    outputs=gr.HTML(),
    description="Galactica",
    examples=[["The attention mechanism in LLM is"]]
)

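# share=True also exposes the demo through a temporary public gradio.live link.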
iface.launch(share=True)