upload app
app.py
ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from transformers import AutoTokenizer, TFAutoModelForCausalLM
+
+
+SAVED_CHECKPOINT = 'mikegarts/distilgpt2-erichmariaremarque'
+MIN_WORDS = 60
+
+
+def get_model():
+    model = TFAutoModelForCausalLM.from_pretrained(SAVED_CHECKPOINT)
+    tokenizer = AutoTokenizer.from_pretrained(SAVED_CHECKPOINT)
+    return model, tokenizer
+
+
+def generate(prompt):
+    model, tokenizer = get_model()
+
+    input_context = prompt
+    input_ids = tokenizer.encode(input_context, return_tensors="tf")
+
+    outputs = model.generate(
+        input_ids=input_ids,
+        max_length=MIN_WORDS,
+        temperature=0.7,
+        num_return_sequences=1,
+        do_sample=True
+    )
+
+    return tokenizer.decode(outputs[0], skip_special_tokens=True).rsplit('.', 1)[0] + '.'
+
+
+def predict(prompt):
+    return generate(prompt=prompt)
+
+
+title = "What would Remarque say?"
+description = """
+The bot was trained to complete your prompt as if it were the beginning of a paragraph from one of Remarque's books.
+<img src="https://upload.wikimedia.org/wikipedia/commons/1/10/Bundesarchiv_Bild_183-R04034%2C_Erich_Maria_Remarque_%28cropped%29.jpg" align=center width=200px>
+"""
+
+gr.Interface(
+    fn=predict,
+    inputs="textbox",
+    outputs="text",
+    title=title,
+    description=description,
+    examples=[["I was drinking because"], ["Who is Karl for me?"]]
+).launch(debug=True)
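
One detail worth flagging for anyone reusing this app: get_model() is called inside generate(), so the model and tokenizer are reloaded from the local Hub cache on every request, and max_length in model.generate counts tokens rather than words, so MIN_WORDS is really a token budget. A minimal sketch of the same app with the model loaded once at startup (same checkpoint and generation settings; this refactor is an assumption, not part of the committed app.py):

import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForCausalLM

SAVED_CHECKPOINT = 'mikegarts/distilgpt2-erichmariaremarque'
MIN_WORDS = 60

# Load the model and tokenizer once at import time instead of per request
# (assumed refactor; the committed app.py reloads them inside generate()).
model = TFAutoModelForCausalLM.from_pretrained(SAVED_CHECKPOINT)
tokenizer = AutoTokenizer.from_pretrained(SAVED_CHECKPOINT)

def predict(prompt):
    # Encode the prompt and sample a continuation, as in the original generate().
    input_ids = tokenizer.encode(prompt, return_tensors="tf")
    outputs = model.generate(
        input_ids=input_ids,
        max_length=MIN_WORDS,
        temperature=0.7,
        num_return_sequences=1,
        do_sample=True,
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Trim the output at the last complete sentence, like the original.
    return text.rsplit('.', 1)[0] + '.'

gr.Interface(fn=predict, inputs="textbox", outputs="text").launch()

With this layout the first request still pays the model download cost, but subsequent requests only run generation.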