Loewolf committed
Commit 54e1be2
1 Parent(s): d9a0ed7

Update app.py

Files changed (1)
  1. app.py +20 -5
app.py CHANGED
@@ -3,14 +3,29 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
 import torch
 
 # Initialisierung des Modells und des Tokenizers
-tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
-model = GPT2LMHeadModel.from_pretrained("gpt2")
+tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT1")
+model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT1")
 
-def generate_text(prompt):
+def generate_text(prompt, max_length, temperature, top_k, top_p):
     inputs = tokenizer.encode(prompt, return_tensors="pt")
-    outputs = model.generate(inputs, max_length=50, num_return_sequences=1)
+    outputs = model.generate(inputs,
+                             max_length=max_length,
+                             temperature=temperature,
+                             top_k=top_k,
+                             top_p=top_p,
+                             num_return_sequences=1)
     text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return text
 
-iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
+iface = gr.Interface(
+    fn=generate_text,
+    inputs=[
+        gr.inputs.Textbox(lines=2, placeholder="Geben Sie einen Prompt ein..."),
+        gr.inputs.Slider(minimum=10, maximum=100, default=50, label="Maximale Länge"),
+        gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.7, label="Temperatur"),
+        gr.inputs.Slider(minimum=0, maximum=50, default=20, label="Top K"),
+        gr.inputs.Slider(minimum=0.0, maximum=1.0, step=0.1, default=0.9, label="Top P")
+    ],
+    outputs="text"
+)
 iface.launch()
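
Note on the new generation parameters: the updated call to model.generate still runs with the default do_sample=False, so decoding stays greedy and the temperature, top_k, and top_p values coming from the sliders are effectively ignored (newer transformers releases emit a warning about such unused sampling flags). Below is a minimal sketch of the same call with sampling enabled; the function name generate_text_sampled, the explicit attention_mask, and the pad_token_id handling are assumptions added for illustration, not part of this commit.

# Sketch only, assuming the tokenizer and model loaded above in app.py.
def generate_text_sampled(prompt, max_length, temperature, top_k, top_p):
    enc = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        enc.input_ids,
        attention_mask=enc.attention_mask,    # explicit mask (assumption, not in the diff)
        do_sample=True,                       # required for temperature/top_k/top_p to take effect
        max_length=max_length,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token by default (assumption)
        num_return_sequences=1,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)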
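
A further note on the interface: gr.inputs.Textbox, gr.inputs.Slider, and the default= keyword belong to the Gradio 2.x API; from Gradio 3 onwards the components are exposed at the top level and the starting value is passed as value=. A rough equivalent under that assumption, with the component arguments otherwise copied from the diff:

import gradio as gr

iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Geben Sie einen Prompt ein..."),
        gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Maximale Länge"),
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperatur"),
        gr.Slider(minimum=0, maximum=50, value=20, step=1, label="Top K"),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.9, label="Top P"),
    ],
    outputs="text",
)
iface.launch()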