Loewolf committed on
Commit
bab1b4c
1 Parent(s): 4ff8e05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -24
app.py CHANGED
@@ -1,29 +1,8 @@
1
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
2
- import torch
3
  import gradio as gr
4
 
5
# Load the model and tokenizer
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1")
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1")


def ask_gpt2(question, history):
    """Append *question* to *history*, generate a reply, and return the updated transcript.

    Parameters:
        question: the user's new prompt text.
        history: the chat transcript so far (plain string, may be empty).
    Returns:
        The transcript extended with the user's question and the model's reply.
    """
    # Encode the running transcript plus the new question as a single prompt.
    input_ids = tokenizer.encode(history + question, return_tensors="pt")
    # NOTE(review): a bool all-ones mask — presumably every token should be attended to.
    attention_mask = torch.ones(input_ids.shape, dtype=torch.bool)

    # Generate the reply and decode it back to text.
    output = model.generate(input_ids, attention_mask=attention_mask)
    reply = tokenizer.decode(output[0], skip_special_tokens=True)

    # Fold both turns into the transcript and hand it back.
    new_history = history + "Nutzer: " + question + "\nLöwolf GPT: " + reply + "\n"
    return new_history


# Build the Gradio interface
interface = gr.Interface(
    fn=ask_gpt2,
    inputs=[
        gr.inputs.Textbox(lines=2, placeholder="Stelle deine Frage hier..."),
        gr.inputs.Textbox(lines=10, placeholder="Chat-Verlauf..."),
    ],
    outputs=gr.outputs.Textbox(label="Antwort"),
    layout="vertical",
)

# Start the Gradio app
interface.launch()
 
 
 
 
1
  import gradio as gr
2
 
3
# Load the GPT model from Hugging Face and build the Gradio interface from it.
interface = gr.Interface.load("models/Loewolf/GPT_1")

# Start the Gradio app
interface.launch()