Loewolf committed on
Commit
f372d1e
1 Parent(s): 1b6e0a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -27
app.py CHANGED
@@ -1,45 +1,59 @@
1
  import gradio as gr
2
- from transformers import pipeline, set_seed
3
-
4
- # Setzen eines Seeds für Reproduzierbarkeit
5
- set_seed(42)
6
-
7
- # Laden des GPT-Modells mit Hugging Face Pipeline für CPU
8
- model = pipeline("text-generation", model="Loewolf/GPT_1", device=-1) # device=-1 für CPU
9
- tokenizer = model.tokenizer
10
-
11
- def generate_text(input_text, temp, top_k, top_p, length):
12
- # Textgenerierung mit spezifischen Parametern
13
- generated_texts = model(input_text, max_length=length, temperature=temp, top_k=top_k, top_p=top_p, num_return_sequences=1)
14
- return generated_texts[0]['generated_text']
15
-
16
- def chat_with_model(user_input, history, temperature, top_k, top_p, length, system_prompt):
17
- combined_input = f"{history}\nNutzer: {user_input}\n{system_prompt}:"
18
- response = generate_text(combined_input, temperature, top_k, top_p, length)
19
- new_history = f"{combined_input}\n{response}"
20
- return "", new_history # Leerer String für user_input, um das Eingabefeld zurückzusetzen
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # Erstellen der Gradio-Schnittstelle
23
  with gr.Blocks() as demo:
24
  with gr.Row():
25
  with gr.Column():
26
- history = gr.Textbox(label="Chatverlauf", lines=10, interactive=False)
27
  user_input = gr.Textbox(label="Deine Nachricht")
28
  submit_btn = gr.Button("Senden")
29
  with gr.Column():
30
- system_prompt = gr.Textbox(label="System Prompt", value="Löwolf GPT")
31
- temperature = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperature", value=0.9)
32
  top_k = gr.Slider(minimum=0, maximum=100, step=1, label="Top K", value=50)
33
  top_p = gr.Slider(minimum=0, maximum=1, step=0.01, label="Top P", value=0.9)
34
- length = gr.Slider(minimum=1, maximum=100, step=1, label="Länge", value=20)
35
 
36
  submit_btn.click(
37
- chat_with_model,
38
- inputs=[user_input, history, temperature, top_k, top_p, length, system_prompt],
39
- outputs=[user_input, history]
40
  )
41
 
42
  # Starten der Gradio-App
43
  demo.launch()
44
 
45
-
 
1
  import gradio as gr
2
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
+ import torch
4
+
5
+ # Initialisierung des Modells und des Tokenizers
6
+ tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT1")
7
+ model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT1")
8
+ model.to("cpu") # Stellen Sie sicher, dass das Modell auf der CPU läuft
9
+
10
+ # Chat-Verlauf initialisieren
11
+ chat_history = []
12
+
13
+ def generate_text(input_text, history, temperature, top_k, top_p, max_length):
14
+ global chat_history
15
+ # Hinzufügen der neuen Eingabe zum Chat-Verlauf
16
+ chat_history.append(f"Nutzer: {input_text}")
17
+ new_input = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
18
+ new_input = new_input.to("cpu") # Stellen Sie sicher, dass Tensoren auf der CPU sind
19
+
20
+ # Generieren der Antwort des Modells
21
+ chat_history_ids = tokenizer.encode(" ".join(chat_history) + tokenizer.eos_token, return_tensors='pt')
22
+ chat_history_ids = chat_history_ids.to("cpu")
23
+
24
+ # Achten Sie auf die Größe der Tokens für das Modell
25
+ chat_history_ids = chat_history_ids[:, -tokenizer.model_max_length :]
26
+
27
+ # Generieren der Antwort
28
+ model_output = model.generate(chat_history_ids, max_length=max_length, pad_token_id=tokenizer.eos_token_id,
29
+ temperature=temperature, top_k=top_k, top_p=top_p)
30
+
31
+ # Antwort in Text umwandeln
32
+ response = tokenizer.decode(model_output[:, chat_history_ids.shape[-1]:][0], skip_special_tokens=True)
33
+ chat_history.append(f"Löwolf GPT: {response}")
34
+
35
+ # Rückgabe des aktualisierten Chat-Verlaufs
36
+ return " ".join(chat_history)
37
 
38
  # Erstellen der Gradio-Schnittstelle
39
  with gr.Blocks() as demo:
40
  with gr.Row():
41
  with gr.Column():
42
+ history = gr.Textbox(label="Chatverlauf", value=" ".join(chat_history), lines=10, interactive=False)
43
  user_input = gr.Textbox(label="Deine Nachricht")
44
  submit_btn = gr.Button("Senden")
45
  with gr.Column():
46
+ temperature = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperature", value=0.7)
 
47
  top_k = gr.Slider(minimum=0, maximum=100, step=1, label="Top K", value=50)
48
  top_p = gr.Slider(minimum=0, maximum=1, step=0.01, label="Top P", value=0.9)
49
+ max_length = gr.Slider(minimum=1, maximum=100, step=1, label="Maximale Länge", value=60)
50
 
51
  submit_btn.click(
52
+ generate_text,
53
+ inputs=[user_input, history, temperature, top_k, top_p, max_length],
54
+ outputs=[history]
55
  )
56
 
57
  # Starten der Gradio-App
58
  demo.launch()
59