File size: 2,880 Bytes
0b8e35a
d687a03
0b8e35a
d687a03
 
 
 
 
 
 
 
 
 
 
9a2fb52
 
 
d687a03
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b8e35a
 
 
d687a03
 
0b8e35a
 
 
 
 
 
d687a03
0b8e35a
 
d8dd631
0b8e35a
 
 
d687a03
 
0b8e35a
9bbdc3c
7064ae5
9bbdc3c
0b8e35a
 
9bbdc3c
d687a03
0b8e35a
 
 
 
d687a03
0b8e35a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import gradio as gr
from ctransformers import AutoModelForCausalLM
import random

# System prompt (German): frames the conversation as a helpful AI
# assistant that answers exclusively in the form of quotes.
system_prompt = """Dies ist eine Unterhaltung zwischen \
einem intelligenten, hilfsbereitem \
KI-Assistenten und einem Nutzer.
Der Assistent gibt Antworten in Form von Zitaten."""

# ChatML-style prompt template; {system_prompt} receives the text above
# and {prompt} receives the author name ("Zitiere <author>" = "Quote <author>").
prompt_format = "<|im_start|>system\n{system_prompt}\
<|im_end|>\n<|im_start|>user\nZitiere {prompt}\
<|im_end|>\n<|im_start|>assistant\n"

# Sampling presets selectable in the UI; each value is unpacked as
# keyword arguments into the model call (see quote()).
# "Authentisch" = conservative, "Ausgeglichen" = balanced, "Chaotisch" = wild.
modes = {
    "Authentisch": {"temperature": 0.2, "top_k": 10},
    "Ausgeglichen": {"temperature": 1, "top_p": 0.9},
    "Chaotisch": {"temperature": 2},
}

# Pool of author names used when the user leaves the author field empty;
# quote() picks one at random (see quote()).
authors = [
    "Johann Wolfgang von Goethe",
    "Friedrich Schiller",
    "Immanuel Kant",
    "Oscar Wilde",
    "Lü Bu We",
    "Wilhelm Busch",
    "Friedrich Nietzsche",
    "Karl Marx",
    "William Shakespeare",
    "Kurt Tucholsky",
    "Georg Christoph Lichtenberg",
    "Arthur Schopenhauer",
    "Seneca der Jüngere",
    "Martin Luther",
    "Mark Twain",
    "Cicero",
    "Marie von Ebner-Eschenbach",
    "Novalis",
    "Franz Kafka",
    "Jean-Jacques Rousseau",
    "Heinrich Heine",
    "Honoré de Balzac",
    "Georg Büchner",
    "Gotthold Ephraim Lessing",
    "Markus M. Ronner",
    "Gerhard Uhlenbruck",
    "Theodor Fontane",
    "Jean Paul",
    "Leo Tolstoi",
    "Friedrich Hebbel",
    "Horaz",
    "Albert Einstein",
    "Jesus von Nazareth",
    "Angela Merkel",
    "Ambrose Bierce",
    "Christian Morgenstern",
    "Friedrich Hölderlin",
    "Joseph Joubert",
    "François de La Rochefoucauld",
    "Otto von Bismarck",
    "Fjodor Dostojewski",
    "Ovid",
    "Rudolf Steiner",
    "Ludwig Börne",
    "Hugo von Hofmannsthal",
    "Laotse",
    "Thomas von Aquin",
    "Ludwig Wittgenstein",
    "Friedrich Engels",
    "Charles de Montesquieu",
]

# Download and load the quantized GGUF model from the Hugging Face Hub
# at import time (blocks startup until the weights are available).
# NOTE(review): the repo ID spells "hessionai" — presumably the LeoLM
# "hessianai" family; confirm the repository name is correct.
model = AutoModelForCausalLM.from_pretrained(
    "caretech-owl/leo-hessionai-7B-quotes-gguf", model_type="Llama"
)


def quote(author: str = "", mode: str = "") -> str:
    """Generate a quote attributed to *author* with the loaded model.

    An empty *author* falls back to a random pick from ``authors``;
    an empty *mode* falls back to the "Authentisch" sampling preset.
    The prompt and the raw model output are echoed to stdout for
    debugging, and the output is returned unchanged.
    """
    chosen_author = author if author else random.choice(authors)
    preset = mode if mode else "Authentisch"
    query = prompt_format.format(
        system_prompt=system_prompt,
        prompt=chosen_author,
    )
    print("=" * 20)
    print(query)
    # Unpack the selected sampling preset as generation kwargs.
    result = model(query, stop="<|im_end|>", max_new_tokens=300, **modes[preset])
    print("-" * 20)
    print(result)
    return result


# Build the Gradio UI: author textbox + sampling-mode dropdown feed the
# quote() callback; the generated quote lands in the output textbox.
with gr.Blocks() as demo:
    gr.Markdown(
        "# Zitatgenerator\n\n*Hinweis: Generierung kann ein paar Minuten dauern.*"
    )
    with gr.Row():
        author = gr.Textbox(
            label="Zitat generieren für", lines=1, placeholder="Aristoteles"
        )
        mode = gr.Dropdown(
            choices=["Authentisch", "Ausgeglichen", "Chaotisch"],
            label="Modus",
            value="Ausgeglichen",
        )
    output = gr.Textbox(label="Zitat")
    quote_btn = gr.Button("Generiere Zitat")
    quote_btn.click(fn=quote, inputs=[author, mode], outputs=output)

# Guard the launch so importing this module (e.g. from a test or another
# script) does not start the web server; running the file directly
# behaves exactly as before.
if __name__ == "__main__":
    demo.launch()