import os

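# Install runtime dependencies at startup (presumably in lieu of a requirements.txt for this Space)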
os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer


model_id = "Elijahbodden/eliGPTv1.3"

# MODEL
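# Download the GGUF weights from the Hub and run them with llama.cpp
# n_ctx sets the context window; the low thread counts are presumably sized for the Space's small CPU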
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="model.gguf",
    verbose=True,
    n_threads = 2,
    n_threads_batch = 2,
    n_ctx=8192,
  )


# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

presets = {
    # Gaslight the model by prepending a short fake conversation to the chat
    # It's weird but it works
    # If you're curious, Default helps it avoid hallucinating by showing that the next message is the start of a new convo
    # I also include "oh" and "shit" because the model overuses them and seeding them lets the repetition penalties do their thing
    "Default" : [{"from": "human", "value": "shit good convo, bye"}, {"from": "gpt", "value": "Haha oh ok cool ttyl"}],
    # I swear this is for science 🗿
    "Rizz ????" : [{"from": "human", "value": "omg it's so hot when you flirt with me"}, {"from": "gpt", "value": "haha well you're lucky can even string a sentence together, the way you take my breath away 😘"}, {"from": "human", "value": "alright love you, gn!"}, {"from": "gpt", "value": "ttyl babe 💕"}],
    "Thinky" : [{"from": "human", "value": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"from": "gpt", "value": "nah our deep convos are always the best, we should talk again soon\nttyl"}],
}
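# (The selected preset gets prepended to every conversation in respond() below)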

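# Length-penalty logits processor: once more than lp_start tokens have been generated,
# the EOS logit is scaled by lp_decay for every extra token so the model becomes more likely to stop.
# For example, with lp_start=10 and lp_decay=1.015 the EOS logit is multiplied by 1.015 at token 11
# and by roughly 1.015**40 ≈ 1.8 at token 50.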
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    generated_tok_number = len(ids) - prompt_tok_len
    if (generated_tok_number > lp_start):
        print(generated_tok_number, lp_start, pow(lp_decay, generated_tok_number - lp_start))
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number-lp_start)
    return logits

def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    min_p,
    temperature,
    lp_start,
    lp_decay,
    frequency_penalty,
    presence_penalty,
    max_tokens
):
    print(preset, temperature, min_p, lp_start, lp_decay, frequency_penalty, presence_penalty, max_tokens)
    
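    # Start from the preset's seed conversation, then replay the Gradio history as alternating human/gpt turns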
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"from": "human", "value": val[0]})
        if val[1]:
            messages.append({"from": "gpt", "value": val[1]})

    messages.append({"from": "human", "value": message})

    response = ""

    print(tokenizer.apply_chat_template(messages, tokenize=False))

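    # Tokenize the full prompt and stream the completion token by token, applying the length-penalty processor to the EOS logit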
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        min_p=min_p,
        max_tokens=max_tokens,
        # Disable top-k pruning
        top_k=100000000,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo))
    ):
        token = chunk["choices"][0]["text"]

        response += token
        yield response
    print(response)

ci = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    additional_inputs=[
        gr.Radio(presets.keys(), label="Personality preset", info="Slightly influence the model's personality [WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT THE MODEL WILL BECOME VERY SLOW]", value="Default"),
        # ("The model will become slow" is bc this uncaches the prompt and prompt processing is a big part of the generation time)
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'repeat yourself"'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many tokens can the model generate at most?"),
    ],
  )


with gr.Blocks(css=".bubble-gap {gap: 6px !important}", theme="shivi/calm_seafoam") as demo:
  gr.Markdown("# EliGPT v1.3")
  gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)\nTHE MODEL IS VERY SLOW WHEN MULTIPLE PEOPLE ARE USING IT. YOU CAN DUPLICATE THE SPACE TO GET YOUR OWN DEDICATED INSTANCE.")
  with gr.Accordion("Q&A:", open=False):
      gr.Markdown("""Q: Why is the model so fucking slow  
        A: The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single, very slow CPU). You can duplicate the space to get your own (free) instance with no wait times.  
        Q: Why is the model so dumb  
        A: Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart  
        Q: Either it just made something up or I don't know you at all  
        A: Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt  
      """)
  ci.render()

if __name__ == "__main__":
    demo.launch()