Elijahbodden committed
Commit f79086f
1 Parent(s): 6cda4f2

Update app.py

Files changed (1):
1. app.py +21 -6
app.py CHANGED
@@ -1,5 +1,3 @@
-# ADD DISCLAIMERS
-# why slow? I'm not made of gold
 import os
 
 os.system('pip install llama-cpp-python transformers torch')
@@ -28,6 +26,9 @@ model = Llama.from_pretrained(
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 presets = {
+    # Gaslight the model by adding sentence fragments to the start of the conversation
+    # It's weird, but it works
+    # If you're curious, "Default" keeps the model from hallucinating by showing it that the next message is the start of a new convo
     "Default" : [{"from": "human", "value": "good convo, bye"}, {"from": "gpt", "value": "Haha cool ttyl"}],
     "Rizz ????" : [{"from": "human", "value": "omg it's so hot when you flirt with me"}, {"from": "gpt", "value": "haha well you're lucky I can even string a sentence together, the way you take my breath away 😘"}, {"from": "human", "value": "alright love you, gn!"}, {"from": "gpt", "value": "ttyl babe 💕"}],
     "Thinky" : [{"from": "human", "value": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"from": "gpt", "value": "nah our deep convos are always the best, we should talk again soon\nttyl"}],
@@ -87,18 +88,32 @@ def respond(
 
 
 demo = gr.ChatInterface(
+    # train your own
+    # Dumb
+    # thank llama.cpp and unsloth/explain what this is
+    # It will make up facts and opinions that I don't hold
+    # Like if you took my brain and distilled it to be as smart as a toddler
     respond,
     additional_inputs_accordion=gr.Accordion(label="Options", open=True),
     css=".bubble-gap {gap: 6px !important}",
     theme="shivi/calm_seafoam",
-    description="The model may be slow if it hasn't run recently or a lot of people are using it",
+    description="""Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)
+    Q&A:
+    Q: Why is the model so fucking slow?
+    A: It may be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single, very slow CPU). You can duplicate the space to get your own (free) instance with no wait times.
+    Q: Why is the model so dumb?
+    A: Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart.
+    Q: Either it just made something up or I don't know you at all.
+    A: Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt.
+    """,
     title="EliGPT v1.3",
     additional_inputs=[
-        gr.Radio(presets.keys(), label="Personality preset", info="VERY SLIGHTLY influence the model's personality [WARNING: IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW]", value="Default"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
+        gr.Radio(presets.keys(), label="Personality preset", info="VERY SLIGHTLY influence the model's personality [WARNING: IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW]", value="Default"),
+        # ("The model will become very slow" because changing the preset uncaches the prompt, and prompt processing is a big part of the generation time)
         gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
+        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
         gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
-        gr.Slider(minimum=0.5, maximum=1.5, value=1.01, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
         gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'t repeat yourself"'),
         gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
         gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
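The preset trick in the presets hunk above is easy to miss, so here is a minimal sketch of how a preset could be spliced in as fake chat history. The build_prompt name, the Gradio-style (user, bot) history tuples, and the tokenizer id are illustrative assumptions, not the Space's actual respond() code:

```python
# Illustrative sketch only; names and the tokenizer id are assumptions.
from transformers import AutoTokenizer

# Placeholder id; the Space loads its own finetuned Llama 3 8b instead.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

def build_prompt(preset, history, user_message):
    # The preset is a finished mini-conversation, so the model reads the
    # next human turn as the opening of a brand-new chat.
    messages = [
        {"role": "user" if m["from"] == "human" else "assistant", "content": m["value"]}
        for m in preset
    ]
    for user_turn, bot_turn in history:  # gr.ChatInterface passes (user, bot) pairs
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": user_message})
    # Render in the model's chat format, ending with the assistant header
    # so generation continues as the bot's reply.
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
```

Because each preset ends with a completed farewell exchange, whatever the user types next reads as the start of a fresh conversation, which is exactly the anti-hallucination effect the "Default" comment describes.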
 
 
 
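For context on the Min_p and Temperature sliders in the diff: both are standard sampling knobs that llama-cpp-python applies internally when you pass them through. A standalone illustration (not code from this Space) of what they do to the next-token distribution:

```python
# Standalone numpy illustration of temperature and min-p sampling.
import numpy as np

def sample_next_token(logits, temperature=1.5, min_p=0.1):
    logits = logits - logits.max()        # shift for numerical stability
    probs = np.exp(logits / temperature)  # >1 flattens ("chaotic"), <1 sharpens
    probs /= probs.sum()
    # Min-p keeps only tokens at least min_p times as likely as the top token;
    # lower values keep more of the long tail, hence more "personality".
    keep = probs >= min_p * probs.max()
    probs = np.where(keep, probs, 0.0)
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))
```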
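The two length-penalty sliders describe a custom stop mechanism rather than a stock sampler option. One plausible implementation, sketched here as an assumption about how the app might wire it up through llama-cpp-python's logits_processor hook (the real code may differ), boosts the end-of-sequence logit a little more for every token generated past the start threshold:

```python
# Sketch of one way the length-penalty sliders could work; this is an
# assumption about the mechanism, not the Space's verified code.
import numpy as np

def make_length_penalty(eos_token_id, prompt_len, start=5, factor=1.015):
    def processor(input_ids, scores):
        generated = len(input_ids) - prompt_len
        if generated > start:
            # Adding n * log(factor) to the EOS logit multiplies its
            # unnormalized probability by factor ** n, so the model becomes
            # geometrically more likely to stop the longer it rambles.
            scores[eos_token_id] += (generated - start) * np.log(factor)
        return scores
    return processor
```

With the slider defaults shown (start=5, factor=1.015), the unnormalized stop probability grows by roughly 1.5% per token past the fifth, so replies trail off naturally instead of hitting a hard cap.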