Commit
•
5b27e8b
1
Parent(s):
f41c6d3
Update app.py
Browse files
app.py
CHANGED
@@ -44,58 +44,23 @@ examples = [
|
|
44 |
["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 300, 1.2, 0.5, 0.5, 0.5],
|
45 |
]
|
46 |
|
47 |
-
def respond(
|
48 |
-
|
49 |
-
input=None,
|
50 |
-
token_count=333,
|
51 |
-
temperature=1.0,
|
52 |
-
top_p=0.5,
|
53 |
-
presencePenalty = 0.5,
|
54 |
-
countPenalty = 0.5,
|
55 |
-
history=None
|
56 |
-
):
|
57 |
-
args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
|
58 |
-
alpha_frequency = countPenalty,
|
59 |
-
alpha_presence = presencePenalty,
|
60 |
-
token_ban = [], # ban the generation of some tokens
|
61 |
-
token_stop = [0]) # stop generation whenever you see any token here
|
62 |
-
|
63 |
-
instruction = re.sub(r'\n{2,}', '\n', instruction).strip().replace('\r\n','\n')
|
64 |
-
ctx = generate_prompt(instruction, input, history)
|
65 |
-
print(ctx + "\n")
|
66 |
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
occurrence = {}
|
71 |
-
state = None
|
72 |
-
for i in range(int(token_count)):
|
73 |
-
out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
|
74 |
-
for n in occurrence:
|
75 |
-
out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
|
76 |
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
for xxx in occurrence:
|
82 |
-
occurrence[xxx] *= 0.996
|
83 |
-
if token not in occurrence:
|
84 |
-
occurrence[token] = 1
|
85 |
-
else:
|
86 |
-
occurrence[token] += 1
|
87 |
-
|
88 |
-
tmp = pipeline.decode(all_tokens[out_last:])
|
89 |
-
if '\ufffd' not in tmp:
|
90 |
-
out_str += tmp
|
91 |
-
out_last = i + 1
|
92 |
-
if '\n\n' in out_str:
|
93 |
-
break
|
94 |
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
99 |
|
100 |
def generator(
|
101 |
instruction,
|
@@ -188,25 +153,8 @@ with gr.Blocks(title=title) as demo:
|
|
188 |
history = history or []
|
189 |
return "", history + [[message, None]]
|
190 |
|
191 |
-
def chat(history):
|
192 |
-
print("History: ", history)
|
193 |
-
global token_count_chat, temperature_chat, top_p_chat, presence_penalty_chat, count_penalty_chat
|
194 |
-
# get the last user message and the additional parameters
|
195 |
-
instruction = msg.value
|
196 |
-
token_count = token_count_chat.value
|
197 |
-
|
198 |
-
temperature = temperature_chat.value
|
199 |
-
top_p = top_p_chat.value
|
200 |
-
presence_penalty = presence_penalty_chat.value
|
201 |
-
count_penalty = count_penalty_chat.value
|
202 |
-
|
203 |
-
response = respond(instruction, None, token_count, temperature, top_p, presence_penalty, count_penalty, history)
|
204 |
-
|
205 |
-
history[-1][1] = response
|
206 |
-
return history
|
207 |
-
|
208 |
msg.submit(user_msg, [msg, chatbot], [msg, chatbot], queue=False).then(
|
209 |
-
|
210 |
)
|
211 |
|
212 |
with gr.Tab("Instruct mode"):
|
|
|
44 |
["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 300, 1.2, 0.5, 0.5, 0.5],
|
45 |
]
|
46 |
|
47 |
+
def respond(history=None):
|
48 |
+
global token_count_chat, temperature_chat, top_p_chat, presence_penalty_chat, count_penalty_chat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
|
50 |
+
# get the latest user message and the additional parameters
|
51 |
+
instruction = msg.value
|
52 |
+
token_count = token_count_chat.value
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
|
54 |
+
temperature = temperature_chat.value
|
55 |
+
top_p = top_p_chat.value
|
56 |
+
presence_penalty = presence_penalty_chat.value
|
57 |
+
count_penalty = count_penalty_chat.value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
|
59 |
+
history[-1][1] = ""
|
60 |
+
|
61 |
+
for character in generator(instruction, None, token_count, temperature, top_p, presence_penalty, count_penalty, history):
|
62 |
+
history[-1][1] += character
|
63 |
+
yield history
|
64 |
|
65 |
def generator(
|
66 |
instruction,
|
|
|
153 |
history = history or []
|
154 |
return "", history + [[message, None]]
|
155 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
156 |
msg.submit(user_msg, [msg, chatbot], [msg, chatbot], queue=False).then(
|
157 |
+
respond, chatbot, chatbot, api_name="chat"
|
158 |
)
|
159 |
|
160 |
with gr.Tab("Instruct mode"):
|