Update app.py
app.py CHANGED
@@ -88,7 +88,7 @@ def generate_tokens(model, generator):
     for token in generator:
         if token == model.token_eos() or stop_generation:
             stop_generation = False
-            app.logger.info('
+            app.logger.info('End generating')
             yield b'' # End of chunk
             break

@@ -143,7 +143,7 @@ def generate_search_request():
     tokens.append(LINEBREAK_TOKEN)

     tokens = get_message_tokens(model=model, role="user", content=user_query[:200]) + [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
-
+    stop_generation = False
     generator = model.generate(
         tokens,
         top_k=top_k,

@@ -215,6 +215,7 @@ def generate_response():
     app.logger.info('Prompt:')
     app.logger.info(model.detokenize(tokens[:CONTEXT_SIZE]).decode("utf-8", errors="ignore"))

+    stop_generation = False
     app.logger.info('Generate started')
     generator = model.generate(
         tokens[:CONTEXT_SIZE],