Update app.py
app.py CHANGED
@@ -30,13 +30,13 @@ default_generation_config = GenerationConfig(
 
 # Response generation function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # Generation settings
+    # Generation settings
     generation_config = GenerationConfig(
         temperature=temperature,
         top_p=top_p,
-        max_new_tokens=max_tokens,
         **default_generation_config.to_dict()  # merge with the default settings
     )
+    generation_config.max_new_tokens = max_tokens  # set max_tokens separately
 
     # Build the prompt from the conversation history and the system message
     prompt = prompt_template.safe_substitute({"inst": system_message})
@@ -58,6 +58,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         response += token
         yield response
 
+
 # Gradio Chat Interface setup
 demo = gr.ChatInterface(
     respond,
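The substantive change in the first hunk is that max_new_tokens is no longer passed to the GenerationConfig constructor next to **default_generation_config.to_dict(); it is assigned on the config object after construction, presumably so the explicit value neither clashes with nor is overridden by a max_new_tokens entry in the unpacked defaults (Python rejects a keyword argument that is supplied twice). The snippet below is a minimal, self-contained sketch of the same merge-then-override idea, not code from app.py: the build_generation_config helper and the default values are illustrative assumptions, and the merge goes through a plain dict so that no keyword reaches the constructor twice.

from transformers import GenerationConfig

# Hypothetical defaults standing in for the app's default_generation_config (assumption).
default_generation_config = GenerationConfig(do_sample=True, repetition_penalty=1.1)

def build_generation_config(max_tokens: int, temperature: float, top_p: float) -> GenerationConfig:
    # Start from a copy of the default settings, then overlay the per-request slider values.
    params = default_generation_config.to_dict()
    params.update(temperature=temperature, top_p=top_p)
    generation_config = GenerationConfig(**params)
    # Assign max_new_tokens after construction, mirroring the new revision of respond(),
    # rather than passing it as a constructor keyword alongside the unpacked defaults.
    generation_config.max_new_tokens = max_tokens
    return generation_config

# Example: a config for one chat turn with a 512-token response budget.
config = build_generation_config(max_tokens=512, temperature=0.7, top_p=0.9)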