Update app.py
app.py
CHANGED
@@ -8,20 +8,20 @@ HF_TOKEN = os.getenv("HF_TOKEN")

# Available LLM models
LLM_MODELS = {
+    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
+    "Zephyr": "HuggingFaceH4/zephyr-7b-beta",
+    "OpenChat": "openchat/openchat-3.5",
+    "Llama2": "meta-llama/Llama-2-7b-chat-hf",
+    "Phi": "microsoft/phi-2",
+    "Neural": "nvidia/neural-chat-7b-v3-1",
+    "Starling": "HuggingFaceH4/starling-lm-7b-alpha"
}

# Default selected models
DEFAULT_MODELS = [
+    "mistralai/Mistral-7B-Instruct-v0.2",
+    "HuggingFaceH4/zephyr-7b-beta",
+    "openchat/openchat-3.5"
]

# Initialize clients with token
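Note: the hunk ends at the `# Initialize clients with token` comment without showing that code. A minimal sketch of what that step plausibly looks like, assuming one `huggingface_hub.InferenceClient` per model keyed by model ID; the `clients[selected_models[i]]` lookups later in this diff imply such a mapping:

import os
from huggingface_hub import InferenceClient

HF_TOKEN = os.getenv("HF_TOKEN")
LLM_MODELS = {"Mistral": "mistralai/Mistral-7B-Instruct-v0.2"}  # subset of the dict above

# Sketch under assumptions: one streaming-capable client per model,
# keyed by model ID so clients[model_id] lookups resolve directly.
clients = {
    model_id: InferenceClient(model_id, token=HF_TOKEN)
    for model_id in LLM_MODELS.values()
}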
@@ -46,7 +46,10 @@ def respond_single(
    temperature: float,
    top_p: float,
):
+    system_prefix = """You must answer in Korean. You are tasked with providing detailed explanations and Q&A based on the given content.
+    Explain in a very kind and detailed way."""
+
+    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]

    for user, assistant in history:
        if user:
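Note: only the first two lines of the history loop are visible before the hunk boundary cuts it off. A plausible completion, hedged: the user/assistant pair shape of `history` comes from the context lines above, while the trailing `message` append is an assumption about the rest of `respond_single`'s signature:

# Hypothetical completion; only the first two lines appear in the diff.
for user, assistant in history:
    if user:
        messages.append({"role": "user", "content": user})
    if assistant:
        messages.append({"role": "assistant", "content": assistant})
messages.append({"role": "user", "content": message})  # `message` parameter assumed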
@@ -58,15 +61,18 @@ def respond_single(

    response = ""
    try:
+        for msg in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
+            if hasattr(msg.choices[0].delta, 'content'):
+                token = msg.choices[0].delta.content
+                if token is not None:
+                    response += token
+                    yield response
    except Exception as e:
        yield f"Error: {str(e)}"

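Note: `InferenceClient.chat_completion` in `huggingface_hub` yields incremental chunks when `stream=True`, following the OpenAI-style delta schema the loop above unpacks. A self-contained sketch of the same consumption pattern; the model ID and prompt are placeholders:

import os
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2", token=os.getenv("HF_TOKEN"))
for chunk in client.chat_completion(
    [{"role": "user", "content": "Say hello"}],
    max_tokens=64,
    stream=True,
):
    delta = chunk.choices[0].delta.content  # may be None on the final chunk
    if delta:
        print(delta, end="", flush=True)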
@@ -106,18 +112,11 @@ def respond_all(
        generate(clients[selected_models[2]], history3),
    )

css = """
-footer {
-    visibility: hidden;
-}
+footer {visibility: hidden}
"""

-with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
+with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Row():
        model_choices = gr.Checkboxgroup(
            choices=list(LLM_MODELS.values()),
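Note: `gr.Checkboxgroup` is kept as it appears in the source; current Gradio documentation spells the component `gr.CheckboxGroup`, and the `Checkboxgroup` form has existed as a backwards-compatibility alias. A standalone sketch of the component as used above; the `label` and `value` arguments are assumptions, since the diff cuts off after `choices`:

import gradio as gr

with gr.Blocks() as demo:
    # CheckboxGroup's value is the list of currently checked choices.
    model_choices = gr.CheckboxGroup(
        choices=["mistralai/Mistral-7B-Instruct-v0.2", "HuggingFaceH4/zephyr-7b-beta"],
        value=["mistralai/Mistral-7B-Instruct-v0.2"],  # assumed default selection
        label="Models",  # assumed label
    )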
@@ -149,27 +148,27 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
    with gr.Row():
        with gr.Column():
            system_message = gr.Textbox(
+                value="You are a kind AI assistant.",
                label="System message"
            )
            max_tokens = gr.Slider(
                minimum=1,
+                maximum=8000,
+                value=4000,
                step=1,
                label="Max new tokens"
            )
            temperature = gr.Slider(
+                minimum=0,
+                maximum=1,
                value=0.7,
                step=0.1,
                label="Temperature"
            )
            top_p = gr.Slider(
+                minimum=0,
+                maximum=1,
+                value=0.9,
                step=0.05,
                label="Top-p"
            )
@@ -181,6 +180,20 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
            placeholder="Enter text and press enter",
            container=False
        )
+
+    examples = [
+        ["Explain the usage in detail, as if describing it while looking at the screen, in at least 4000 tokens"],
+        ["Write 20 detailed FAQ entries. Use at least 4000 tokens."],
+        ["Write a YouTube video script of at least 4000 tokens, focusing on usage, differentiators, features, and strengths"],
+        ["Write an SEO-optimized blog post about this service in at least 4000 tokens"],
+        ["Continue your answer"],
+    ]
+
+    gr.Examples(
+        examples=examples,
+        inputs=msg_input,
+        cache_examples=False
+    )

    def submit_message(message, file):
        return respond_all(
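Note: the diff never shows how `msg_input` reaches `submit_message`. A minimal sketch of the usual Gradio wiring; every component name here except `msg_input` is a hypothetical stand-in (the `file` parameter of `submit_message` implies some file component, and `respond_all` streams three chat histories):

# Hypothetical wiring; file_input and chatbot1..3 are assumed names.
msg_input.submit(
    submit_message,
    inputs=[msg_input, file_input],
    outputs=[chatbot1, chatbot2, chatbot3],
)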
@@ -206,5 +219,4 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
if __name__ == "__main__":
    if not HF_TOKEN:
        print("Warning: HF_TOKEN environment variable is not set")
-    demo.launch()
-
+    demo.launch()