Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,16 @@
|
|
1 |
-
# imports
|
2 |
import gradio as gr
|
3 |
import requests
|
4 |
import json
|
5 |
import os
|
6 |
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
#
|
9 |
-
def generate(description,
|
10 |
if not description:
|
11 |
return None, None
|
12 |
|
@@ -16,9 +20,9 @@ def generate(description, model, max_tokens):
|
|
16 |
}
|
17 |
|
18 |
payload = {
|
19 |
-
'messages': [{'role': 'system', 'content':
|
20 |
'max_tokens': max_tokens,
|
21 |
-
'model':
|
22 |
}
|
23 |
|
24 |
try:
|
@@ -41,7 +45,6 @@ def generate(description, model, max_tokens):
|
|
41 |
print(f"Ошибка запроса: {e}")
|
42 |
return f"**Ошибка запроса!**\n\n```\n{e}\n```", f"Ошибка запроса!\n\n{e}"
|
43 |
|
44 |
-
|
45 |
# Ссылка на файл CSS
|
46 |
css_url = "https://neurixyufi-aihub.static.hf.space/style.css"
|
47 |
|
@@ -49,7 +52,7 @@ css_url = "https://neurixyufi-aihub.static.hf.space/style.css"
|
|
49 |
response = requests.get(css_url)
|
50 |
css = response.text + ".gradio-container{max-width: 700px !important} h1{text-align:center}"
|
51 |
|
52 |
-
#
|
53 |
with gr.Blocks(css=css) as demo:
|
54 |
gr.Markdown("# EasyGemini")
|
55 |
with gr.Tab("Запрос"):
|
@@ -57,7 +60,8 @@ with gr.Blocks(css=css) as demo:
|
|
57 |
promt = gr.Textbox(show_label=True, label="Запрос", lines=3)
|
58 |
with gr.Tab("Настройки"):
|
59 |
with gr.Row():
|
60 |
-
|
|
|
61 |
with gr.Row():
|
62 |
max_tokens = gr.Slider(show_label=True, label="Максимальное количество токенов", minimum=100, maximum=8000, value=4000, step=1)
|
63 |
with gr.Row():
|
@@ -68,6 +72,6 @@ with gr.Blocks(css=css) as demo:
|
|
68 |
with gr.Accordion(label="Без Markdown", open=False):
|
69 |
text_output_nm = gr.Textbox(show_label=False, value="**Здравствуйте!** Чем я могу Вам помочь сегодня?", lines=3)
|
70 |
|
71 |
-
text_button.click(generate, inputs=[promt,
|
72 |
|
73 |
-
demo.queue(api_open=False, max_size=150).launch()
|
|
|
|
|
1 |
import gradio as gr
|
2 |
import requests
|
3 |
import json
|
4 |
import os
|
5 |
|
6 |
+
# Load a system-role prompt from the local JSON role registry.
def load_system_role(role_name):
    """Return the system-prompt text registered under *role_name*.

    Reads ``system_roles.json`` (UTF-8) from the working directory and
    looks the role up by name. Falls back to the default assistant
    prompt when the role is unknown, or when the registry file is
    missing or corrupt, so a broken registry never crashes the UI.
    """
    try:
        with open('system_roles.json', 'r', encoding='utf-8') as file:
            roles = json.load(file)
    except (FileNotFoundError, json.JSONDecodeError):
        # Degrade to the same default used for unknown role names
        # instead of taking the whole app down at request time.
        return "Ты помощник по умолчанию."
    return roles.get(role_name, "Ты помощник по умолчанию.")
|
11 |
|
12 |
+
# Функция для генерации текста
|
13 |
+
def generate(description, system_role, max_tokens):
|
14 |
if not description:
|
15 |
return None, None
|
16 |
|
|
|
20 |
}
|
21 |
|
22 |
payload = {
|
23 |
+
'messages': [{'role': 'system', 'content': system_role}, {'role': 'user', 'content': description}],
|
24 |
'max_tokens': max_tokens,
|
25 |
+
'model': "gemini-1.5-pro-latest"
|
26 |
}
|
27 |
|
28 |
try:
|
|
|
45 |
print(f"Ошибка запроса: {e}")
|
46 |
return f"**Ошибка запроса!**\n\n```\n{e}\n```", f"Ошибка запроса!\n\n{e}"
|
47 |
|
|
|
48 |
# URL of the shared stylesheet served by the hub's static space.
css_url = "https://neurixyufi-aihub.static.hf.space/style.css"

# Download the base CSS once at startup and append local overrides.
# A timeout keeps app startup from hanging forever if the static
# space is unreachable.
response = requests.get(css_url, timeout=30)
css = response.text + ".gradio-container{max-width: 700px !important} h1{text-align:center}"
|
54 |
|
55 |
+
# UI
|
56 |
with gr.Blocks(css=css) as demo:
|
57 |
gr.Markdown("# EasyGemini")
|
58 |
with gr.Tab("Запрос"):
|
|
|
60 |
promt = gr.Textbox(show_label=True, label="Запрос", lines=3)
|
61 |
with gr.Tab("Настройки"):
|
62 |
with gr.Row():
|
63 |
+
with gr.Accordion(label="Помощник", open=True):
|
64 |
+
helper_role = gr.Radio(show_label=True, label="Выберите помощника", interactive=True, choices=["Обычный помощник", "Симулятор Linux консоли"], value="Обычный помощник")
|
65 |
with gr.Row():
|
66 |
max_tokens = gr.Slider(show_label=True, label="Максимальное количество токенов", minimum=100, maximum=8000, value=4000, step=1)
|
67 |
with gr.Row():
|
|
|
72 |
with gr.Accordion(label="Без Markdown", open=False):
|
73 |
text_output_nm = gr.Textbox(show_label=False, value="**Здравствуйте!** Чем я могу Вам помочь сегодня?", lines=3)
|
74 |
|
75 |
+
text_button.click(generate, inputs=[promt, helper_role, max_tokens], outputs=[text_output, text_output_nm])
|
76 |
|
77 |
+
demo.queue(api_open=False, max_size=150).launch()
|