File size: 12,315 Bytes
1b4edb5
 
 
b22fe89
 
 
 
b02406b
 
63ed7b5
3ebb93c
ed79e96
 
 
63ed7b5
3436dbe
c245fa7
63ed7b5
b02406b
 
b22fe89
 
 
 
b02406b
 
3436dbe
ef3c725
2e81097
3436dbe
ef3c725
f5e1fc3
3436dbe
f5e1fc3
3436dbe
 
ef3c725
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3436dbe
9f0898f
aaef489
ef3c725
 
 
9f0898f
ef3c725
9f0898f
d819a44
 
 
 
 
 
3436dbe
1dc5b22
 
 
 
f5e1fc3
96398c2
 
01911c9
b02406b
b22fe89
 
 
 
 
b02406b
 
2fcf433
98dc61a
def6a7b
 
2fcf433
98dc61a
 
d21d723
 
 
 
98dc61a
1b4edb5
98dc61a
0eb290b
 
 
 
98dc61a
 
4e6ab17
98dc61a
0eb290b
 
 
 
98dc61a
0eb290b
 
 
 
98dc61a
 
1b4edb5
 
 
98dc61a
 
1b4edb5
 
 
98dc61a
 
1b4edb5
 
 
98dc61a
 
d12806e
0eb290b
 
 
78561fc
0eb290b
 
a93a187
0eb290b
e6b4267
0eb290b
 
 
 
 
98dc61a
 
1b4edb5
98dc61a
1b4edb5
98dc61a
 
d21d723
0eb290b
 
d21d723
98dc61a
1b4edb5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0eb290b
 
98dc61a
 
1b4edb5
98dc61a
1b4edb5
0eb290b
 
 
 
98dc61a
1b4edb5
0eb290b
 
 
 
98dc61a
1b4edb5
0eb290b
 
98dc61a
1b4edb5
0eb290b
 
98dc61a
1b4edb5
0eb290b
 
ed79e96
b02406b
 
 
 
ed79e96
b02406b
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
# Попробую писать коментарии на двух языках.
# I'll try to write bilingual comments.

#=========
#=========
# Library Import

print("=========\nBegin import library\n")

# default 
import random
import gradio as gr
from huggingface_hub import InferenceClient

# added
import os
from google import genai

print("\nEnd import library\n=========")

#=========
#=========
# Backend Logic

print("=========\nBegin definition Backend Logic\n")

print("Create default API settings")
# Default API key comes from the environment (Hugging Face Space secret).
# os.getenv returns None when unset — the client is still constructed and
# will fail later at request time. NOTE(review): consider failing fast here.
gemini_API_key=os.getenv("GEMINI_API_KEY")

# Module-level Gemini client shared by all handlers below.
# 'v1alpha' selects the alpha API surface of the google-genai SDK.
client = genai.Client(
                        api_key=gemini_API_key,
                        http_options=genai.types.HttpOptions(api_version='v1alpha'),
                        )
print("Set default model")
# Default model id used by model_response(); rate limits documented in the
# Settings accordion comments further down.
used_model = "gemini-2.5-flash-preview-04-17"

print("define class format_history")
class format_history:
    """Namespace for converting Gradio chat history into Gemini's format."""

    @staticmethod
    def format_for_gemini(history):
        """Translate a Gradio messages-style history into the Gemini
        role/parts structure.

        Only "user" and "assistant" turns are kept; "assistant" is mapped
        to Gemini's "model" role. Any other role is silently dropped.
        A missing "content" key becomes an empty text part.
        """
        print("Format history")
        # Gradio role -> Gemini role translation table.
        role_map = {"user": "user", "assistant": "model"}

        converted = []
        for entry in history:
            target_role = role_map.get(entry.get("role"))
            if target_role is not None:
                converted.append({
                    "role": target_role,
                    "parts": [{"text": entry.get("content", "")}],
                })
        return converted

print("define class gemini api actions")
class gemini_API_actions:
    """Stateful wrapper around the Gemini chat API.

    Configuration lives in class attributes so the static setters below
    affect all subsequent calls.

    NOTE(review): 'apy_key' keeps the original (misspelled) attribute name
    for backward compatibility with any external readers.
    """

    apy_key = ""
    used_model = ""

    @staticmethod
    def set_used_model(new_model: str) -> None:
        """Select the model id used for subsequent chats.

        BUG FIX: the original assigned to a local variable named
        `used_model`, which never updated the class-level setting.
        """
        gemini_API_actions.used_model = new_model

    @staticmethod
    def set_API_key(new_api_key: str) -> None:
        """Store the API key.

        BUG FIX: the original assigned to a throwaway local, leaving the
        class attribute unchanged.
        """
        gemini_API_actions.apy_key = new_api_key

    @staticmethod
    def response_to_model(user_message, history):
        """Send `user_message` (a multimodal dict with a "text" key) plus
        prior `history` to Gemini and return the reply text.

        Fixes two NameErrors in the original: `this.used_model` (Python
        has no `this`) and `message["text"]` (the parameter is named
        `user_message`).

        Raises whatever the google-genai client raises on API failure —
        unlike model_response(), this variant does not swallow errors.
        """
        print(f"\n=========\nUser message\n{user_message}\n")

        formated_history = format_history.format_for_gemini(history)

        print(f"his: {formated_history}")
        print("Create chat")
        # Reference the class explicitly; `this` does not exist in Python.
        chat = client.chats.create(model=gemini_API_actions.used_model,
                                   history=formated_history)

        print("Start response")
        response = chat.send_message(user_message["text"])
        print(f"\nResponse\n{response}=========\n")

        return response.text

print("Define response work body")
def model_response(message, history):
    """Gradio ChatInterface handler.

    Parameters:
        message: multimodal input dict from gr.MultimodalTextbox; only its
                 "text" entry is forwarded to the model.
        history: Gradio messages-style history (list of role/content dicts).

    Returns the model's reply text, or an apology string containing the
    error message on any failure.
    """
    print(f"\n=========\nUser message\n{message}\n")

    formated_history = format_history.format_for_gemini(history)

    print(f"his: {formated_history}")

    try:
        # FIX: chat creation moved inside the try block so connection or
        # bad-model-name errors also produce the friendly fallback message
        # instead of raising out of the UI handler.
        print("Create chat")
        chat = client.chats.create(model=used_model, history=formated_history)

        print("Start response")
        response = chat.send_message(message["text"])
        print(f"\nResponse\n{response}=========\n")

        return response.text

    except Exception as e:
        # Broad catch is deliberate: the chat UI should always get a string
        # back rather than an exception.
        print(f"\n=== Error ===\n{str(e)}")
        return f"I apologize, but I encountered an error: {str(e)}"

print("Define test1 response work")
def random_response(message, history):
    """Test stub handler: ignore the input and answer "Yes" or "No" at random."""
    answers = ("Yes", "No")
    return random.choice(answers)

print("\nEnd definition Backend Logic\n=========")

# =========
# =========
# User Interface (UI) Definition

print("=========\nBegin definition User Interface (UI)\n")

with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo: # Using Soft theme with adjusted hues for a refined look
    print("Create visitor badge")
    # Visitor counter badge for the Hugging Face Space page.
    gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FChat_interface_test_With_backend">
               <img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FChat_interface_test_With_backend&countColor=%23263759" />
               </a>""")

    print("Create API block")
    # Collapsible section for API-key / provider configuration.
    with gr.Accordion(
      "API",
       open=False,
    ):
        print("Create API key textbox row")
        with gr.Row():
            print("Create API key textbox")
            Textbox_Block_API_key = gr.Textbox(
                label="API key",
                scale=4,
            )

        print("Create API block button row")
        with gr.Row():
            print("Create API apply button")
            # NOTE(review): Apply/Reset buttons are not wired to any
            # callback yet — clicking them does nothing.
            Button_Block_Apply_API_key = gr.Button(
                value="Apply",
                scale=1,
            )
            print("Create API reset button")
            Button_Block_Reset_API_key = gr.Button(
                value="Reset",
                scale=1,
            )

        print("Create API state markdown")
        Markdown_Block_API_key_State = gr.Markdown("API key State: False")
        # Possible options: the standard API key is used, or a
        # user-selected API key is used.

        print("Create provider dropdown")
        Dropdown_Block_Choose_provider = gr.Dropdown(label="Choose provider")
        # At this point in time, the only provider will be Google and the
        # Google API to access Gemini. (19.04.2025)
        # NOTE(review): dropdown has no choices configured yet.

        print("Create provider state")
        Markdown_Block_Povider_State = gr.Markdown("Provider State: False")
        # Shows either the selected provider and its name, or that no
        # provider is in use and why.

    print("Create main chat window")
    # Main chat surface; model_response is the backend handler defined above.
    ChatIntarface_Block_Main_chat_window = gr.ChatInterface(model_response, 
                                                            multimodal=True,
                                                            chatbot=gr.Chatbot(
                                                                label="output",
                                                                type="messages",
                                                            ),
                                                            type="messages",
                                                            textbox=gr.MultimodalTextbox(
                                                                label="input",
                                                                max_plain_text_length=9999999,
                                                            ),
                                                            editable=True,
                                                            title="Chat interface test",
                                                            save_history=True,
                                                            )

    print("Create output token markdown")
    # NOTE(review): token counters are static placeholders; nothing
    # updates them yet.
    Markdown_Block_Output_token = gr.Markdown("Token in output: False")
    print("Create input token markdown")
    Markdown_Block_Input_token = gr.Markdown("Token in input: False")

    print("Create ssettings block")
    # Collapsible section for model selection and generation parameters.
    with gr.Accordion(
        "Settings",
        open=False,
    ):
        print("Create model dropdown")
        Dropdown_Block_Choose_model = gr.Dropdown(label="Choose model")
        # The following models will be used from Google (rate limits as of
        # 19.04.2025):
        # Gemini 2.5 Flash Preview 04-17 (10 requests/min, 250,000 tokens/min, 500 requests/day)
        # Gemini 2.5 Pro Experimental (5 requests/min, 250,000 tokens/min, 25 requests/day)
        # Gemini 2.0 Flash (15 requests/min, 1,000,000 tokens/min, 1,500 requests/day)
        # Gemini 2.0 Flash Experimental (10 requests/min, 1,000,000 tokens/min, 1,500 requests/day)
        # Gemini 2.0 Flash-Lite (30 requests/min, 1,000,000 tokens/min, 1,500 requests/day)
        # Gemini 1.5 Flash (15 requests/min, 1,000,000 tokens/min, 1,500 requests/day)
        # Gemini 1.5 Flash-8B (15 requests/min, 1,000,000 tokens/min, 1,500 requests/day)
        # Gemini 1.5 Pro (2 requests/min, 32,000 tokens/min, 50 requests/day)
        # Gemma 3 (30 requests/min, 15,000 tokens/min, 14,400 requests/day)
        # Gemini 2.0 Flash Experimental will be chosen by default. (19.04.2025)
        # NOTE(review): dropdown has no choices configured and the stated
        # default is not actually applied anywhere.

        print("Create system instructions textbox")
        Textbox_Block_System_instructions = gr.Textbox(label="System instructions",)
        print("Create slider model temperature")
        # NOTE(review): none of the sliders/checkboxes below are passed to
        # the backend yet; generation uses the API defaults.
        Slier_Block_Model_Temperature = gr.Slider(label="temperature",
                                                    interactive=True,
                                                    minimum=0,
                                                    maximum=2,
                                                    value=0.95)
        print("Create slider model topP")
        Slier_Block_Model_topP = gr.Slider(label="topP",
                                            interactive=True,
                                            minimum=0,
                                            maximum=1,
                                            value=0.5)
        print("Create slider model topK")
        Slier_Block_Model_topK = gr.Slider(label="topK",
                                            interactive=True,
                                            value=100)
        print("Create checkbox output stream")
        Checkbox_Block_Output_Stream = gr.Checkbox(
                                                    label="Enable output stream"
                                                    )
        print("Create checkbox Grounding with Google Search")
        Checkbox_Block_Google_Grounding_Search = gr.Checkbox(
                                                                label="Grounding with Google Search"
                                                                )

print("\nEnd definition User Interface (UI)\n=========")

print("=========\nBegin launch demo\n")

if __name__ == "__main__":
    # Launch the Gradio app only when executed as a script (not on import).
    demo.launch()

print("\nEnd launch demo\n=========")