Update app.py
Browse files
app.py
CHANGED
@@ -13,6 +13,7 @@ import gradio as gr
|
|
13 |
from huggingface_hub import InferenceClient
|
14 |
|
15 |
# added
|
|
|
16 |
from google import genai
|
17 |
|
18 |
print("\nEnd import library\n=========")
|
@@ -23,6 +24,18 @@ print("\nEnd import library\n=========")
|
|
23 |
|
24 |
print("=========\nBegin definition Backend Logic\n")
|
25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
def random_response(message, history):
    """Placeholder chat handler: ignore the conversation and answer at random.

    Returns either "Yes" or "No" with equal probability.
    """
    options = ["Yes", "No"]
    return random.choice(options)
|
28 |
|
|
|
import os
import random

from google import genai
from google.genai import types  # added: HttpOptions is used when building the client
from huggingface_hub import InferenceClient

print("\nEnd import library\n=========")
|
|
|
24 |
|
25 |
print("=========\nBegin definition Backend Logic\n")

print("Create default API settings")
# Module-level Gemini client shared by every chat handler below.
# NOTE(review): os.getenv returns None when GEMINI_API_KEY is unset, which
# only fails later at request time — consider failing fast at startup.
client = genai.Client(
    api_key=os.getenv("GEMINI_API_KEY"),
    # NOTE(review): v1alpha appears to be required for this preview model —
    # confirm against the google-genai SDK docs.
    http_options=types.HttpOptions(api_version='v1alpha'),
)
# Preview model id; preview models can be retired, so pin deliberately.
used_model = "gemini-2.5-flash-preview-04-17"
|
33 |
+
|
34 |
def model_response(message, history):
    """Send *message* to the configured Gemini model and return the reply text.

    Uses the module-level ``client`` and ``used_model``.

    NOTE(review): gradio passes *history* as its own chat-history structure —
    confirm that format is accepted by ``client.chats.create(history=...)``.
    """
    chat = client.chats.create(model=used_model, history=history)
    # Original line was `return response = chat.send_message(message)`,
    # which is a SyntaxError (assignment inside return). Send the message,
    # then return the response text so gradio receives a plain string.
    response = chat.send_message(message)
    return response.text
|
37 |
+
|
38 |
+
print("Create test response work")
|
39 |
def random_response(message, history):
    """Dummy responder used for wiring tests: picks "Yes" or "No" at random.

    Both *message* and *history* are ignored.
    """
    answers = ("Yes", "No")
    return random.choice(answers)
|
41 |
|