musawar32ali commited on
Commit
96dbdda
·
verified ·
1 Parent(s): f0d9d63

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -0
app.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app.py
import os

import gradio as gr
from google import genai

# API key comes from the environment; on Hugging Face Spaces set it as
# the secret GEMINI_API_KEY.
api_key = os.environ.get("GEMINI_API_KEY")

# A single shared client for the whole app. If the key is missing the
# client still constructs; failures surface per-request in call_gemini.
client = genai.Client(api_key=api_key)

# Model id is overridable via env, defaulting to a fast Gemini variant.
MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
+
11
+
12
def call_gemini(prompt: str) -> str:
    """Send *prompt* to Gemini and return the textual reply.

    Never raises: any API failure is converted into a bracketed error
    string so the chat UI keeps working instead of crashing.
    """
    try:
        result = client.models.generate_content(
            model=MODEL,
            contents=prompt,
        )
        # The SDK usually exposes the reply via `.text`; fall back to the
        # response's string form if that attribute is absent.
        return getattr(result, "text", str(result))
    except Exception as e:  # broad on purpose: surface errors in-chat
        return f"[Error calling Gemini API: {e}]"
23
+
24
+
25
def generate_reply(message: str, history: list) -> list:
    """Append the user's message to *history*, ask Gemini, append the reply.

    Args:
        message: Raw textbox input. May be None (Gradio can submit an
            empty event) or whitespace-only; both are treated as "no input".
        history: Conversation so far as a list of {"role", "content"}
            dicts (Gradio 'messages' format), or None for a fresh chat.

    Returns:
        The updated history list, usable for both the Chatbot component
        and the session State.
    """
    if history is None:
        history = []

    # Guard against message=None as well as blank input; the original
    # code crashed with AttributeError on None.strip().
    user_message = (message or "").strip()
    if not user_message:
        return history

    # Append the user turn in 'messages' format.
    history.append({"role": "user", "content": user_message})

    # NOTE: only the latest message is sent, so replies carry no
    # multi-turn context. To include it, join the whole history into
    # one prompt, e.g.:
    #   "\n".join(f"{m['role']}: {m['content']}" for m in history)
    reply_text = call_gemini(user_message)

    # Append the assistant turn and hand the list back to Gradio.
    history.append({"role": "assistant", "content": reply_text})
    return history
57
+
58
+
59
with gr.Blocks(title="Gemini Chatbot") as demo:
    gr.Markdown("# Gemini Chatbot (Gradio — messages format)")

    # 'messages' type avoids Gradio's deprecated tuple-based chat format.
    chatbot = gr.Chatbot(label="Gemini", type="messages")
    state = gr.State([])  # list of {"role", "content"} dicts

    with gr.Row():
        txt = gr.Textbox(
            show_label=False,
            placeholder="Type your message and press Enter...",
            lines=1,
        )

    def user_submit(message, history):
        """Enter-key handler: route the message through generate_reply."""
        updated = generate_reply(message, history if history is not None else [])
        # The same messages list feeds both the Chatbot display and State.
        return updated, updated

    txt.submit(fn=user_submit, inputs=[txt, state], outputs=[chatbot, state])
    gr.Button("Clear").click(lambda: ([], []), None, [chatbot, state], queue=False)
83
+
84
if __name__ == "__main__":
    # Bind to all interfaces; HF Spaces injects PORT (7860 is the local default).
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)