ziffir committed on
Commit
08abab8
1 Parent(s): 6eea322

Upload app (2).py

Browse files
Files changed (1) hide show
  1. app (2).py +227 -0
app (2).py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ from typing import List, Tuple, Optional
4
+
5
+ import google.generativeai as genai
6
+ import gradio as gr
7
+ from PIL import Image
8
+
9
# Log the installed SDK version at startup to simplify debugging API issues.
print("google-generativeai:", genai.__version__)

# Server-side API key; when unset, the UI asks each user for their own key.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")

# Static HTML fragments rendered at the top of the page.
TITLE = """<h1 align="center">Gemini Playground 😎</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision API 🖇️</h2>"""
DUPLICATE = """
<div style="text-align: center; display: flex; justify-content: center; align-items: center;">
<a href="https://huggingface.co/spaces/SkalskiP/ChatGemini?duplicate=true">
<img src="https://bit.ly/3gLdBN6" alt="Duplicate Space" style="margin-right: 10px;">
</a>
<span>Duplicate the Space and run securely with your
<a href="https://makersuite.google.com/app/apikey">GOOGLE API KEY</a>.
</span>
</div>
"""


# Uploaded images are downscaled to this width (aspect ratio preserved)
# before being sent to the vision model.
IMAGE_WIDTH = 512
28
+
29
+
30
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Parse a comma-separated string of stop sequences into a list.

    Returns None when the input is empty (meaning "no stop sequences").
    Whitespace around each sequence is stripped, and empty entries — e.g.
    from a trailing comma or a blank field — are dropped, since an empty
    stop sequence is not a valid value for the generation API.
    """
    if not stop_sequences:
        return None
    sequences = [sequence.strip() for sequence in stop_sequences.split(",") if sequence.strip()]
    # If everything stripped away (e.g. input was " , "), treat as no stops.
    return sequences or None
34
+
35
+
36
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Downscale *image* to IMAGE_WIDTH pixels wide, preserving aspect ratio."""
    scaled_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, scaled_height))
39
+
40
+
41
def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
    """Record the user's turn in the chat history.

    Appends a [prompt, pending-reply] pair to the history and returns an
    empty string so the input textbox is cleared in the UI.
    """
    history = chatbot + [[text_prompt, None]]
    return "", history
43
+
44
+
45
def bot(
    google_key: str,
    image_prompt: Optional[Image.Image],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    chatbot: List[Tuple[str, str]]
):
    """Generate a Gemini reply for the latest chat turn and stream it to the UI.

    Uses the text-only 'gemini-pro' model when no image is attached, and
    'gemini-pro-vision' (with the downscaled image) otherwise. Yields the
    updated chat history repeatedly to produce a typing effect.

    Raises:
        ValueError: when no API key is available from the UI or environment.
    """
    # Prefer the key typed into the UI; fall back to the server-side key.
    google_key = google_key if google_key else GOOGLE_API_KEY
    if not google_key:
        raise ValueError(
            "GOOGLE_API_KEY is not set. "
            "Please follow the instructions in the README to set it up.")

    # The prompt for this turn is the one `user()` just appended.
    text_prompt = chatbot[-1][0]
    genai.configure(api_key=google_key)
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
        top_k=top_k,
        top_p=top_p)

    # Select model and contents once; the generate call itself is identical
    # for both the text-only and image branches.
    if image_prompt is None:
        model_name = 'gemini-pro'
        contents = text_prompt
    else:
        model_name = 'gemini-pro-vision'
        contents = [text_prompt, preprocess_image(image_prompt)]

    model = genai.GenerativeModel(model_name)
    response = model.generate_content(
        contents,
        stream=True,
        generation_config=generation_config)
    # Buffer the whole streamed response up front; the loop below replays it.
    response.resolve()

    # streaming effect: replay the reply in 10-character sections with a
    # short pause, yielding the growing history to the Chatbot each time.
    chatbot[-1][1] = ""
    for chunk in response:
        for i in range(0, len(chunk.text), 10):
            section = chunk.text[i:i + 10]
            chatbot[-1][1] += section
            time.sleep(0.01)
            yield chatbot
94
+
95
+
96
# API-key box: only visible when the server has no key of its own.
google_key_component = gr.Textbox(
    label="GOOGLE API KEY",
    value="",
    type="password",
    placeholder="...",
    info="You have to provide your own GOOGLE_API_KEY for this app to function properly",
    visible=GOOGLE_API_KEY is None
)

# Optional image attachment shown next to the chat transcript.
image_prompt_component = gr.Image(type="pil", label="Image", scale=1)
chatbot_component = gr.Chatbot(
    label='Gemini',
    bubble_full_width=False,
    scale=2
)
text_prompt_component = gr.Textbox(
    placeholder="Hi there!",
    label="Ask me anything and press Enter"
)
run_button_component = gr.Button()

# Generation-parameter controls.
temperature_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.4,
    step=0.05,
    label="Temperature",
    info=(
        "Temperature controls the degree of randomness in token selection. Lower "
        "temperatures are good for prompts that expect a true or correct response, "
        "while higher temperatures can lead to more diverse or unexpected results. "
    ))
# NOTE: info text fixed to match the slider's actual default (1024, not 2048).
max_output_tokens_component = gr.Slider(
    minimum=1,
    maximum=2048,
    value=1024,
    step=1,
    label="Token limit",
    info=(
        "Token limit determines the maximum amount of text output from one prompt. A "
        "token is approximately four characters. The default value is 1024."
    ))
stop_sequences_component = gr.Textbox(
    label="Add stop sequence",
    value="",
    type="text",
    placeholder="STOP, END",
    info=(
        "A stop sequence is a series of characters (including spaces) that stops "
        "response generation if the model encounters it. The sequence is not included "
        "as part of the response. You can add up to five stop sequences."
    ))
top_k_component = gr.Slider(
    minimum=1,
    maximum=40,
    value=32,
    step=1,
    label="Top-K",
    info=(
        "Top-k changes how the model selects tokens for output. A top-k of 1 means the "
        "selected token is the most probable among all tokens in the model’s "
        "vocabulary (also called greedy decoding), while a top-k of 3 means that the "
        "next token is selected from among the 3 most probable tokens (using "
        "temperature)."
    ))
top_p_component = gr.Slider(
    minimum=0,
    maximum=1,
    value=1,
    step=0.01,
    label="Top-P",
    info=(
        "Top-p changes how the model selects tokens for output. Tokens are selected "
        "from most probable to least until the sum of their probabilities equals the "
        "top-p value. For example, if tokens A, B, and C have a probability of .3, .2, "
        "and .1 and the top-p value is .5, then the model will select either A or B as "
        "the next token (using temperature). "
    ))

# Inputs for the `user` step (record the prompt) ...
user_inputs = [
    text_prompt_component,
    chatbot_component
]

# ... and for the `bot` step (generate and stream the reply).
bot_inputs = [
    google_key_component,
    image_prompt_component,
    temperature_component,
    max_output_tokens_component,
    stop_sequences_component,
    top_k_component,
    top_p_component,
    chatbot_component
]
189
+
190
# Assemble the page: header HTML, the key box, the image/chat row, the prompt
# box with a run button, and the tuning controls in collapsible accordions.
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    gr.HTML(DUPLICATE)
    with gr.Column():
        google_key_component.render()
        with gr.Row():
            image_prompt_component.render()
            chatbot_component.render()
        text_prompt_component.render()
        run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            with gr.Accordion("Advanced", open=False):
                top_k_component.render()
                top_p_component.render()

    # Clicking the button first records the user's turn (fast, unqueued),
    # then streams the model reply into the chatbot via `bot`.
    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    # Pressing Enter in the textbox triggers the same two-step pipeline.
    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

# Queue limits concurrent requests; show_error surfaces exceptions in the UI.
demo.queue(max_size=99).launch(debug=False, show_error=True)