TheBlueberry-AI committed on
Commit
5b6e4b3
1 Parent(s): faff147

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +213 -0
app.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library.
import os
import time
from typing import List, Tuple, Optional, Dict

# Third-party: Google Gemini SDK, Gradio for the web UI, Pillow for images.
import google.generativeai as genai
import gradio as gr
from PIL import Image

# Log the SDK version at startup (handy when debugging deployments).
print("google-generativeai:", genai.__version__)

# Credentials come from environment variables (e.g. Space secrets).
GGL_API_KEY = os.environ.get("GGL_API_KEY")  # Gemini API key, used in bot()
gglusr = os.environ.get("GGL_USR")  # username for the Gradio login prompt
gglpwd = os.environ.get("GGL_PWD")  # password for the Gradio login prompt

TITLE = """<h2 align="center">🫐Blueberry-AI ✨Gemini Chat🫐</h2>"""
# Uploaded images are resized to this width; height is scaled proportionally.
IMAGE_WIDTH = 512
17
+
18
+
19
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Split a comma-separated string into a list of trimmed stop sequences.

    Returns None for an empty/falsy input, which signals the Gemini API
    that no stop sequences apply.
    """
    if not stop_sequences:
        return None
    sequences = []
    for raw_sequence in stop_sequences.split(","):
        sequences.append(raw_sequence.strip())
    return sequences
23
+
24
+
25
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Resize *image* to IMAGE_WIDTH wide, preserving its aspect ratio.

    Fix: the return annotation is Optional, but a None input previously
    raised AttributeError on `image.height`; now None passes through.

    :param image: uploaded PIL image, or None when nothing was attached.
    :return: resized copy of the image, or None if *image* is None.
    """
    if image is None:
        return None
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))
28
+
29
+
30
def preprocess_chat_history(
    history: List[Tuple[Optional[str], Optional[str]]]
) -> List[Dict[str, List[str]]]:
    """Flatten Gradio (user, model) turn pairs into Gemini chat messages.

    Each non-None half of a turn becomes one {'role': ..., 'parts': [...]}
    dict; None halves (e.g. the pending model reply) are skipped.
    """
    messages: List[Dict[str, List[str]]] = []
    for user_turn, model_turn in history:
        for role, text in (('user', user_turn), ('model', model_turn)):
            if text is not None:
                messages.append({'role': role, 'parts': [text]})
    return messages
40
+
41
+
42
def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
    """Append the submitted prompt as a new chat turn and clear the textbox.

    The model half of the new turn is None until `bot` streams it in.
    Returns ("", updated_history) for the (textbox, chatbot) outputs.
    """
    updated_history = chatbot + [[text_prompt, None]]
    return "", updated_history
44
+
45
+
46
def bot(
    image_prompt: Optional[Image.Image],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    chatbot: List[Tuple[str, str]]
):
    """Generate the Gemini reply for the latest turn, yielding partial updates.

    The latest user message is chatbot[-1][0] (appended by `user`). With no
    image attached, the full chat history goes to 'gemini-pro'; with an
    image, only the latest text plus the resized image go to
    'gemini-pro-vision' (prior history is NOT sent to the vision model).
    Yields the chatbot list after each small text slice so Gradio renders
    a typing effect.
    """

    text_prompt = chatbot[-1][0]
    genai.configure(api_key=GGL_API_KEY)
    # Sampling parameters come straight from the UI sliders.
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
        top_k=top_k,
        top_p=top_p)

    if image_prompt is None:
        # Text-only path: send the whole conversation for context.
        model = genai.GenerativeModel('gemini-pro')
        response = model.generate_content(
            preprocess_chat_history(chatbot),
            stream=True,
            generation_config=generation_config)
        # NOTE(review): resolve() appears to wait for the full response, so
        # the loop below replays cached chunks rather than live-streaming —
        # confirm this is intended (it does surface API errors early).
        response.resolve()
    else:
        # Vision path: resize the image, send only current prompt + image.
        image_prompt = preprocess_image(image_prompt)
        model = genai.GenerativeModel('gemini-pro-vision')
        response = model.generate_content(
            contents=[text_prompt, image_prompt],
            stream=True,
            generation_config=generation_config)
        response.resolve()

    # streaming effect: emit the reply in 10-character slices with a short
    # sleep so the UI appears to type the answer out.
    chatbot[-1][1] = ""
    for chunk in response:
        for i in range(0, len(chunk.text), 10):
            section = chunk.text[i:i + 10]
            chatbot[-1][1] += section
            time.sleep(0.01)
            yield chatbot
89
+
90
+
91
# ---- UI components (created here, rendered into the Blocks layout below) ----

# Optional image attachment; a non-None value routes bot() to gemini-pro-vision.
image_prompt_component = gr.Image(type="pil", label="Image", scale=1, height=400)
chatbot_component = gr.Chatbot(
    label='Gemini',
    bubble_full_width=False,
    # NOTE(review): relative avatar paths — these files must ship alongside app.py.
    avatar_images=("./usr.png", "./bot.png"),
    likeable=True,
    show_copy_button=True,
    scale=2,
    height=400
)
text_prompt_component = gr.Textbox(
    placeholder="Hi there!",
    label="Ask me anything and press Enter"
)
run_button_component = gr.Button()
# Sampling controls below are forwarded verbatim to GenerationConfig in bot().
temperature_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.4,
    step=0.05,
    label="Temperature",
    info=(
        "Temperature controls the degree of randomness in token selection. Lower "
        "temperatures are good for prompts that expect a true or correct response, "
        "while higher temperatures can lead to more diverse or unexpected results. "
    ))
# NOTE(review): info text says "default value is 2048" but the slider default
# is 1024 — one of the two should be corrected.
max_output_tokens_component = gr.Slider(
    minimum=1,
    maximum=2048,
    value=1024,
    step=1,
    label="Token limit",
    info=(
        "Token limit determines the maximum amount of text output from one prompt. A "
        "token is approximately four characters. The default value is 2048."
    ))
stop_sequences_component = gr.Textbox(
    label="Add stop sequence",
    value="",
    type="text",
    placeholder="STOP, END",
    info=(
        "A stop sequence is a series of characters (including spaces) that stops "
        "response generation if the model encounters it. The sequence is not included "
        "as part of the response. You can add up to five stop sequences."
    ))
top_k_component = gr.Slider(
    minimum=1,
    maximum=40,
    value=32,
    step=1,
    label="Top-K",
    info=(
        "Top-k changes how the model selects tokens for output. A top-k of 1 means the "
        "selected token is the most probable among all tokens in the model’s "
        "vocabulary (also called greedy decoding), while a top-k of 3 means that the "
        "next token is selected from among the 3 most probable tokens (using "
        "temperature)."
    ))
top_p_component = gr.Slider(
    minimum=0,
    maximum=1,
    value=1,
    step=0.01,
    label="Top-P",
    info=(
        "Top-p changes how the model selects tokens for output. Tokens are selected "
        "from most probable to least until the sum of their probabilities equals the "
        "top-p value. For example, if tokens A, B, and C have a probability of .3, .2, "
        "and .1 and the top-p value is .5, then the model will select either A or B as "
        "the next token (using temperature). "
    ))

# Input lists for the two event handlers: user() takes (textbox, history);
# bot() takes the image, all sampling controls, and the history.
user_inputs = [
    text_prompt_component,
    chatbot_component
]

bot_inputs = [
    image_prompt_component,
    temperature_component,
    max_output_tokens_component,
    stop_sequences_component,
    top_k_component,
    top_p_component,
    chatbot_component
]
178
+
179
# Page layout: the components defined above are .render()ed into place here.
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Column():
        with gr.Row():
            image_prompt_component.render()
            chatbot_component.render()
        text_prompt_component.render()
        run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            with gr.Accordion("Advanced", open=False):
                top_k_component.render()
                top_p_component.render()

    # Button click and textbox Enter run the same two-step pipeline:
    # user() appends the prompt to the history (queue=False → instant echo),
    # then bot() streams the Gemini reply into the chatbot.
    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

# NOTE(review): if GGL_USR/GGL_PWD are unset, auth=(None, None) is passed to
# launch() — confirm the deployment sets both secrets, or guard this call.
demo.queue(max_size=99).launch(auth=(gglusr, gglpwd),show_api=False, debug=False, show_error=True)