# ggm-chat / app.py
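"""Gradio chat UI for Google's Gemini Pro models.

Text-only prompts are sent to `gemini-pro` with the full chat history;
prompts with an attached image go to `gemini-pro-vision`. The buffered
response is replayed in small slices to simulate streaming in the UI.
"""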
import os
import time
from typing import List, Tuple, Optional, Dict
import google.generativeai as genai
import gradio as gr
from PIL import Image
print("google-generativeai:", genai.__version__)
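
# Credentials come from the environment: GG_API_KEY authenticates against the
# Gemini API; OAI_USR / OAI_PWD protect the Gradio app behind basic auth.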
GG_API_KEY = os.environ.get("GG_API_KEY")
oaiusr = os.environ.get("OAI_USR")
oaipwd = os.environ.get("OAI_PWD")
TITLE = """<h2 align="center">Tomoniai's Gemini Pro Chat</h2>"""
AVATAR_IMAGES = ("./user.png", "./botg.png")
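# Uploaded images are downscaled to this width before being sent to the vision
# model; the height is derived to preserve the aspect ratio (see preprocess_image).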
IMAGE_WIDTH = 512

def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Split a comma-separated string of stop sequences into a list."""
    if not stop_sequences:
        return None
    return [sequence.strip() for sequence in stop_sequences.split(",")]

def preprocess_image(image: Image.Image) -> Image.Image:
    """Downscale an image to IMAGE_WIDTH, preserving its aspect ratio."""
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))

def preprocess_chat_history(
    history: List[Tuple[Optional[str], Optional[str]]]
) -> List[Dict[str, List[str]]]:
    """Convert Gradio (user, model) message pairs into the Gemini message
    format, e.g. {'role': 'user', 'parts': ['Hello']}."""
    messages = []
    for user_message, model_message in history:
        if user_message is not None:
            messages.append({'role': 'user', 'parts': [user_message]})
        if model_message is not None:
            messages.append({'role': 'model', 'parts': [model_message]})
    return messages

def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
    """Append the user's message to the chat history and clear the textbox."""
    return "", chatbot + [[text_prompt, None]]

def bot(
    image_prompt: Optional[Image.Image],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    chatbot: List[Tuple[str, str]]
):
    """Send the latest prompt (and optional image) to Gemini and stream the reply."""
    text_prompt = chatbot[-1][0]
    # The API key is taken from the environment (GG_API_KEY).
    genai.configure(api_key=GG_API_KEY)
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
        top_k=top_k,
        top_p=top_p)

    if image_prompt is None:
        # Text-only chat: send the full history to the text model.
        model = genai.GenerativeModel('gemini-pro')
        response = model.generate_content(
            preprocess_chat_history(chatbot),
            stream=True,
            generation_config=generation_config)
        response.resolve()
    else:
        # Multimodal prompt: the vision model takes only the current text + image.
        image_prompt = preprocess_image(image_prompt)
        model = genai.GenerativeModel('gemini-pro-vision')
        response = model.generate_content(
            contents=[text_prompt, image_prompt],
            stream=True,
            generation_config=generation_config)
        response.resolve()

    # resolve() above buffered the full response; replay it in 10-character
    # slices with a short delay to simulate streaming in the UI.
    chatbot[-1][1] = ""
    for chunk in response:
        for i in range(0, len(chunk.text), 10):
            section = chunk.text[i:i + 10]
            chatbot[-1][1] += section
            time.sleep(0.01)
            yield chatbot
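
# UI components are created up front and attached to the page later with
# `.render()` inside the gr.Blocks layout below.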
image_prompt_component = gr.Image(type="pil", label="Image", scale=1, height=400)
chatbot_component = gr.Chatbot(
    label='Gemini',
    bubble_full_width=False,
    avatar_images=AVATAR_IMAGES,
    scale=2,
    height=400
)
text_prompt_component = gr.Textbox(
    placeholder="Hi there!",
    label="Ask me anything and press Enter"
)
run_button_component = gr.Button()
temperature_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.4,
    step=0.05,
    label="Temperature",
    info=(
        "Temperature controls the degree of randomness in token selection. Lower "
        "temperatures are good for prompts that expect a true or correct response, "
        "while higher temperatures can lead to more diverse or unexpected results."
    ))
max_output_tokens_component = gr.Slider(
    minimum=1,
    maximum=2048,
    value=1024,
    step=1,
    label="Token limit",
    info=(
        "Token limit determines the maximum amount of text output from one prompt. A "
        "token is approximately four characters. The default here is 1024."
    ))
stop_sequences_component = gr.Textbox(
    label="Add stop sequence",
    value="",
    type="text",
    placeholder="STOP, END",
    info=(
        "A stop sequence is a series of characters (including spaces) that stops "
        "response generation if the model encounters it. The sequence is not included "
        "as part of the response. You can add up to five stop sequences."
    ))
top_k_component = gr.Slider(
    minimum=1,
    maximum=40,
    value=32,
    step=1,
    label="Top-K",
    info=(
        "Top-k changes how the model selects tokens for output. A top-k of 1 means the "
        "selected token is the most probable among all tokens in the model’s "
        "vocabulary (also called greedy decoding), while a top-k of 3 means that the "
        "next token is selected from among the 3 most probable tokens (using "
        "temperature)."
    ))
top_p_component = gr.Slider(
    minimum=0,
    maximum=1,
    value=1,
    step=0.01,
    label="Top-P",
    info=(
        "Top-p changes how the model selects tokens for output. Tokens are selected "
        "from most probable to least until the sum of their probabilities equals the "
        "top-p value. For example, if tokens A, B, and C have a probability of .3, .2, "
        "and .1 and the top-p value is .5, then the model will select either A or B as "
        "the next token (using temperature)."
    ))
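
# Argument lists for the event handlers: `user` takes the textbox and chat
# history; `bot` takes the generation settings plus the history, in the same
# order as its parameters.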
user_inputs = [
    text_prompt_component,
    chatbot_component
]
bot_inputs = [
    image_prompt_component,
    temperature_component,
    max_output_tokens_component,
    stop_sequences_component,
    top_k_component,
    top_p_component,
    chatbot_component
]

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Column():
        with gr.Row():
            image_prompt_component.render()
            chatbot_component.render()
        text_prompt_component.render()
        run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            with gr.Accordion("Advanced", open=False):
                top_k_component.render()
                top_p_component.render()

    # Each trigger first appends the user message, then streams the bot reply.
    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

# Only enable basic auth when both credentials are present in the environment.
demo.queue(max_size=99).launch(
    auth=(oaiusr, oaipwd) if oaiusr and oaipwd else None,
    show_api=False,
    debug=False,
    show_error=True,
)
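
# To run locally (a sketch; assumes the google-generativeai and gradio packages
# are installed and GG_API_KEY holds a valid Gemini API key):
#   export GG_API_KEY=... OAI_USR=... OAI_PWD=...
#   python app.py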