# ChatGPT-4o / app.py
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
from google.cloud import storage
from google.oauth2 import service_account
import json
import os
import requests
# upload image to google cloud storage
def upload_file_to_gcs_blob(file):
    # Google Cloud credentials are read from the environment as a service-account JSON key
    google_creds = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")
    creds_json = json.loads(google_creds)
    credentials = service_account.Credentials.from_service_account_info(creds_json)

    storage_client = storage.Client(credentials=credentials, project=creds_json['project_id'])
    bucket_name = os.environ.get('bucket_name')
    bucket = storage_client.bucket(bucket_name)

    # use the local file name as the blob name, upload the file, and return its public URL
    destination_blob_name = os.path.basename(file)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(file)

    public_url = blob.public_url
    return public_url
from PIL import Image

def is_image(file_path):
    # treat the file as an image if PIL can open it
    try:
        Image.open(file_path)
        return True
    except IOError:
        return False
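# Note on the check above: Image.open() parses only the image header lazily, so this accepts
# any file PIL recognizes without fully decoding or verifying the image data.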
# def respond(
# message,
# history: list[tuple[str, str]],
# system_message,
# max_tokens,
# temperature,
# top_p,
# ):
# messages = [{"role": "system", "content": system_message}]
# for val in history:
# if val[0]:
# messages.append({"role": "user", "content": val[0]})
# if val[1]:
# messages.append({"role": "assistant", "content": val[1]})
# messages.append({"role": "user", "content": message})
# response = ""
# for message in client.chat_completion(
# messages,
# max_tokens=max_tokens,
# stream=True,
# temperature=temperature,
# top_p=top_p,
# ):
# token = message.choices[0].delta.content
# response += token
# yield response
def get_completion(message, history):
    # validate the multimodal input: a text query is always required
    if message["text"].strip() == "" and not message["files"]:
        raise gr.Error("Please input a query and optionally image(s).")
    if message["text"].strip() == "" and message["files"]:
        raise gr.Error("Please input a text query along with the image(s).")

    text = message['text']
    content = [
        {"type": "text", "text": text},
    ]

    if message['files']:
        file = message['files'][0]
        public_url = upload_file_to_gcs_blob(file)
        if is_image(file):  # only image files are supported for now
            content_image = {
                "type": "image_url",
                "image_url": {
                    "url": public_url,
                },
            }
            content.append(content_image)
        else:
            raise gr.Error("Only image files are supported for now.")
    # rebuild the conversation in OpenAI chat format
    history_openai_format = []
    for human, assistant in history:
        # skip history entries that carry image info (Gradio passes file-only turns as tuples)
        if isinstance(human, tuple):
            continue
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": content})
    print(history_openai_format)
    system_message = '''You are GPT-4o ("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
GPT-4o matches GPT-4 Turbo performance on text in English and code, with significant improvement on text in non-English languages, while also being much faster.
GPT-4o is especially better at vision and audio understanding compared to existing models.
GPT-4o's text and image capabilities are available to users now. More capabilities like audio and video will be rolled out iteratively in the future.
'''
    # headers
    openai_api_key = os.environ.get('openai_api_key')
    headers = {
        'Authorization': f'Bearer {openai_api_key}'
    }

    temperature = 0.7
    max_tokens = 2048

    init_message = [{"role": "system", "content": system_message}]
    # system message + latest 2 rounds of dialogue + the current user input
    messages = init_message + history_openai_format[-5:]
    # request body
    data = {
        'model': 'gpt-4o',  # we use gpt-4o here
        'messages': messages,
        'temperature': temperature,
        'max_tokens': max_tokens,
        # 'stream': True,
    }
    # get response
    response = requests.post('https://burn.hair/v1/chat/completions', headers=headers, json=data)
    response_data = response.json()
    if 'error' in response_data:
        response_content = response_data['error']['message']
    else:
        response_content = response_data['choices'][0]['message']['content']
        usage = response_data['usage']  # token usage reported by the API; currently unused
    return response_content
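# For reference, a typical request body built above for one text+image turn looks roughly like
# the sketch below (the image URL is illustrative, not a real object):
# {
#     "model": "gpt-4o",
#     "messages": [
#         {"role": "system", "content": "You are GPT-4o..."},
#         {"role": "user", "content": [
#             {"type": "text", "text": "What is in this picture?"},
#             {"type": "image_url", "image_url": {"url": "https://storage.googleapis.com/<bucket>/photo.png"}},
#         ]},
#     ],
#     "temperature": 0.7,
#     "max_tokens": 2048,
# }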
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    get_completion,
    multimodal=True,
    # additional_inputs=[
    #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    # ],
)
demo.queue(max_size=10)

if __name__ == "__main__":
    demo.launch()