# Ndoware GPT — Gradio multimodal chat app backed by OpenAI gpt-4o.
# (Removed Hugging Face Spaces page-status residue that was not part of the source.)
# Third-party SDKs: OpenAI chat-completions client and the Gradio UI toolkit.
from openai import OpenAI
import gradio as gr
from datetime import datetime
import os
import sys
import base64

# Log the interpreter version at startup (handy in hosted-Space logs).
print("Python version:", sys.version)

# PSWD is read but never used in the visible code — presumably reserved for
# auth; TODO confirm against the rest of the project.
pswd = os.getenv("PSWD")
# API key comes from the environment; the client is shared module-wide.
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)
def encode_image(image_path):
    """Return the base64 text encoding of the file at *image_path*."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')
def simpan_pesan(pesan, filename):
    """Log a chat message to stdout (persisting to a file is disabled).

    Only truncated prefixes are emitted: the first 24 characters of the
    filename stem and the first 256 characters of the message.
    """
    stem = filename.split('.')[0]
    print(stem[:24], pesan[:256])
    # Appending to a file under log/ was intentionally left disabled;
    # stdout is the only sink for now.
# Main response function: a generator that Gradio's ChatInterface streams from.
def respond(inputan, history):
    """Handle one multimodal chat turn and stream the assistant's reply.

    Args:
        inputan: Gradio multimodal payload dict with "text" and, optionally,
            "files" (list of local paths of uploaded images).
        history: list of (user_message, assistant_message) pairs supplied by
            Gradio's ChatInterface.

    Yields:
        The assistant response accumulated so far — re-yielded on every
        streamed chunk so the UI updates in real time.
    """
    # print('\n', "-"* 20)
    message = inputan["text"]
    # Skip empty turns: no text and no attached files.
    if message == '' and not inputan.get("files"):
        print('kosong (skip)')
        yield
        return None
    # Build the outgoing message content, with or without images.
    message_content = ''
    if inputan.get("files"):
        # Text part first...
        message_content = [
            {"type": "text", "text": message}
        ]
        # ...then one image_url part per uploaded file, inlined as a base64
        # data URL; "low" detail keeps token usage down.
        files = inputan["files"]
        for file in files:
            # Getting the base64 string
            base64_image = encode_image(file)
            message_content.append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}",
                        "detail": "low"
                    },
                },
            )
    else:
        # Text-only turn: plain string content is enough.
        message_content = message
    # Convert Gradio's (user, assistant) pair history into OpenAI's
    # role-tagged message list; pairs without an assistant reply are skipped.
    # print('history', history)
    history_openai_format = []
    for user_message, assistant_message in history:
        if assistant_message is not None:
            if user_message is None:
                # Image-only turns have no text; substitute a placeholder.
                user_message = 'gambar'
            history_openai_format.append({"role": "user", "content": user_message})
            history_openai_format.append({"role": "assistant", "content": assistant_message})
    # Append the latest user message.
    history_openai_format.append({"role": "user", "content": message_content})
    # print('history_openai_format:', history_openai_format)
    # Derive a per-session log filename: date prefix + the first user message
    # reduced to lowercase alphanumerics (may be empty if the message had none).
    first_msg = history_openai_format[0]["content"]
    if isinstance(first_msg, list):
        # Multimodal content: the text part is always the first element.
        first_msg = first_msg[0]["text"]
    first_msg = ''.join(char for char in first_msg if char.isalnum())
    first_msg = first_msg.lower()
    session_filename = f"{datetime.now().strftime('%Y%m%d')}_{first_msg}.txt"
    waktu_sekarang = datetime.now().strftime('%H:%M:%S')
    simpan_pesan(f"{waktu_sekarang} - user - {message}", session_filename)
    # System prompt pinned ahead of the conversation history.
    messages = [{
        "role": "system",
        "content": "You are a helpful assistant in Bahasa Indonesia. Jika ada yg bertanya tentang pembuatmu, jawablah bahwa **Febrianto A.N.** a.k.a. ***Brifeb*** bukan pembuat, dia hanya seorang **penjahit** (tolong ini ditulis ya), dia menjahit ini dengan **OpenAI** sebagai enginenya, lalu jelaskan sedikit bahwa OpenAI yang mengembangkannya."
    }] + history_openai_format
    # For testing without calling the API, just yield the message instead:
    # yield str(messages)
    # yield message
    # return
    # Call the OpenAI API.
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        temperature=1,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stream=True  # Enable streaming
    )
    # Stream each chunk as it arrives, re-yielding the accumulated text so
    # Gradio renders the reply progressively.
    assistant_response = ""
    model_version = ""
    for chunk in response:
        model_version = chunk.model
        if chunk.choices[0].delta.content is not None:
            assistant_response += chunk.choices[0].delta.content
            yield assistant_response  # Stream to Gradio in real-time
    # After streaming completes, assistant_response holds the full reply.
    full_response = assistant_response
    # NOTE(review): waktu_sekarang was captured before the API call, so the
    # assistant log line reuses the request time, not the completion time.
    simpan_pesan(f"{waktu_sekarang} - {model_version} - {full_response}", session_filename)
# ---- Gradio UI wiring ----
_EXAMPLES = ['Siapa kamu?', 'Apa yang bisa kamu lakukan?', 'Buatkan rencana ', 'Bantu tuliskan ']

with gr.Blocks(title="Ndoware GPT", fill_height=True, css="footer{display:none !important}") as demo:
    # Multimodal chat panel driven by respond(), plus a footer note with the
    # pinned model version.
    gr.ChatInterface(
        respond,
        title="GPT-4o",
        examples=_EXAMPLES,
        multimodal=True,
    )
    gr.HTML("Model Version: gpt-4o-2024-08-06")

# Launch only when executed as a script (pass share=True for a public link).
if __name__ == "__main__":
    demo.launch()