import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


from google.cloud import storage
from google.oauth2 import service_account
import json
import os
import requests

# Upload an image file to Google Cloud Storage and return its public URL.
def upload_file_to_gcs_blob(file):
    # Service-account credentials are passed as a JSON string via the environment.
    google_creds = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")
    creds_json = json.loads(google_creds)
    credentials = service_account.Credentials.from_service_account_info(creds_json)

    # Google Cloud Storage client
    storage_client = storage.Client(credentials=credentials, project=creds_json['project_id'])

    bucket_name = os.environ.get('bucket_name')
    bucket = storage_client.bucket(bucket_name)

    # Use the local file's basename as the blob name.
    destination_blob_name = os.path.basename(file)
    blob = bucket.blob(destination_blob_name)

    blob.upload_from_filename(file)

    public_url = blob.public_url

    return public_url
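
# A minimal usage sketch, not executed anywhere; it assumes the env vars above
# are set and that "example.png" is a hypothetical local file:
#
#     url = upload_file_to_gcs_blob("example.png")
#     print(url)  # e.g. https://storage.googleapis.com/<bucket>/example.png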


from PIL import Image

def is_image(file_path):
    try:
        # Use a context manager so the file handle is closed after the check.
        with Image.open(file_path):
            return True
    except IOError:
        return False
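
# Quick sanity check (hypothetical paths):
#     is_image("photo.png")   # -> True
#     is_image("notes.txt")   # -> False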

from supabase import create_client, Client

def get_supabase_client():
    url = os.environ.get('supabase_url')
    key = os.environ.get('supabase_key')
    supabase = create_client(url, key)
    return supabase

def supabase_insert_message(user_message, response_content, messages, response_data,
                            user_name, user_oauth_token, ip, sign, cookie_value, content_type):
    supabase = get_supabase_client()
    data, count = supabase.table('messages').insert({
        "user_message": user_message,
        "response_content": response_content,
        "messages": messages,
        "response": response_data,
        "user_name": user_name,
        "user_oauth_token": user_oauth_token,
        "ip": ip,
        "sign": sign,
        "cookie": cookie_value,
        "content_type": content_type,
    }).execute()

def supabase_insert_user(name, user_name, profile, picture, oauth_token):
    supabase = get_supabase_client()
    data, count = supabase.table('users').insert({
        "name": name,
        "user_name": user_name,
        "profile": profile,
        "picture": picture,
        "oauth_token": oauth_token,
    }).execute()


def supabase_fetch_user(user_name):
    supabase = get_supabase_client()
    data, count = supabase.table('users').select("*").eq('user_name', user_name).execute()
    return data
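
# A minimal usage sketch, not called anywhere. With the supabase-py version this
# app appears to target, execute() unpacks into ('data', [...]) and ('count', ...)
# tuples, so the row list lives at index 1 (which is why the commented-out code
# below checks user_data[1]). The username here is hypothetical.
def _demo_fetch_or_insert_user():
    user_data = supabase_fetch_user('some_user')
    if not user_data[1]:  # no matching rows yet
        supabase_insert_user('Some User', 'some_user', '', '', '')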
        


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content

#         response += token
#         yield response

# def get_completion(message,history,profile: gr.OAuthProfile | None,oauth_token: gr.OAuthToken | None,request: gr.Request):
#     if request:
#         ip = request.client.host
#         print("Query parameters:", dict(request.query_params))
#         sign = dict(request.query_params).get('__sign')

#         # get cookie
#         headers = request.headers.raw 
        
#         # find 'cookie'
#         cookie_header = next((header for header in headers if header[0] == b'cookie'), None)
        
#         if cookie_header:
#             # extract cookie
#             cookie_value = cookie_header[1].decode()
#             print(f"Cookie: {cookie_value}")
#         else:
#             cookie_value = ''
#             print("No cookie found in request headers")
            
    
#     # check login
#     if profile is None:
#         # raise gr.Error('Click "Sign in with Hugging Face" to continue')
#         user_name = 'unknown'
#         user_oauth_token = ''
#         name = 'unknown'
#         pf = ''
#         pic = ''
#     else:
#         user_name = profile.username
#         user_oauth_token = oauth_token.token
#         name = profile.name
#         pf = profile.profile
#         pic = profile.picture
        
#     # check if user exists
#     user_data = supabase_fetch_user(user_name)
#     if not user_data[1]:
#         supabase_insert_user(name,user_name,pf,pic,user_oauth_token)

    
#     # check if messages are empty
#     if message["text"].strip() == "" and not message["files"]:
#         raise gr.Error("Please input a query and optionally image(s).")
    
#     if message["text"].strip() == "" and message["files"]:
#         raise gr.Error("Please input a text query along with the image(s).")
    
#     text = message['text']
#     user_message = [
#         {"type": "text", "text": text},
#     ]
#     content_type = 'text'
#     if message['files']:
#         file = message['files'][0]
#         public_url = upload_file_to_gcs_blob(file)
#         if is_image(file):  # only image files are supported for now
#             content_image = {
#                 "type": "image_url",
#                 "image_url": {
#                     "url": public_url,
#                 },
#             }
#             user_message.append(content_image)
#             content_type = 'image'
#         else:
#             raise gr.Error("Only image files are supported for now.")

#     history_openai_format = []
#     for human, assistant in history:
#         # skip history entries that contain images (tuples) or are empty
        
#         if isinstance(human, tuple) or human == "" or assistant is None:
#             continue
            
#         history_openai_format.append({"role": "user", "content": human })
#         history_openai_format.append({"role": "assistant", "content":assistant})
#     history_openai_format.append({"role": "user", "content": user_message})
#     # print(history_openai_format)
    
#     system_message = '''You are GPT-4o ("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
#     GPT-4o matches GPT-4 Turbo performance on text in English and code, with significant improvement on text in non-English languages, while also being much faster.
#     GPT-4o is especially better at vision and audio understanding compared to existing models.
#     GPT-4o's text and image capabilities are available for users now. More capabilities like audio and video will be rolled out iteratively in the future.
#     '''

    
#     # headers
#     openai_api_key = os.environ.get('openai_api_key')
#     base_url = os.environ.get('base_url')
#     headers = {
#       'Authorization': f'Bearer {openai_api_key}'
#     }

#     temperature = 0.7
#     max_tokens = 2048

#     init_message = [{"role": "system", "content": system_message}]
#     messages = init_message + history_openai_format[-5:] # system message + latest two dialogue rounds + current user input
#     print(messages)
#     # request body
#     data = {
#         'model': 'gpt-4o',  # we use gpt-4o here
#         'messages': messages,
#         'temperature':temperature, 
#         'max_tokens':max_tokens,
#         'stream':True,
#         # 'stream_options':{"include_usage": True}, # retrieving token usage for stream response
#     }

#     # get response
#     # response = requests.post(base_url, headers=headers, json=data)
#     # response_data = response.json()
#     # print(response_data)
#     # print('-----------------------------------\n')
#     # if 'error' in response_data:
#     #     response_content = response_data['error']['message']
#     # else:
#     #     response_content = response_data['choices'][0]['message']['content']
#     #     usage = response_data['usage']
#     # return response_content

#     # get response with stream
#     response = requests.post(base_url, headers=headers, json=data,stream=True)
#     response_content = ""
#     for line in response.iter_lines():
#         line = line.decode().strip()
#         if line == "data: [DONE]":
#             continue
#         elif line.startswith("data: "):
#             line = line[6:] # remove prefix "data: "
#             try:
#                 data = json.loads(line)
#                 if "delta" in data["choices"][0]:
#                     content = data["choices"][0]["delta"].get("content", "")
#                     response_content += content
#                     yield response_content
#             except json.JSONDecodeError:
#                 print(f"Error decoding line: {line}")

#     print(response_content)
#     print('-----------------------------------\n')
#     response_data = {}
    
#     supabase_insert_message(user_message,response_content,messages,response_data,user_name,user_oauth_token,ip,sign,cookie_value,content_type)
    

# Stub responder: the Space has been retired; every query returns a redirect notice.
def get_completion(message, history):
    print(message)
    res = "**Important Announcement:**  \n\nThis space is shutting down now. \n\nVisit [chatgpt-4o](https://chatgpt-4o.streamlit.app/) for an improved UI experience and future enhancements."
    return res


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

title = "ChatGPT-4o"
description = "This is GPT-4o; text and image capabilities are available now. More capabilities like audio and video will be rolled out iteratively in the future. Stay tuned."


with gr.Blocks(fill_height=True) as demo:
    gr.Markdown(
        "# ChatGPT-4o"
        "\n\nThis is GPT-4o; text and image capabilities are available now. More capabilities like audio and video will be rolled out iteratively in the future. Stay tuned."
    )
    gr.LoginButton()
    
    # gr.Markdown("""
    #     ## This space will be shutting down soon. \n\n
        
    #     ## Visit [chatgpt-4o](https://chatgpt-4o.streamlit.app/) for an improved UI experience and future enhancements.
    #     """
    # )
    gr.ChatInterface(
        get_completion,
        multimodal=True,
        # title = title,
        # description = description
        # additional_inputs=[
        #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # ],
    )



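# Queue incoming requests; at most 5 events run concurrently by default
# (Gradio 4.x default_concurrency_limit, applied to handlers without their own limit).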
demo.queue(default_concurrency_limit=5)


if __name__ == "__main__":
    demo.launch()