Update app.py
app.py CHANGED
@@ -29,9 +29,8 @@ openai_api_key = os.getenv('glhf')
 ping_key = os.getenv('bolo')
 api_url = os.getenv('yolo')
 model = os.getenv('model')
-
+model2 = os.getenv('model2')
 mongoURI = os.getenv('MONGO_URI')
-hf_token = os.getenv('hf_token')
 
 # OpenAI and MongoDB clients
 openai_client = OpenAI(api_key=openai_api_key, base_url=api_url)
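The change above adds a second model name, `model2`, to the environment-driven configuration and drops the old `hf_token` line. A small startup check can make a misconfigured Space fail early instead of producing `NoneType` errors deep inside the OpenAI or Mongo clients; the following is only a sketch, reusing the secret names visible in this diff ('glhf', 'bolo', 'yolo', 'model', 'model2', 'MONGO_URI'), not code from the commit.

    import os

    # Secret names taken from the diff above; adjust if app.py uses others.
    REQUIRED_VARS = ['glhf', 'bolo', 'yolo', 'model', 'model2', 'MONGO_URI']

    missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
    if missing:
        # Fail fast with the exact list of missing settings.
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")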
@@ -96,8 +95,8 @@ def encode_local_image(image_file):
 # Image description function, calling external inference model
 def inference_calling_idefics(image_path, question=""):
     system_prompt = os.getenv('USER_PROMPT')
-    model_id =
-    client = InferenceClient(
+    model_id = model2
+    client = InferenceClient(model=model_id)
 
     # Use the fixed `encode_local_image` to encode the image
     image_base64 = encode_local_image(image_path)
@@ -106,12 +105,12 @@ def inference_calling_idefics(image_path, question=""):
         return "Error: Invalid image or unable to encode image."
 
     image_info = f"data:image/png;base64,{image_base64}"
-    prompt =
+    prompt = question if question != "" else 'Describe this image without question mark'
 
     try:
         response = ""
         for message in client.chat_completion(
-            model=
+            model=image_model,
             messages=[
                 {
                     "role": "system",
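A note on the two hunks above: the rewritten `inference_calling_idefics` builds its client from `model_id = model2`, while the `chat_completion` call a few lines later passes `model=image_model`, a name that does not appear anywhere in this diff. Unless `image_model` is defined elsewhere in app.py, that line would raise a NameError at call time. The sketch below shows one consistent arrangement, assuming `huggingface_hub.InferenceClient` and the base64 data URL built earlier in the function; the helper name `describe_with_one_model` is invented for illustration and is not part of the commit.

    from huggingface_hub import InferenceClient

    def describe_with_one_model(image_info, prompt, system_prompt, model_id):
        # One model id, set once on the client and reused for the streamed call.
        client = InferenceClient(model=model_id)
        response = ""
        for message in client.chat_completion(
            messages=[
                {"role": "system", "content": system_prompt},
                {
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": image_info}},
                        {"type": "text", "text": prompt},
                    ],
                },
            ],
            max_tokens=512,
            stream=True,
        ):
            if message.choices[0].delta.content is not None:
                response += message.choices[0].delta.content
        return response

Passing the model once, either to the constructor or to each `chat_completion` call, is enough; the committed code currently does both, with two different names.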
@@ -153,35 +152,35 @@ async def get_bot_id():
 
 # OpenAI completion handler
 async def get_completion(event, user_id, prompt):
-    [29 removed lines; their contents are not rendered in this diff view]
+    history = get_chat_history(user_id)
+    messages = [
+        {"role": "system", "content": system_prompt},
+        *history,
+        {"role": "user", "content": prompt},
+    ]
+    try:
+        completion = openai_client.chat.completions.create(
+            model=model,
+            messages=messages,
+            max_tokens=512,
+            temperature=1.04,
+            top_p=0.9,
+            frequency_penalty=0.9,
+            presence_penalty=0.9,
+            stream=True
+        )
+        message = ""
+        bot_message = event.respond('♥')
+        async for chunk in completion:
+            if chunk.choices[0].delta.content is not None:
+                message += chunk.choices[0].delta.content
+                bot_message.edit(message)
+    except Exception as e:
+        message = "Whoops!"
+        print(e)
+    update_chat_history(user_id, "user", prompt)
+    update_chat_history(user_id, "assistant", message)
+    return message
 
 # Telegram bot commands
 @client.on(events.NewMessage(pattern='/start'))
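Two details in the new `get_completion` body deserve a second look. Telethon's `event.respond()` and `Message.edit()` are coroutines, so `bot_message = event.respond('♥')` and `bot_message.edit(message)` presumably need to be awaited, and `async for chunk in completion` only works when the client is the asynchronous `AsyncOpenAI` variant; the synchronous `OpenAI` client created at the top of app.py returns a plain iterator when `stream=True`. A minimal sketch under those assumptions, keeping the sampling parameters and the `get_chat_history`/`update_chat_history` helpers exactly as they appear in the diff:

    from openai import AsyncOpenAI

    openai_client = AsyncOpenAI(api_key=openai_api_key, base_url=api_url)

    async def get_completion(event, user_id, prompt):
        history = get_chat_history(user_id)
        messages = [
            {"role": "system", "content": system_prompt},
            *history,
            {"role": "user", "content": prompt},
        ]
        message = ""
        try:
            completion = await openai_client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=512,
                temperature=1.04,
                top_p=0.9,
                frequency_penalty=0.9,
                presence_penalty=0.9,
                stream=True,
            )
            bot_message = await event.respond('♥')  # respond() must be awaited
            async for chunk in completion:          # AsyncOpenAI yields an async iterator
                if chunk.choices[0].delta.content is not None:
                    message += chunk.choices[0].delta.content
            await bot_message.edit(message)         # single edit once the stream finishes
        except Exception as e:
            message = "Whoops!"
            print(e)
        update_chat_history(user_id, "user", prompt)
        update_chat_history(user_id, "assistant", message)
        return message

Editing the Telegram message once at the end, rather than on every chunk as the committed loop appears to do, also avoids Telegram's flood limits on frequent message edits.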
@@ -204,24 +203,24 @@ async def reset(event):
 async def handle_message(event):
     async with client.action(event.chat_id, 'typing'):
         await asyncio.sleep(1)
-        [18 removed lines; their contents are not rendered in this diff view]
+        bot_id = await get_bot_id()
+        try:
+            user_id = event.chat_id
+            if event.sender_id == bot_id:
+                return
+            user_message = event.raw_text
+            if event.photo:
+                photo = await event.download_media()
+                if photo:
+                    image_description = describe_image(photo, user_message)
+                    user_message += f"\n\nI sent you an image. Content of the image: {image_description}"
+            if user_message.startswith('/start') or user_message.startswith('/help') or user_message.startswith('/reset'):
+                return
+            response = await get_completion(event, user_id, user_message)
+            await event.respond(response)
+        except Exception as e:
+            print(f"An error occurred: {e}")
+            await event.respond("Whoopsie!")
 
 # Gradio interface
 def launch_gradio():
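One more hedged suggestion on the handler above: `describe_image(photo, user_message)` presumably wraps the blocking `inference_calling_idefics` call, so running it directly inside the Telethon handler stalls the event loop for the whole duration of the image description. If that matters, the call can be pushed onto a worker thread with `asyncio.to_thread` (Python 3.9+), leaving the rest of the handler unchanged:

    if event.photo:
        photo = await event.download_media()
        if photo:
            # Run the blocking inference call in a thread so the bot keeps
            # serving other chats while the image is being described.
            image_description = await asyncio.to_thread(describe_image, photo, user_message)
            user_message += f"\n\nI sent you an image. Content of the image: {image_description}"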