Spaces:
Paused
Paused
File size: 8,705 Bytes
5dddbd5 ca7635c 8b4a19c ca7635c 5dddbd5 ca7635c 5dddbd5 ca7635c f5aea65 c1cf8fd 1fd740a c1cf8fd cded38f 5dddbd5 ca7635c 98ba2b1 ca7635c cded38f ca7635c cded38f ca7635c 9995b09 ca7635c 4cf79aa ca7635c 4cf79aa ca7635c 5dddbd5 ca7635c 9995b09 ca7635c cded38f ca7635c cded38f eb1e0c3 98ba2b1 cded38f eb1e0c3 98ba2b1 c989b7d cded38f c989b7d 1ea6945 c989b7d cded38f 01961ed 21221a0 304b01e eb1e0c3 304b01e eb1e0c3 4cf79aa ca7635c cded38f ca7635c 304b01e cded38f 21221a0 9995b09 1ea6945 9995b09 6bfd132 9995b09 6bfd132 9995b09 ca7635c cded38f 5dddbd5 304b01e 5dddbd5 304b01e 5dddbd5 ca7635c 4cf79aa ca7635c 4cf79aa 1fd740a 5dddbd5 98ba2b1 cecc149 21221a0 cecc149 98ba2b1 9995b09 98ba2b1 5dddbd5 cded38f 8b4a19c 5a1de4d cecc149 8b4a19c cded38f 5dddbd5 ca7635c 1fd740a ca7635c cded38f 4cf79aa ca7635c 304b01e ca7635c b789864 1fd740a 4cf79aa 5dddbd5 cded38f 5dddbd5 1fd740a 8b4a19c 1fd740a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 |
import os
import time
import threading
import base64
from io import BytesIO
import gradio as gr
import asyncio
from collections import OrderedDict
from datetime import datetime
import requests
from openai import OpenAI
from telethon import TelegramClient, events
from PIL import Image
from huggingface_hub import InferenceClient
import pymongo
from pymongo import MongoClient
def load_system_prompt():
    """Read and return the bot's system prompt from prompt.txt.

    Raises FileNotFoundError if prompt.txt is absent (intentional: the bot
    cannot run without its persona prompt).
    """
    # Explicit UTF-8: the default locale encoding varies by host and would
    # corrupt any non-ASCII characters in the prompt.
    with open('prompt.txt', 'r', encoding='utf-8') as file:
        return file.read()
system_prompt = load_system_prompt()
# Environment variables
# NOTE(review): several env var names look deliberately obfuscated --
# 'glhf' is the OpenAI-compatible API key, 'bolo' a second key used only
# by keep_alive(), 'yolo' the API base URL. Confirm against deployment config.
api_id = os.getenv('api_id')        # Telegram API id
api_hash = os.getenv('api_hash')    # Telegram API hash
bot_token = os.getenv('bot_token')  # Telegram bot token
openai_api_key = os.getenv('glhf')  # key for the OpenAI-compatible endpoint
ping_key = os.getenv('bolo')        # key used by the keep_alive() pinger
api_url = os.getenv('yolo')         # base URL of the OpenAI-compatible endpoint
model = os.getenv('model')          # chat model name
model2 = os.getenv('model2')        # model used by inference_calling_idefics for images
mongoURI = os.getenv('MONGO_URI')   # MongoDB connection string
# OpenAI and MongoDB clients
openai_client = OpenAI(api_key=openai_api_key, base_url=api_url)
mongo_client = MongoClient(mongoURI)
db = mongo_client['Scarlett']
chat_collection = db['chats']
# In-memory LRU cache of per-user histories; MongoDB remains source of truth.
local_chat_history = OrderedDict()
MAX_LOCAL_USERS = 5  # cache at most this many users' histories locally
# Functions for MongoDB-based chat history storage and retrieval
def get_history_from_mongo(user_id):
    """Fetch the stored message list for *user_id*; empty list when absent."""
    doc = chat_collection.find_one({"user_id": user_id})
    if not doc:
        return []
    return doc.get("messages", [])
def store_message_in_mongo(user_id, role, content):
    """Append one {role, content} message to the user's Mongo document,
    keeping only the most recent 20 messages ($slice: -20)."""
    entry = {"role": role, "content": content}
    push_spec = {
        "messages": {
            "$each": [entry],
            "$slice": -20,
        }
    }
    chat_collection.update_one(
        {"user_id": user_id},
        {"$push": push_spec},
        upsert=True,
    )
def get_chat_history(user_id):
    """Return the user's chat history, serving from the local LRU cache
    and falling back to MongoDB on a miss (evicting the oldest entry
    if the cache grows past MAX_LOCAL_USERS)."""
    try:
        local_chat_history.move_to_end(user_id)
        return local_chat_history[user_id]
    except KeyError:
        pass
    history = get_history_from_mongo(user_id)
    local_chat_history[user_id] = history
    while len(local_chat_history) > MAX_LOCAL_USERS:
        local_chat_history.popitem(last=False)
    return history
def update_chat_history(user_id, role, content):
    """Append one message to the user's history in both the local LRU
    cache (trimmed to the last 20) and MongoDB."""
    cached = local_chat_history.get(user_id)
    if cached is None:
        cached = get_history_from_mongo(user_id)
        local_chat_history[user_id] = cached
    cached.append({"role": role, "content": content})
    local_chat_history[user_id] = cached[-20:]
    local_chat_history.move_to_end(user_id)
    while len(local_chat_history) > MAX_LOCAL_USERS:
        local_chat_history.popitem(last=False)
    store_message_in_mongo(user_id, role, content)
# Image-file -> base64 helper (always re-encodes through PNG)
def encode_local_image(image_file):
    """Return the image at *image_file* as a base64-encoded PNG string,
    or None if the file cannot be opened/encoded."""
    try:
        img = Image.open(image_file)
        buffer = BytesIO()
        img.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode('ascii')
    except Exception as exc:
        print(f"Error encoding image: {exc}")
        return None
# Image description function, calling external inference model
def inference_calling_idefics(image_path, question=""):
    """Describe an image (or answer *question* about it) via the HF
    Inference API, streaming the completion and returning the full text.

    Returns an error string (never raises) when encoding or the API call fails.
    """
    # Vision-specific system prompt from env; intentionally shadows the
    # module-level `system_prompt`.
    system_prompt = os.getenv('USER_PROMPT')
    model_id = model2
    client = InferenceClient(model=model_id)
    # Use the fixed `encode_local_image` to encode the image
    image_base64 = encode_local_image(image_path)
    if not image_base64:
        return "Error: Invalid image or unable to encode image."
    image_info = f"data:image/png;base64,{image_base64}"
    prompt = question if question != "" else 'Describe this image without question mark'
    try:
        response = ""
        for message in client.chat_completion(
            # BUG FIX: was `model=image_model` -- an undefined name that
            # raised NameError on every call and was silently swallowed by
            # the except below, so image description never worked.
            model=model_id,
            messages=[
                {
                    "role": "system",
                    "content": [
                        {"type": "text", "text": system_prompt},
                    ],
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": image_info}},
                        {"type": "text", "text": prompt},
                    ],
                }
            ],
            max_tokens=2048,
            stream=True,
        ):
            # Guard against content=None in trailing stream chunks (would
            # raise TypeError on str + None); matches get_completion's guard.
            delta = message.choices[0].delta.content
            if delta:
                response += delta
        return response
    except Exception as e:
        print(f"Error in inference call: {e}")
        return "Error while processing the image."
def describe_image(client, image_path, question=""):
    """Thin wrapper around inference_calling_idefics that converts any
    unexpected exception into a friendly error string. (*client* is unused;
    kept for interface compatibility.)"""
    try:
        return inference_calling_idefics(image_path, question)
    except Exception as err:
        print(err)
        return "Error while seeing the image."
# Telegram bot setup
# Creates the Telethon client and logs in immediately; .start() returns the
# started client, so `client` is ready for the @client.on handlers below.
client = TelegramClient('bot', api_id, api_hash).start(bot_token=bot_token)
async def get_bot_id():
    """Return the bot's own Telegram user id.

    Cached after the first call: handle_message invokes this for every
    incoming update, and the id never changes, so repeating the
    client.get_me() network round-trip per message is pure waste.
    """
    cached = getattr(get_bot_id, "_cached_id", None)
    if cached is None:
        me = await client.get_me()
        cached = get_bot_id._cached_id = me.id
    return cached
# OpenAI completion handler
async def get_completion(client, event, user_id, prompt):
    """Generate the LLM reply for *prompt*, send it via *event*, and record
    both sides of the exchange in the chat history.

    Returns the reply text ("..........." placeholder on failure -- note
    that the placeholder is also stored in history, preserving the
    original behavior).
    """
    history = get_chat_history(user_id)
    messages = [
        {"role": "system", "content": system_prompt},
        *history,
        {"role": "user", "content": prompt},
    ]

    def _collect_stream():
        # Blocking network/streaming call -- runs in a worker thread (see
        # run_in_executor below) so the Telethon event loop keeps serving
        # other chats instead of freezing for the whole generation.
        completion = openai_client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=512,
            temperature=0.7,
            top_p=1.0,
            frequency_penalty=1.0,
            presence_penalty=1.0,
            stream=True
        )
        text = ""
        for chunk in completion:
            # Trailing chunks may carry content=None; skip them.
            if chunk.choices[0].delta.content is not None:
                text += chunk.choices[0].delta.content
        return text

    try:
        # FIX: previously the synchronous OpenAI stream was consumed directly
        # inside this coroutine, blocking the entire asyncio event loop.
        message = await asyncio.get_running_loop().run_in_executor(None, _collect_stream)
        await event.respond(message)
    except Exception as e:
        message = "..........."
        await event.respond(message)
        print(e)
    update_chat_history(user_id, "user", prompt)
    update_chat_history(user_id, "assistant", message)
    return message
# Telegram bot commands
@client.on(events.NewMessage(pattern='/start'))
async def start(event):
    """Liveness check: reply with a greeting."""
    greeting = "Hello!"
    await event.respond(greeting)
@client.on(events.NewMessage(pattern='/help'))
async def help(event):
    """List the bot's available commands."""
    usage_lines = [
        "Here is how I can help you:",
        "/start - To check if I am alive",
        "/help - Show this message",
        "/reset - Reset chat history",
    ]
    await event.respond("\n".join(usage_lines))
@client.on(events.NewMessage(pattern='/reset'))
async def reset(event):
    """Erase this chat's history from MongoDB and the local cache."""
    user_id = event.chat_id
    chat_collection.delete_one({"user_id": user_id})
    local_chat_history.pop(user_id, None)
    await event.respond("History has been reset.")
@client.on(events.NewMessage)
async def handle_message(event):
    """Catch-all handler: build the prompt (adding an image description for
    photos) and reply through the LLM."""
    # PERF FIX: the command guard used to run LAST, after get_me() and after
    # downloading + describing any attached photo -- all of that work was
    # thrown away for every /start, /help and /reset (handled above).
    if event.raw_text.startswith(('/start', '/help', '/reset')):
        return
    # Ignore the bot's own outgoing messages so it never replies to itself.
    bot_id = await get_bot_id()
    if event.sender_id == bot_id:
        return
    user_id = event.chat_id
    user_message = event.raw_text
    if event.photo:
        photo = await event.download_media()
        if photo:
            image_description = describe_image(client, photo, user_message)
            user_message += f"\n\nI sent you an image. Content of the image: {image_description}"
    try:
        async with client.action(event.chat_id, 'typing'):
            await asyncio.sleep(2)
            await get_completion(client, event, user_id, user_message)
    except Exception as e:
        print(f"An error occurred: {e}")
        await event.respond("Whoopsie!")
# Gradio interface
def launch_gradio():
    """Serve a small static Gradio landing page describing the bot."""
    welcome_message = """
    # Meet Scarlett!
    - Scarlett is a girl residing in Telegram
    - She can chat with you without any censorship.
    - Head over to https://t.me/Scarlett_Rogue_isNotBot
    - Thanks to https://glhf.chat for powering this!
    """
    page_markdown = f"""
        # Follow me: https://example.com
        {welcome_message}
        """
    with gr.Blocks() as demo:
        gr.Markdown(page_markdown)
    demo.launch(show_api=False)
# Keep-alive functionality for the bot
def keep_alive():
    """Ping the LLM endpoint every 30 minutes, forever, so the backend
    stays warm; failures are logged and never abort the loop."""
    ping_client = OpenAI(api_key=ping_key, base_url=api_url)
    ping_messages = [
        {"role": "system", "content": "Repeat what I say."},
        {"role": "user", "content": "Repeat: 'Ping success'"},
    ]
    while True:
        try:
            reply = ping_client.chat.completions.create(
                model=model,
                messages=ping_messages,
                max_tokens=10,
                temperature=0.1,
                top_p=0.1,
            )
            print(reply.choices[0].message.content)
        except Exception as exc:
            print(f"Keep-alive request failed: {exc}")
        time.sleep(1800)
# Main execution
if __name__ == "__main__":
    # FIX: daemon=True so the infinite keep_alive loop (and the Gradio
    # server thread) cannot keep the process alive after the Telegram
    # client disconnects -- previously the process could never exit cleanly.
    threading.Thread(target=keep_alive, daemon=True).start()
    threading.Thread(target=launch_gradio, daemon=True).start()
    # Blocks until the Telegram client disconnects.
    client.run_until_disconnected()