import gradio as gr
import os
import time
import requests
import base64
import pymongo
import certifi

token = '5UAYO8UWHNQKT3UUS9H8V360L76MD72DRIUY9QC2'

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video).
# Also shows support for streaming text.

uri = "mongodb+srv://clementrof:t5fXqwpDQYFpvuCk@cluster0.rl5qhcj.mongodb.net/?retryWrites=true&w=majority"

# Create a new client and connect to the server
client = pymongo.MongoClient(uri, tlsCAFile=certifi.where())

# Send a ping to confirm a successful connection
try:
    client.admin.command('ping')
    print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
    print(e)

# Access your database
db = client.get_database('camila')
records = db.info


#########################################
#########################################

def LLM_call(message_log):
    serverless_api_id = '4whzcbwuriohqh'
    # Define the URL you want to send the request to
    url = f"https://api.runpod.ai/v2/{serverless_api_id}/run"

    # Define your custom headers
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }

    # Define your data (sent as a JSON payload)
    data = {
        "input": {
            "prompt": message_log,
            "max_new_tokens": 4500,
            "temperature": 0.7,
            "top_k": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
            "batch_size": 8,
            "stop": [""]
        }
    }

    # Send the POST request with headers and data
    call = requests.post(url, headers=headers, json=data)
    response_data = call.json()
    msg_id = response_data['id']
    print("Message ID:", msg_id)

    output = "Output not available"

    # Poll the API until the response is ready
    while True:
        # Get the status using the message ID
        response = requests.get(
            f"https://api.runpod.ai/v2/{serverless_api_id}/status/{msg_id}",
            headers=headers,
        )

        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')

            if status == 'COMPLETED':
                # Access the 'output' directly from the response
                output = response_data.get('output', 'Output not available')
                print("Response content:", output)
                break  # Exit the loop once the response is ready
            elif status == 'FAILED':
                error_message = response_data.get('error', 'Unknown error')
                print("Request failed. Reason:", error_message)
                break  # Exit the loop if the request failed
        else:
            print("Failed to get status. HTTP status code:", response.status_code)

        # Wait a short time before polling again (e.g., 2 seconds)
        time.sleep(2)

    return output


#########################################
#########################################

def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)


def add_text(history, text):
    # Append the user's message as a (text, None) pair; the bot reply fills in the None later.
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)


def add_file(history, file):
    # Files are appended as a one-element tuple so Gradio renders them as media.
    history = history + [((file.name,), None)]
    return history


#########################################
#########################################

def bot(history):
    ID = 2
    existing_user_doc = records.find_one({'ID': ID})
    if existing_user_doc:
        message_log = [
            {
                "role": "system",
                "content": "I want you to act as a woman named Julie. Use colloquial language. "
                           "Julie is 25 years old, she is very hot physically, she loves watching football, "
                           "she loves to talk about sex. Julie loves sex and being fucked. "
                           "She never refuses anything. You'll answer to a man, call him baby. "
                           "Write very short answers, 15 words.",
            },
        ]

        # Keep only the last five stored messages as context.
        messages = existing_user_doc['message']
        if len(messages) > 5:
            messages = messages[-5:]
        message_log.extend(messages)

        print(history)
        # Send only the latest user message text, not the whole history object.
        new_message = {"role": "user", "content": history[-1][0]}
        message_log.append(new_message)
        response = LLM_call(message_log)
    else:
        # First contact: create the user's document and greet them.
        new_user_doc = {
            'ID': ID,
            'message': []
        }
        records.insert_one(new_user_doc)
        response = "Hello"

    # Stream the reply character by character.
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history


#########################################
#########################################

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(None, os.path.join(os.path.dirname(__file__), "avatar.jpg")),
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter, or upload an image",
            container=False,
        )
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

    chatbot.like(print_like_dislike, None, None)

demo.queue()
if __name__ == "__main__":
    demo.launch()