import gradio as gr
import requests
import os
import json
import random
from elo import update_elo_ratings  # Custom function for Elo ratings

# Load the chatbot URLs and their respective model names from a JSON file
with open('chatbot_urls.json', 'r') as file:
    chatbots = json.load(file)

# Per-session data is kept in the gr.State dictionary passed to each handler.

# Initialize or fetch the user-specific Elo ratings stored in the session state
def get_user_elo_ratings(state):
    if 'elo_ratings' not in state:
        state['elo_ratings'] = read_elo_ratings()
    return state['elo_ratings']

# Read Elo ratings from disk, defaulting every model to 1200 if no file exists yet
def read_elo_ratings():
    try:
        with open('elo_ratings.json', 'r') as file:
            elo_ratings = json.load(file)
    except FileNotFoundError:
        elo_ratings = {model: 1200 for model in chatbots.keys()}
    return elo_ratings

# Persist Elo ratings to disk
def write_elo_ratings(elo_ratings):
    with open('elo_ratings.json', 'w') as file:
        json.dump(elo_ratings, file, indent=4)

# Call a RunPod endpoint and return the generated text with the echoed prompt stripped
def get_bot_response(url, prompt):
    payload = {
        "input": {
            "prompt": prompt,
            "sampling_params": {
                "max_new_tokens": 16,
                "temperature": 0.7,
            }
        }
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": os.environ.get("RUNPOD_TOKEN")
    }
    response = requests.post(url, json=payload, headers=headers)
    return response.json()['output'][0]['generated_text'].replace(prompt, "")

# Pick two random bots, query both, and remember which pair was shown in the session state
def chat_with_bots(state, user_input):
    bot_names = list(chatbots.keys())
    random.shuffle(bot_names)
    bot1_url, bot2_url = chatbots[bot_names[0]], chatbots[bot_names[1]]
    state['last_bots'] = [bot_names[0], bot_names[1]]

    bot1_response = get_bot_response(bot1_url, user_input)
    bot2_response = get_bot_response(bot2_url, user_input)
    return bot1_response, bot2_response

# Update the Elo ratings after a vote; winner_index is 0 for Model A, 1 for Model B
def update_ratings(state, winner_index):
    elo_ratings = get_user_elo_ratings(state)
    winner = state['last_bots'][winner_index]
    loser = state['last_bots'][1 - winner_index]

    elo_ratings = update_elo_ratings(elo_ratings, winner, loser)
    write_elo_ratings(elo_ratings)
    return f"Updated ELO ratings:\n{winner}: {elo_ratings[winner]}\n{loser}: {elo_ratings[loser]}"

# Vote handler for Model A (winner index 0)
def vote_up_model(state, chatbot):
    update_message = update_ratings(state, 0)
    chatbot.append((None, update_message))
    return chatbot

# Vote handler for Model B (winner index 1)
def vote_down_model(state, chatbot):
    update_message = update_ratings(state, 1)
    chatbot.append((None, update_message))
    return chatbot

# Handle a user prompt: query both bots, append the exchange to both chat windows,
# clear the textbox, and enable the voting buttons
def user_ask(state, chatbot1, chatbot2, textbox):
    user_input = textbox
    bot1_response, bot2_response = chat_with_bots(state, user_input)

    # gr.Chatbot expects a list of (user_message, bot_message) pairs
    chatbot1.append((user_input, bot1_response))
    chatbot2.append((user_input, bot2_response))

    state['elo_ratings'] = get_user_elo_ratings(state)

    # Return gr.update for the buttons so they become interactive after the first prompt
    return state, chatbot1, chatbot2, "", gr.update(interactive=True), gr.update(interactive=True)

# ... [Rest of your existing functions] ...
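# --- Reference sketch (assumption) -----------------------------------------
# The custom `elo` module imported above is not shown in this file. The unused
# helper below is only a minimal sketch of what `update_elo_ratings` might look
# like, assuming a standard Elo update with K=32; the real module may use a
# different K factor or formula.
def _reference_update_elo_ratings(elo_ratings, winner, loser, k=32):
    # Expected score of the winner under the logistic Elo model
    expected_winner = 1 / (1 + 10 ** ((elo_ratings[loser] - elo_ratings[winner]) / 400))
    # Winner gains and loser loses the same number of points, scaled by K
    elo_ratings[winner] += k * (1 - expected_winner)
    elo_ratings[loser] -= k * (1 - expected_winner)
    return elo_ratings
# ----------------------------------------------------------------------------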
with gr.Blocks() as demo:
    state = gr.State({})

    with gr.Row():
        # First column for Model A
        with gr.Column():
            chatbot1 = gr.Chatbot(label='Model A')
            with gr.Row():
                upvote_btn_a = gr.Button(value="👍 Upvote A", interactive=False)

        # Second column for Model B
        with gr.Column():
            chatbot2 = gr.Chatbot(label='Model B')
            with gr.Row():
                upvote_btn_b = gr.Button(value="👍 Upvote B", interactive=False)

    # Textbox and submit button at the bottom
    with gr.Row():
        textbox = gr.Textbox(placeholder="Enter your prompt and press ENTER")
        submit_btn = gr.Button(value="Send")

    # Interaction logic: submitting a prompt queries both bots, clears the textbox,
    # and enables the voting buttons
    textbox.submit(user_ask,
                   [state, chatbot1, chatbot2, textbox],
                   [state, chatbot1, chatbot2, textbox, upvote_btn_a, upvote_btn_b])
    submit_btn.click(user_ask,
                     [state, chatbot1, chatbot2, textbox],
                     [state, chatbot1, chatbot2, textbox, upvote_btn_a, upvote_btn_b])

    # Upvoting A records Model A as the winner; upvoting B records Model B as the winner
    upvote_btn_a.click(vote_up_model, [state, chatbot1], [chatbot1])
    upvote_btn_b.click(vote_down_model, [state, chatbot2], [chatbot2])

# Start the interface
demo.queue()
demo.launch(server_name='0.0.0.0', server_port=7860)
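# --- Usage notes (assumptions) ----------------------------------------------
# chatbot_urls.json is expected to map display names to RunPod endpoint URLs,
# for example (hypothetical endpoint IDs):
#   {
#       "model-a": "https://api.runpod.ai/v2/<endpoint-id-a>/runsync",
#       "model-b": "https://api.runpod.ai/v2/<endpoint-id-b>/runsync"
#   }
# The RUNPOD_TOKEN environment variable must hold a valid RunPod API key before
# the script is started, e.g.:
#   RUNPOD_TOKEN=<your-token> python app.py
# elo_ratings.json is created on the first recorded vote if it does not already
# exist, with every model starting at 1200.
# ----------------------------------------------------------------------------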