jordonpeter01 committed • Commit d9076ee
Parent(s): Duplicate from jordonpeter01/rlhf-arena-aws
Browse files
- .gitattributes +34 -0
- README.md +20 -0
- app.py +604 -0
- calculate_elo.py +309 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,20 @@
---
title: Community ChatBot Arena
emoji: 🤖⚔️🤖
colorFrom: blue
colorTo: yellow
sdk: gradio
sdk_version: 3.33.1
app_file: app.py
pinned: true
license: apache-2.0
duplicated_from: jordonpeter01/rlhf-arena-aws
---

# OpenAccess AI Collective Community ChatBot Arena

- Arena: https://huggingface.co/spaces/openaccess-ai-collective/rlhf-arena
- GitHub: https://github.com/OpenAccess-AI-Collective/rlhf-arena
- Built using Runpod Serverless. See our writeup here: https://medium.com/@winglian/inference-any-llm-with-serverless-in-15-minutes-69eeb548a41d
- Want to have your language model added to the Arena? [Create an Issue](https://github.com/OpenAccess-AI-Collective/rlhf-arena/issues) or reach out on [Discord](https://discord.gg/PugNNHAF5r)
- [💵 Consider Donating on our Patreon](http://patreon.com/OpenAccessAICollective)
app.py
ADDED
@@ -0,0 +1,604 @@
import concurrent
import functools
import logging
import os
import random
import re
import traceback
import uuid
import datetime
from collections import deque
import itertools

from collections import defaultdict
from time import sleep
from typing import Generator, Tuple, List, Dict

import boto3
import gradio as gr
import requests
from datasets import load_dataset

logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
logging.getLogger("httpx").setLevel(logging.WARNING)

# Create a DynamoDB client
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Get a reference to the table
table = dynamodb.Table('oaaic_chatbot_arena')


def prompt_human_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["###Human: "+item[0], "###Assistant: "+item[1]])
                   for item in history])


def prompt_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["### Instruction: "+item[0], "### Response: "+item[1]])
                   for item in history])


def prompt_chat(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]])
                   for item in history])


def prompt_roleplay(system_msg, history):
    return "<|system|>" + system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]])
                   for item in history])

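# For illustration, a minimal sketch of what prompt_chat renders for a
# one-turn history (histories are [user, assistant] pairs, as used by
# Gradio's Chatbot component):
#
#   prompt_chat("You are a helpful assistant.", [["What is RLHF?", ""]])
#   -> "You are a helpful assistant.\nUSER: What is RLHF?\nASSISTANT: "
#
# The trailing space is stripped later (see chat() below) because some
# models emit a ZWSP when the prompt ends with whitespace.
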
class Pipeline:
    prefer_async = True

    def __init__(self, endpoint_id, name, prompt_fn, stop_tokens=None):
        self.endpoint_id = endpoint_id
        self.name = name
        self.prompt_fn = prompt_fn
        stop_tokens = stop_tokens or []
        self.generation_config = {
            "max_new_tokens": 1024,
            "top_k": 40,
            "top_p": 0.90,
            "temperature": 0.72,
            "repetition_penalty": 1.22,
            "last_n_tokens": 64,
            "seed": -1,
            "batch_size": 8,
            "threads": -1,
            "stop": ["</s>", "USER:", "### Instruction:"] + stop_tokens,
        }

    def get_generation_config(self):
        return self.generation_config.copy()

    def __call__(self, prompt, config=None) -> Generator[List[Dict[str, str]], None, None]:
        input = config if config else self.generation_config.copy()
        input["prompt"] = prompt

        if self.prefer_async:
            url = f"https://api.runpod.ai/v2/{self.endpoint_id}/run"
        else:
            url = f"https://api.runpod.ai/v2/{self.endpoint_id}/runsync"
        headers = {
            "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"
        }
        response = requests.post(url, headers=headers, json={"input": input})

        if response.status_code == 200:
            data = response.json()
            task_id = data.get('id')
            return self.stream_output(task_id)
        # on failure, log and hand back an empty generator so callers can still iterate
        logging.error("runpod request failed: %s %s", response.status_code, response.text)
        return iter(())

    def stream_output(self, task_id) -> Generator[List[Dict[str, str]], None, None]:
        url = f"https://api.runpod.ai/v2/{self.endpoint_id}/stream/{task_id}"
        headers = {
            "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"
        }

        while True:
            try:
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    data = response.json()
                    # join the output chunks returned by this poll of the stream endpoint
                    yield [{"generated_text": "".join([s["output"] for s in data["stream"]])}]
                    if data.get('status') == 'COMPLETED':
                        return
                elif response.status_code >= 400:
                    logging.error(response.json())
            except requests.exceptions.ConnectionError:
                # requests' ConnectionError (not the builtin); retry on the next loop
                pass

    def poll_for_status(self, task_id):
        url = f"https://api.runpod.ai/v2/{self.endpoint_id}/status/{task_id}"
        headers = {
            "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"
        }

        while True:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                data = response.json()
                if data.get('status') == 'COMPLETED':
                    return [{"generated_text": data["output"]}]
            elif response.status_code >= 400:
                logging.error(response.json())
            # Sleep for 3 seconds between each request
            sleep(3)

    def transform_prompt(self, system_msg, history):
        return self.prompt_fn(system_msg, history)

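# Hedged usage sketch for Pipeline (assumes RUNPOD_AI_API_KEY is set and the
# endpoint is live; the endpoint id here is the hermes-13b entry from
# AVAILABLE_MODELS below):
#
#   pipe = Pipeline("p0zqb2gkcwp0ww", "hermes-13b", prompt_instruct)
#   prompt = pipe.transform_prompt("You are helpful.", [["Hello!", ""]])
#   for chunk in pipe(prompt.rstrip()):
#       print(chunk[0]["generated_text"], end="")  # text from each poll
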
AVAILABLE_MODELS = {
    # model name -> (runpod endpoint id, prompt template fn[, extra stop tokens])
    "hermes-13b": ("p0zqb2gkcwp0ww", prompt_instruct),
    "manticore-13b-chat": ("u6tv84bpomhfei", prompt_chat),
    "airoboros-13b": ("rglzxnk80660ja", prompt_chat),
    "wizard-vicuna-13b": ("9vvpikt4ttyqos", prompt_chat),
    "lmsys-vicuna-13b": ("2nlb32ydkaz6yd", prompt_chat),
    "supercot-13b": ("0be7865dwxpwqk", prompt_instruct, ["Instruction:"]),
    "mpt-7b-instruct": ("jpqbvnyluj18b0", prompt_instruct),
    "guanaco-13b": ("yxl8w98z017mw2", prompt_instruct),
    # "minotaur-13b": ("6f1baphxjpjk7b", prompt_chat),
    "minotaur-13b-fixed": ("sjnkstd3e40ojj", prompt_roleplay),
    "wizardlm-13b": ("k0chcxsgukov8x", prompt_instruct),
    "selfee-13b": ("50rnvxln9bmf4c", prompt_instruct),
    "robin-v2-13b": ("4cw4vwzzhsl5pq", prompt_human_instruct, ["###Human"]),
    "minotaur-15b-8k": ("zdk804d2txtt68", prompt_chat),
}

OAAIC_MODELS = [
    "minotaur-15b-8k",
    "minotaur-13b-fixed",
    "manticore-13b-chat",
    # "minotaur-mpt-7b",
]
OAAIC_MODELS_ROLEPLAY = {
    "manticore-13b-chat-roleplay": ("u6tv84bpomhfei", prompt_roleplay),
    "minotaur-13b-roleplay": ("6f1baphxjpjk7b", prompt_roleplay),
    "minotaur-13b-fixed-roleplay": ("sjnkstd3e40ojj", prompt_roleplay),
    "minotaur-15b-8k-roleplay": ("zdk804d2txtt68", prompt_roleplay),
    # "minotaur-mpt-7b": ("vm1wcsje126x1x", prompt_chat),
}

_memoized_models = defaultdict()


def get_model_pipeline(model_name):
    if not _memoized_models.get(model_name):
        kwargs = {}
        if model_name in AVAILABLE_MODELS:
            if len(AVAILABLE_MODELS[model_name]) >= 3:
                kwargs["stop_tokens"] = AVAILABLE_MODELS[model_name][2]
            _memoized_models[model_name] = Pipeline(AVAILABLE_MODELS[model_name][0], model_name, AVAILABLE_MODELS[model_name][1], **kwargs)
        elif model_name in OAAIC_MODELS_ROLEPLAY:
            _memoized_models[model_name] = Pipeline(OAAIC_MODELS_ROLEPLAY[model_name][0], model_name, OAAIC_MODELS_ROLEPLAY[model_name][1], **kwargs)
    return _memoized_models.get(model_name)

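# Pipelines are built lazily and memoized per model name, so repeated lookups
# reuse the same object; unknown names fall through and return None:
#
#   pipe = get_model_pipeline("manticore-13b-chat")
#   assert pipe is get_model_pipeline("manticore-13b-chat")
#   assert get_model_pipeline("no-such-model") is None
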
start_message = """Below is a dialogue between a USER and an ASSISTANT. The USER may ask questions, request information, or provide instructions for a task, often supplementing with additional context. The ASSISTANT responds accurately and effectively, offering insights, answering questions, or executing tasks to the best of its ability based on the given information.
"""


def user(message, nudge_msg, history1, history2):
    history1 = history1 or []
    history2 = history2 or []
    # Append the user's message to the conversation history
    history1.append([message, nudge_msg])
    history2.append([message, nudge_msg])

    return "", nudge_msg, history1, history2


def token_generator(generator1, generator2, mapping_fn=None, fillvalue=None):
    if not fillvalue:
        fillvalue = ''
    if not mapping_fn:
        mapping_fn = lambda x: x
    for output1, output2 in itertools.zip_longest(generator1, generator2, fillvalue=fillvalue):
        tokens1 = re.findall(r'(.*?)(\s|$)', mapping_fn(output1))
        tokens2 = re.findall(r'(.*?)(\s|$)', mapping_fn(output2))

        for token1, token2 in itertools.zip_longest(tokens1, tokens2, fillvalue=''):
            yield "".join(token1), "".join(token2)

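# token_generator drains both model streams in lockstep, splitting each chunk
# into whitespace-delimited tokens and padding the shorter side with empty
# strings. A minimal sketch with canned single-chunk streams:
#
#   gen1 = iter([[{"generated_text": "Hello world"}]])
#   gen2 = iter([[{"generated_text": "Hi"}]])
#   list(token_generator(gen1, gen2, lambda x: x[0]["generated_text"],
#                        fillvalue=[{"generated_text": ""}]))
#   -> [('Hello ', 'Hi'), ('world', ''), ('', '')]
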
def chat(history1, history2, system_msg, state):
    history1 = history1 or []
    history2 = history2 or []

    arena_bots = None
    if state and "models" in state and state['models']:
        arena_bots = state['models']
    if not arena_bots:
        arena_bots = list(AVAILABLE_MODELS.keys())
        random.shuffle(arena_bots)
        # bootstrap a new bot into the arena more often
        if "minotaur-15b-8k" not in arena_bots[0:2] and random.choice([True, False, False]):
            arena_bots.insert(random.choice([0, 1]), "minotaur-15b-8k")

    battle = arena_bots[0:2]
    model1 = get_model_pipeline(battle[0])
    model2 = get_model_pipeline(battle[1])

    messages1 = model1.transform_prompt(system_msg, history1)
    messages2 = model2.transform_prompt(system_msg, history2)

    # remove last space from assistant, some models output a ZWSP if you leave a space
    messages1 = messages1.rstrip()
    messages2 = messages2.rstrip()

    model1_res = model1(messages1)  # type: Generator[List[Dict[str, str]], None, None]
    model2_res = model2(messages2)  # type: Generator[List[Dict[str, str]], None, None]
    res = token_generator(model1_res, model2_res, lambda x: x[0]['generated_text'], fillvalue=[{'generated_text': ''}])  # type: Generator[Tuple[str, str], None, None]
    logging.info({"models": [model1.name, model2.name]})
    for t1, t2 in res:
        if t1 is not None:
            history1[-1][1] += t1
        if t2 is not None:
            history2[-1][1] += t2
        # stream the response
        # [arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state]
        yield history1, history2, "", gr.update(value=battle[0]), gr.update(value=battle[1]), {"models": [model1.name, model2.name]}
        sleep(0.05)

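# Each yield updates, in order: [arena_chatbot1, arena_chatbot2, arena_message,
# reveal1, reveal2, arena_state]. The state dict records which two models
# fought so the vote handlers below can attribute the battle, e.g.:
#
#   {"models": ["hermes-13b", "wizardlm-13b"]}
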
def chosen_one(label, choice1_history, choice2_history, system_msg, nudge_msg, rlhf_persona, state):
    if not state:
        logging.error("missing state!!!")
    # Generate a uuid for each submission
    arena_battle_id = str(uuid.uuid4())

    # Get the current timestamp
    timestamp = datetime.datetime.now().isoformat()

    # Put the item in the table
    table.put_item(
        Item={
            'arena_battle_id': arena_battle_id,
            'timestamp': timestamp,
            'system_msg': system_msg,
            'nudge_prefix': nudge_msg,
            'choice1_name': state["models"][0],
            'choice1': choice1_history,
            'choice2_name': state["models"][1],
            'choice2': choice2_history,
            'label': label,
            'rlhf_persona': rlhf_persona,
        }
    )

chosen_one_first = functools.partial(chosen_one, 1)
chosen_one_second = functools.partial(chosen_one, 2)
chosen_one_tie = functools.partial(chosen_one, 0)
# "both are bad" logs label -1; calculate_elo.py scores -1, like 0, as a draw
chosen_one_suck = functools.partial(chosen_one, -1)

leaderboard_intro = """### TBD
- This is very much a work in progress; if you'd like to help build this out, join us on [Discord](https://discord.gg/QYF8QrtEUm)

"""
# loaded once at import; later refreshes go through fetch_elo_scores()
elo_scores = load_dataset("openaccess-ai-collective/chatbot-arena-elo-scores")
elo_scores = elo_scores["train"].sort("elo_score", reverse=True)


def refresh_md():
    return leaderboard_intro + "\n" + dataset_to_markdown()


def fetch_elo_scores():
    elo_scores = load_dataset("openaccess-ai-collective/chatbot-arena-elo-scores")
    elo_scores = elo_scores["train"].sort("elo_score", reverse=True)
    return elo_scores


def dataset_to_markdown():
    dataset = fetch_elo_scores()
    # Get column names (dataset features)
    columns = list(dataset.features.keys())
    # Start markdown string with table headers
    markdown_string = "| " + " | ".join(columns) + " |\n"
    # Add markdown table row separator for headers
    markdown_string += "| " + " | ".join("---" for _ in columns) + " |\n"

    # Add each row from dataset to the markdown string
    for i in range(len(dataset)):
        row = dataset[i]
        markdown_string += "| " + " | ".join(str(row[column]) for column in columns) + " |\n"

    return markdown_string

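# Since calculate_elo.py pushes records with chatbot_name and elo_score
# fields, dataset_to_markdown() renders roughly this (scores made up):
#
#   | chatbot_name | elo_score |
#   | --- | --- |
#   | minotaur-15b-8k | 1516.27 |
#   | hermes-13b | 1483.73 |
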
"""
OpenAccess AI Chatbots chat
"""

def open_clear_chat(chat_history_state, chat_message, nudge_msg):
    chat_history_state = []
    chat_message = ''
    nudge_msg = ''
    return chat_history_state, chat_message, nudge_msg


def open_user(message, nudge_msg, history):
    history = history or []
    # Append the user's message to the conversation history
    history.append([message, nudge_msg])
    return "", nudge_msg, history


def open_chat(model_name, history, system_msg, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
    history = history or []

    model = get_model_pipeline(model_name)
    config = model.get_generation_config()
    config["max_new_tokens"] = max_new_tokens
    config["temperature"] = temperature
    config["top_p"] = top_p
    config["top_k"] = top_k
    config["repetition_penalty"] = repetition_penalty

    messages = model.transform_prompt(system_msg, history)

    # remove last space from assistant, some models output a ZWSP if you leave a space
    messages = messages.rstrip()

    model_res = model(messages, config=config)  # type: Generator[List[Dict[str, str]], None, None]
    for res in model_res:
        # tokens = re.findall(r'\s*\S+\s*', res[0]['generated_text'])
        tokens = re.findall(r'(.*?)(\s|$)', res[0]['generated_text'])
        for subtoken in tokens:
            subtoken = "".join(subtoken)
            history[-1][1] += subtoken
            # stream the response
            yield history, history, ""
            sleep(0.01)


def open_rp_chat(model_name, history, system_msg, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
    history = history or []

    model = get_model_pipeline(f"{model_name}-roleplay")
    config = model.get_generation_config()
    config["max_new_tokens"] = max_new_tokens
    config["temperature"] = temperature
    config["top_p"] = top_p
    config["top_k"] = top_k
    config["repetition_penalty"] = repetition_penalty

    messages = model.transform_prompt(system_msg, history)

    # remove last space from assistant, some models output a ZWSP if you leave a space
    messages = messages.rstrip()

    model_res = model(messages, config=config)  # type: Generator[List[Dict[str, str]], None, None]
    for res in model_res:
        tokens = re.findall(r'(.*?)(\s|$)', res[0]['generated_text'])
        # tokens = re.findall(r'\s*\S+\s*', res[0]['generated_text'])
        for subtoken in tokens:
            subtoken = "".join(subtoken)
            history[-1][1] += subtoken
            # stream the response
            yield history, history, ""
            sleep(0.01)

with gr.Blocks() as arena:
    with gr.Row():
        with gr.Column():
            gr.Markdown("""
### brought to you by OpenAccess AI Collective
- Check out [our writeup on how this was built.](https://medium.com/@winglian/inference-any-llm-with-serverless-in-15-minutes-69eeb548a41d)
- This Space runs on CPU only, and uses GGML with GPU support via Runpod Serverless.
- Responses may not stream immediately due to cold starts on Serverless.
- Some responses WILL take AT LEAST 20 seconds to arrive
- The Chatbot Arena (for now) is single-turn only. Responses will be cleared after submission.
- Responses from the Arena will be used for building reward models. These reward models can be bucketed by Personas.
- [💵 Consider Donating on our Patreon](http://patreon.com/OpenAccessAICollective) or become a [GitHub Sponsor](https://github.com/sponsors/OpenAccess-AI-Collective)
- Join us on [Discord](https://discord.gg/PugNNHAF5r)
""")
    with gr.Tab("Chatbot Arena"):
        with gr.Row():
            with gr.Column():
                arena_chatbot1 = gr.Chatbot(label="Chatbot A")
            with gr.Column():
                arena_chatbot2 = gr.Chatbot(label="Chatbot B")
        with gr.Row():
            choose1 = gr.Button(value="👈 Prefer left (A)", variant="secondary", visible=False).style(full_width=True)
            choose2 = gr.Button(value="👉 Prefer right (B)", variant="secondary", visible=False).style(full_width=True)
            choose3 = gr.Button(value="🤝 Tie", variant="secondary", visible=False).style(full_width=True)
            choose4 = gr.Button(value="🤮 Both are bad", variant="secondary", visible=False).style(full_width=True)
        with gr.Row():
            reveal1 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True)
            reveal2 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True)
        with gr.Row():
            dismiss_reveal = gr.Button(value="Dismiss & Continue", variant="secondary", visible=False).style(full_width=True)
        with gr.Row():
            with gr.Column():
                arena_message = gr.Textbox(
                    label="What do you want to ask?",
                    placeholder="Ask me anything.",
                    lines=3,
                )
            with gr.Column():
                arena_rlhf_persona = gr.Textbox(
                    "", label="Persona Tags", interactive=True, visible=True, placeholder="Tell us about how you are judging the quality. ex: #CoT #SFW #NSFW #helpful #ethical #creativity", lines=2)
                arena_system_msg = gr.Textbox(
                    start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt", lines=8)

                arena_nudge_msg = gr.Textbox(
                    "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=2)
        with gr.Row():
            arena_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
            arena_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
            # arena_regenerate = gr.Button(value="Regenerate", variant="secondary").style(full_width=False)
        arena_state = gr.State({})

        arena_clear.click(lambda: None, None, arena_chatbot1, queue=False)
        arena_clear.click(lambda: None, None, arena_chatbot2, queue=False)
        arena_clear.click(lambda: None, None, arena_message, queue=False)
        arena_clear.click(lambda: None, None, arena_nudge_msg, queue=False)
        arena_clear.click(lambda: None, None, arena_state, queue=False)

        submit_click_event = arena_submit.click(
            # hide the input box and buttons while the two bots stream
            lambda *args: (
                gr.update(visible=False, interactive=False),
                gr.update(visible=False),
                gr.update(visible=False),
            ),
            inputs=[], outputs=[arena_message, arena_clear, arena_submit], queue=True
        ).then(
            fn=user, inputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], outputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], queue=True
        ).then(
            fn=chat, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_state], outputs=[arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state], queue=True
        ).then(
            # once streaming finishes, surface the four voting buttons
            lambda *args: (
                gr.update(visible=False, interactive=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=False),
                gr.update(visible=False),
            ),
            inputs=[arena_message, arena_nudge_msg, arena_system_msg], outputs=[arena_message, choose1, choose2, choose3, choose4, arena_clear, arena_submit], queue=True
        )

        choose1_click_event = choose1.click(
            fn=chosen_one_first, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True
        ).then(
            lambda *args: (
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
            ),
            inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True
        )

        choose2_click_event = choose2.click(
            fn=chosen_one_second, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True
        ).then(
            lambda *args: (
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
            ),
            inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True
        )

        choose3_click_event = choose3.click(
            fn=chosen_one_tie, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True
        ).then(
            lambda *args: (
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
            ),
            inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True
        )

        choose4_click_event = choose4.click(
            fn=chosen_one_suck, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True
        ).then(
            lambda *args: (
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=True),
            ),
            inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True
        )

        dismiss_click_event = dismiss_reveal.click(
            # reset the arena for the next battle
            lambda *args: (
                gr.update(visible=True, interactive=True),
                gr.update(visible=False),
                gr.update(visible=True),
                gr.update(visible=True),
                gr.update(visible=False),
                gr.update(visible=False),
                None,
                None,
                None,
            ),
            inputs=[], outputs=[
                arena_message,
                dismiss_reveal,
                arena_clear, arena_submit,
                reveal1, reveal2,
                arena_chatbot1, arena_chatbot2,
                arena_state,
            ], queue=True
        )
    with gr.Tab("Leaderboard"):
        with gr.Column():
            leaderboard_markdown = gr.Markdown(f"""{leaderboard_intro}
{dataset_to_markdown()}
""")
            leaderboard_refresh = gr.Button(value="Refresh Leaderboard", variant="secondary").style(full_width=True)
            leaderboard_refresh.click(fn=refresh_md, inputs=[], outputs=[leaderboard_markdown])
    with gr.Tab("OAAIC Chatbots"):
        gr.Markdown("# GGML Spaces Chatbot Demo")
        open_model_choice = gr.Dropdown(label="Model", choices=OAAIC_MODELS, value=OAAIC_MODELS[0])
        open_chatbot = gr.Chatbot().style(height=400)
        with gr.Row():
            open_message = gr.Textbox(
                label="What do you want to chat about?",
                placeholder="Ask me anything.",
                lines=3,
            )
        with gr.Row():
            open_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
            open_roleplay = gr.Button(value="Roleplay", variant="secondary").style(full_width=True)
            open_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
            open_stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
        with gr.Row():
            with gr.Column():
                open_max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
                open_temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
                open_top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
                open_top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
                open_repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)

        open_system_msg = gr.Textbox(
            start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5)

        open_nudge_msg = gr.Textbox(
            "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=1)

        open_chat_history_state = gr.State()
        open_clear.click(open_clear_chat, inputs=[open_chat_history_state, open_message, open_nudge_msg], outputs=[open_chat_history_state, open_message, open_nudge_msg], queue=False)
        open_clear.click(lambda: None, None, open_chatbot, queue=False)

        open_submit_click_event = open_submit.click(
            fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True
        ).then(
            fn=open_chat, inputs=[open_model_choice, open_chat_history_state, open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True
        )
        open_roleplay_click_event = open_roleplay.click(
            fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True
        ).then(
            fn=open_rp_chat, inputs=[open_model_choice, open_chat_history_state, open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True
        )
        open_stop.click(fn=None, inputs=None, outputs=None, cancels=[open_submit_click_event, open_roleplay_click_event], queue=False)

arena.queue(concurrency_count=5, max_size=16).launch(debug=True, server_name="0.0.0.0", server_port=7860)
calculate_elo.py
ADDED
@@ -0,0 +1,309 @@
import logging
import os
from datetime import datetime
from decimal import Decimal
from typing import List

import boto3
from boto3.dynamodb.conditions import Attr, Key
from datasets import Dataset

logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))

# Create a DynamoDB client
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')


def _create_arena_table():
    dynamodb.create_table(
        TableName='oaaic_chatbot_arena',
        KeySchema=[
            {
                'AttributeName': 'arena_battle_id',
                'KeyType': 'HASH'
            },
        ],
        AttributeDefinitions=[
            {
                'AttributeName': 'arena_battle_id',
                'AttributeType': 'S'
            },
            {
                'AttributeName': 'timestamp',
                'AttributeType': 'S'
            },
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 5,
            'WriteCapacityUnits': 5
        },
        GlobalSecondaryIndexes=[
            {
                'IndexName': 'TimestampIndex',
                'KeySchema': [
                    {
                        'AttributeName': 'arena_battle_id',
                        'KeyType': 'HASH'
                    },
                    {
                        'AttributeName': 'timestamp',
                        'KeyType': 'RANGE'
                    },
                ],
                'Projection': {
                    'ProjectionType': 'ALL',
                },
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 5,
                    'WriteCapacityUnits': 5,
                }
            },
        ]
    )


def _create_elo_scores_table():
    dynamodb.create_table(
        TableName='elo_scores',
        KeySchema=[
            {
                'AttributeName': 'chatbot_name',
                'KeyType': 'HASH'  # Partition key
            },
        ],
        AttributeDefinitions=[
            {
                'AttributeName': 'chatbot_name',
                'AttributeType': 'S'
            },
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 5,
            'WriteCapacityUnits': 5
        }
    )


def _create_elo_logs_table():
    dynamodb.create_table(
        TableName='elo_logs',
        KeySchema=[
            {
                'AttributeName': 'arena_battle_id',
                'KeyType': 'HASH'  # Partition key
            },
            {
                'AttributeName': 'battle_timestamp',
                'KeyType': 'RANGE'  # Sort key
            },
        ],
        AttributeDefinitions=[
            {
                'AttributeName': 'arena_battle_id',
                'AttributeType': 'S'
            },
            {
                'AttributeName': 'battle_timestamp',
                'AttributeType': 'S'
            },
            {
                'AttributeName': 'all',
                'AttributeType': 'S'
            }
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 10,
            'WriteCapacityUnits': 10
        },
        GlobalSecondaryIndexes=[
            {
                'IndexName': 'AllTimestampIndex',
                'KeySchema': [
                    {
                        'AttributeName': 'all',
                        'KeyType': 'HASH'  # Partition key for the GSI
                    },
                    {
                        'AttributeName': 'battle_timestamp',
                        'KeyType': 'RANGE'  # Sort key for the GSI
                    }
                ],
                'Projection': {
                    'ProjectionType': 'ALL'
                },
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 10
                }
            },
        ]
    )


def get_unprocessed_battles(last_processed_timestamp):
    # Reference the arena table
    table = dynamodb.Table('oaaic_chatbot_arena')

    # Scan for battles newer than the last processed timestamp; they are
    # sorted into temporal order in main()
    response = table.scan(
        FilterExpression=Attr('timestamp').gt(last_processed_timestamp),
        # ScanIndexForward=True
    )

    return response['Items']


def calculate_elo(rating1, rating2, result, K=32):
    # Convert ratings to float
    rating1 = float(rating1)
    rating2 = float(rating2)

    # Calculate the expected outcomes
    expected_outcome1 = 1.0 / (1.0 + 10.0 ** ((rating2 - rating1) / 400.0))
    expected_outcome2 = 1.0 - expected_outcome1

    # Calculate the new Elo ratings
    new_rating1 = rating1 + K * (result - expected_outcome1)
    new_rating2 = rating2 + K * ((1.0 - result) - expected_outcome2)

    return Decimal(new_rating1).quantize(Decimal('0.00')), Decimal(new_rating2).quantize(Decimal('0.00'))

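# The expected score above is the standard logistic Elo formula:
#
#     E1 = 1 / (1 + 10 ** ((R2 - R1) / 400)),   R1' = R1 + K * (S - E1)
#
# Worked example: two fresh bots at 1500 each, and the first one wins
# (S = 1): E1 = 0.5, so R1' = 1500 + 32 * 0.5 = 1516.00 and R2' = 1484.00.
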
def get_last_processed_timestamp():
    table = dynamodb.Table('elo_logs')

    # Query the GSI sorted by timestamp in descending order; every log row
    # carries the constant 'all' = 'ALL' attribute so the index can order
    # all rows by battle_timestamp
    response = table.query(
        IndexName='AllTimestampIndex',
        KeyConditionExpression=Key('all').eq('ALL'),
        ScanIndexForward=False,
        Limit=1
    )

    # If there are no items in the table, return a default timestamp
    if not response['Items']:
        return '1970-01-01T00:00:00'

    # Otherwise, return the timestamp of the latest item
    return response['Items'][0]['battle_timestamp']


def log_elo_update(arena_battle_id, battle_timestamp, new_rating1, new_rating2):
    # Reference the elo_logs table
    table = dynamodb.Table('elo_logs')

    # Update the table
    table.put_item(
        Item={
            'arena_battle_id': arena_battle_id,
            'battle_timestamp': battle_timestamp,  # Use the timestamp of the battle
            'log_timestamp': datetime.now().isoformat(),  # Also store the timestamp of the log for completeness
            'new_rating1': new_rating1,
            'new_rating2': new_rating2,
            'all': 'ALL',  # constant partition key for the AllTimestampIndex GSI
        }
    )


def get_elo_score(chatbot_name, elo_scores):
    if chatbot_name in elo_scores:
        return elo_scores[chatbot_name]

    table = dynamodb.Table('elo_scores')
    response = table.get_item(Key={'chatbot_name': chatbot_name})

    # If there is no item in the table, return a default score
    if 'Item' not in response:
        return 1500

    return response['Item']['elo_score']


def update_elo_score(chatbot_name, new_elo_score):
    table = dynamodb.Table('elo_scores')

    # This will create a new item if it doesn't exist
    table.put_item(
        Item={
            'chatbot_name': chatbot_name,
            'elo_score': Decimal(str(new_elo_score)),
        }
    )


def get_elo_scores():
    table = dynamodb.Table('elo_scores')

    response = table.scan()
    data = response['Items']

    return data


def _backfill_logs():
    table = dynamodb.Table('elo_logs')

    # Initialize the scan operation
    response = table.scan()

    for item in response['Items']:
        table.update_item(
            Key={
                'arena_battle_id': item['arena_battle_id'],
                'battle_timestamp': item['battle_timestamp']
            },
            UpdateExpression="SET #all = :value",
            ExpressionAttributeNames={
                '#all': 'all'
            },
            ExpressionAttributeValues={
                ':value': 'ALL'
            }
        )


def main():
    last_processed_timestamp = get_last_processed_timestamp()
    battles: List[dict] = get_unprocessed_battles(last_processed_timestamp)
    battles = sorted(battles, key=lambda x: x['timestamp'])
    elo_scores = {}

    for battle in battles:
        print(repr(battle))
        if battle['label'] in {-1, 0, 1, 2}:
            outcome = battle['label']
            for chatbot_name in [battle['choice1_name'], battle['choice2_name']]:
                if chatbot_name not in elo_scores:
                    elo_scores[chatbot_name] = get_elo_score(chatbot_name, elo_scores)
            # Map the arena label onto an Elo result for the first player:
            # 1 = win, 0.5 = draw, 0 = loss. Labels 0 (tie) and -1 (both bad)
            # both count as a draw; label 2 means the second player won.
            if outcome == 0 or outcome == -1:
                elo_result = 0.5
            elif outcome == 1:
                elo_result = 1
            else:
                elo_result = 0

            new_rating1, new_rating2 = calculate_elo(elo_scores[battle['choice1_name']], elo_scores[battle['choice2_name']], elo_result)
            logging.info(f"{battle['choice1_name']}: {elo_scores[battle['choice1_name']]} -> {new_rating1} | {battle['choice2_name']}: {elo_scores[battle['choice2_name']]} -> {new_rating2}")
            log_elo_update(battle['arena_battle_id'], battle['timestamp'], new_rating1, new_rating2)
            update_elo_score(battle['choice1_name'], new_rating1)
            update_elo_score(battle['choice2_name'], new_rating2)
            elo_scores[battle['choice1_name']] = new_rating1
            elo_scores[battle['choice2_name']] = new_rating2

    elo_scores = get_elo_scores()
    for i, j in enumerate(elo_scores):
        j["elo_score"] = float(j["elo_score"])
        elo_scores[i] = j
    print(elo_scores)

    if battles:
        # Convert the data into a format suitable for Hugging Face Dataset
        elo_dataset = Dataset.from_list(elo_scores)
        elo_dataset.push_to_hub("openaccess-ai-collective/chatbot-arena-elo-scores", private=False)


if __name__ == "__main__":
    main()
requirements.txt
ADDED
@@ -0,0 +1,3 @@
pyyaml
requests
boto3