justinxzhao committed · commit c0a5a18 · 1 parent: cf367e2

Streaming working, with different providers.

Files changed:
- app.py (+310, -16)
- requirements.txt (+5, -1)

app.py CHANGED
@@ -1,27 +1,321 @@
import os
import streamlit as st
import dotenv
import openai
from openai import OpenAI
import anthropic
from together import Together
import google.generativeai as genai
import time

dotenv.load_dotenv()

PASSWORD = os.getenv("APP_PASSWORD")

# Load API keys from environment variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")

# Initialize API clients
together_client = Together(api_key=TOGETHER_API_KEY)
genai.configure(api_key=GOOGLE_API_KEY)

# Set up API clients for OpenAI and Anthropic
openai.api_key = OPENAI_API_KEY
openai_client = OpenAI(
    organization="org-kUoRSK0nOw4W2nQYMVGWOt03",
    project="proj_zb6k1DdgnSEbiAEMWxSOVVu4",
)
# anthropic_client = anthropic.Client(api_key=ANTHROPIC_API_KEY)
anthropic_client = anthropic.Anthropic()

LLM_COUNCIL_MEMBERS = {
    "Smalls": [
        "openai://gpt-4o-mini",
        "together://meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "vertex://gemini-1.5-flash-001",
        "anthropic://claude-3-haiku-20240307",
    ],
    "Flagships": [
        "openai://gpt-4",
        "together://meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "vertex://gemini-1.5-pro-001",
        "anthropic://claude-3-5-sonnet",
    ],
}

# Inline base64-encoded SVG avatars, keyed by provider://model identifier.
PROVIDER_TO_AVATAR_MAP = {
"openai://gpt-4o-mini": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIwLjk5ZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMjU2IDI2MCI+PHBhdGggZD0iTTIzOS4xODQgMTA2LjIwM2E2NC43MiA2NC43MiAwIDAgMC01LjU3Ni01My4xMDNDMjE5LjQ1MiAyOC40NTkgMTkxIDE1Ljc4NCAxNjMuMjEzIDIxLjc0QTY1LjU4NiA2NS41ODYgMCAwIDAgNTIuMDk2IDQ1LjIyYTY0LjcyIDY0LjcyIDAgMCAwLTQzLjIzIDMxLjM2Yy0xNC4zMSAyNC42MDItMTEuMDYxIDU1LjYzNCA4LjAzMyA3Ni43NGE2NC42NyA2NC42NyAwIDAgMCA1LjUyNSA1My4xMDJjMTQuMTc0IDI0LjY1IDQyLjY0NCAzNy4zMjQgNzAuNDQ2IDMxLjM2YTY0LjcyIDY0LjcyIDAgMCAwIDQ4Ljc1NCAyMS43NDRjMjguNDgxLjAyNSA1My43MTQtMTguMzYxIDYyLjQxNC00NS40ODFhNjQuNzcgNjQuNzcgMCAwIDAgNDMuMjI5LTMxLjM2YzE0LjEzNy0yNC41NTggMTAuODc1LTU1LjQyMy04LjA4My03Ni40ODNtLTk3LjU2IDEzNi4zMzhhNDguNCA0OC40IDAgMCAxLTMxLjEwNS0xMS4yNTVsMS41MzUtLjg3bDUxLjY3LTI5LjgyNWE4LjYgOC42IDAgMCAwIDQuMjQ3LTcuMzY3di03Mi44NWwyMS44NDUgMTIuNjM2Yy4yMTguMTExLjM3LjMyLjQwOS41NjN2NjAuMzY3Yy0uMDU2IDI2LjgxOC0yMS43ODMgNDguNTQ1LTQ4LjYwMSA0OC42MDFNMzcuMTU4IDE5Ny45M2E0OC4zNSA0OC4zNSAwIDAgMS01Ljc4MS0zMi41ODlsMS41MzQuOTIxbDUxLjcyMiAyOS44MjZhOC4zNCA4LjM0IDAgMCAwIDguNDQxIDBsNjMuMTgxLTM2LjQyNXYyNS4yMjFhLjg3Ljg3IDAgMCAxLS4zNTguNjY1bC01Mi4zMzUgMzAuMTg0Yy0yMy4yNTcgMTMuMzk4LTUyLjk3IDUuNDMxLTY2LjQwNC0xNy44MDNNMjMuNTQ5IDg1LjM4YTQ4LjUgNDguNSAwIDAgMSAyNS41OC0yMS4zMzN2NjEuMzlhOC4yOSA4LjI5IDAgMCAwIDQuMTk1IDcuMzE2bDYyLjg3NCAzNi4yNzJsLTIxLjg0NSAxMi42MzZhLjgyLjgyIDAgMCAxLS43NjcgMEw0MS4zNTMgMTUxLjUzYy0yMy4yMTEtMTMuNDU0LTMxLjE3MS00My4xNDQtMTcuODA0LTY2LjQwNXptMTc5LjQ2NiA0MS42OTVsLTYzLjA4LTM2LjYzTDE2MS43MyA3Ny44NmEuODIuODIgMCAwIDEgLjc2OCAwbDUyLjIzMyAzMC4xODRhNDguNiA0OC42IDAgMCAxLTcuMzE2IDg3LjYzNXYtNjEuMzkxYTguNTQgOC41NCAwIDAgMC00LjQtNy4yMTNtMjEuNzQyLTMyLjY5bC0xLjUzNS0uOTIybC01MS42MTktMzAuMDgxYTguMzkgOC4zOSAwIDAgMC04LjQ5MiAwTDk5Ljk4IDk5LjgwOFY3NC41ODdhLjcyLjcyIDAgMCAxIC4zMDctLjY2NWw1Mi4yMzMtMzAuMTMzYTQ4LjY1MiA0OC42NTIgMCAwIDEgNzIuMjM2IDUwLjM5MXpNODguMDYxIDEzOS4wOTdsLTIxLjg0NS0xMi41ODVhLjg3Ljg3IDAgMCAxLS40MS0uNjE0VjY1LjY4NWE0OC42NTIgNDguNjUyIDAgMCAxIDc5Ljc1Ny0zNy4zNDZsLTEuNTM1Ljg3bC01MS42NyAyOS44MjVhOC42IDguNiAwIDAgMC00LjI0NiA3LjM2N3ptMTEuODY4LTI1LjU4TDEyOC4wNjcgOTcuM2wyOC4xODggMTYuMjE4djMyLjQzNGwtMjguMDg2IDE2LjIxOGwtMjguMTg4LTE2LjIxOHoiLz48L3N2Zz4=",
"anthropic://claude-3-5-sonnet": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMjQgMjQiPjxwYXRoIGZpbGw9ImN1cnJlbnRDb2xvciIgZD0iTTE3LjMwNCAzLjU0MWgtMy42NzJsNi42OTYgMTYuOTE4SDI0Wm0tMTAuNjA4IDBMMCAyMC40NTloMy43NDRsMS4zNy0zLjU1M2g3LjAwNWwxLjM2OSAzLjU1M2gzLjc0NEwxMC41MzYgMy41NDFabS0uMzcxIDEwLjIyM0w4LjYxNiA3LjgybDIuMjkxIDUuOTQ1WiIvPjwvc3ZnPg==",
"vertex://gemini-1.5-flash-001": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMjQgMjQiPjxwYXRoIGZpbGw9IiM0MjY4ZmYiIGQ9Ik0yNCAxMi4wMjRjLTYuNDM3LjM4OC0xMS41OSA1LjUzOS0xMS45NzcgMTEuOTc2aC0uMDQ3QzExLjU4OCAxNy41NjMgNi40MzYgMTIuNDEyIDAgMTIuMDI0di0uMDQ3QzYuNDM3IDExLjU4OCAxMS41ODggNi40MzcgMTEuOTc2IDBoLjA0N2MuMzg4IDYuNDM3IDUuNTQgMTEuNTg4IDExLjk3NyAxMS45Nzd6Ii8+PC9zdmc+",
"together://meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMzIgMzIiPjxnIGZpbGw9Im5vbmUiPjxwYXRoIGZpbGw9IiNiNGFjYmMiIGQ9Ik0yMC44NzEgMjQuNzh2LTYuMDZoMy4wMXY1Ljc3YzAgLjM0LS4xMi42Mi0uMzEuOTRsLTIuNDEgNC4yYy0uMTguMjMtLjQ1LjM3LS43NS4zN2gtMS4wM2MtLjIzIDAtLjM4LS4yNC0uMjgtLjQ1bDEuNjctNC4zNWMuMDctLjEzLjEtLjI3LjEtLjQyTTE3LjA5MSAzMGMuMiAwIC4yNi0uMjEuMjItLjM4bC0yLjMyLTguNjFoLTIuOTlsLjg1IDMuNTVjLjE5LjcxLjY2IDEuMzIgMS4zIDEuNjljLjE0LjA4LjI1LjIyLjI5LjM4bC44NyAzLjE0Yy4wNC4xNy4yMS4yMi4zOC4yMnoiLz48cGF0aCBmaWxsPSIjY2RjNGQ2IiBkPSJtMjguNTQxIDIzLjA4bC0xLjI3LS45NmEuOTQuOTQgMCAwIDEtLjI3LS42NnYtMi43NWMwLS42NC0uMTYtMS4yMzgtLjQ0Mi0xLjc2cS4yMTMuMDUuNDQyLjA1YTIgMiAwIDEgMC0xLjk0OS0xLjU0OWEzLjggMy44IDAgMCAwLTEuOC0uNDUxaC04LjE3Yy0uNjYgMC0xLjI3LS40Mi0xLjU3LTEuMDFMMTAuMDQxIDMuNWEyLjIzIDIuMjMgMCAwIDAtMi4xLTEuNWMtLjE4IDAtLjMuMTctLjI0LjM0TDguNTcxIDVjLS4yIDAtMS4wNy4yMy0xLjg1LjczbC0uODA2LjQ5OEw3LjAwMiAxMHY4LjI2YzAgMi4wMSAxLjI1IDMuNzIgMy4wMSA0LjQxdjcuMDJjLS4wMS4xNy4xMy4zMS4zLjMxaDEuMzdjLjE4IDAgLjMyLS4xNC4zMi0uMzF2LTEuOTZjMC0uMTcuMDctLjMyLjE4LS40NGMuNTMtLjUyLjgyLTEuMjMuODItMS45N1YyM2g1LjA3YzEuMjcgMCAyLjQ5LS41NSAzLjMzLTEuNWMwIC45NC40OCAxLjcyIDEuMzggMi4zMmwzLjk2IDIuNDNjLjE2LjExLjI2LjMuMjYuNXYyLjkzYzAgLjE3LjE0LjMxLjMxLjMxaDEuMzdjLjE3IDAgLjMxLS4xNC4zMS0uMzF2LTUuNTFjLjAxLS40LS4xNS0uOC0uNDUtMS4wOSIvPjxwYXRoIGZpbGw9IiNmM2FkNjEiIGQ9Ik02Ljg0MSA2Yy0uMzYgMC0uNzIuMS0xLjAzLjI5bC0yLjE5IDEuMzVjLS4zNi4yMy0uNi42MS0uNjIgMS4wM2MtLjAzLjczLjU1IDEuMzMgMS4yNyAxLjMzaDMuNDljLjU3IDAgMS4wNC0uNDcgMS4wNC0xLjA1di0xYzAtMS4wNy0uODgtMS45NS0xLjk2LTEuOTUiLz48cGF0aCBmaWxsPSIjMWMxYzFjIiBkPSJNNi41IDhhLjUuNSAwIDEgMCAwLTFhLjUuNSAwIDAgMCAwIDFtLTEuOTk5LjVjMC0uMjgtLjIyLS41LS41LS41aC0uNzZhMS4yIDEuMiAwIDAgMC0uMjEgMWguOTdjLjI4IDAgLjUtLjIyLjUtLjUiLz48cGF0aCBmaWxsPSIjZjNhZDYxIiBkPSJNMjguOTkxIDI4aC0xLjk5djEuNjhjMCAuMTcuMTQuMzEuMzEuMzFoMS4zN2MuMTcgMCAuMzEtLjE0LjMxLS4zMXptLTE2Ljk5IDBoLTEuOTl2MS42OWMtLjAxLjE3LjEzLjMxLjMuMzFoMS4zN2MuMTggMCAuMzItLjE0LjMyLS4zMXptNS4wODggMmwtMS4zOTgtLjAxYy0uMTcgMC0uMzQtLjA1LS4zOC0uMjJsLS40OS0xLjc3aDIuMDU0bC40MzYgMS42MmMuMDQuMTctLjAyLjM3OC0uMjE2LjM4em0yLjI4OCAwYS4zMTMuMzEzIDAgMCAxLS4yNzYtLjQ1bC41OTUtMS41NWgyLjRsLS45MzUgMS42M2EuOTUuOTUgMCAwIDEtLjc0Mi4zN3oiLz48L2c+PC9zdmc+",
"anthropic://claude-3-haiku-20240307": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMjQgMjQiPjxwYXRoIGZpbGw9ImN1cnJlbnRDb2xvciIgZD0iTTE3LjMwNCAzLjU0MWgtMy42NzJsNi42OTYgMTYuOTE4SDI0Wm0tMTAuNjA4IDBMMCAyMC40NTloMy43NDRsMS4zNy0zLjU1M2g3LjAwNWwxLjM2OSAzLjU1M2gzLjc0NEwxMC41MzYgMy41NDFabS0uMzcxIDEwLjIyM0w4LjYxNiA3LjgybDIuMjkxIDUuOTQ1WiIvPjwvc3ZnPg==",
}

AGGREGATORS = ["openai://gpt-4", "openai://gpt-3.5-turbo"]


def anthropic_streamlit_streamer(stream):
    """
    Process the Anthropic streaming response and yield content from the deltas.

    :param stream: Streaming object from the Anthropic API.
    :return: Yields content (text) from the streaming response.
    """
    for event in stream:
        if hasattr(event, "type"):
            # Handle content block deltas
            if event.type == "content_block_delta" and hasattr(event, "delta"):
                # Extract the text delta from the event
                text_delta = getattr(event.delta, "text", None)
                if text_delta:
                    yield text_delta

            # Handle message completion events (optional if needed)
            elif event.type == "message_stop":
                break  # End of message, stop streaming


def google_streamlit_streamer(stream):
    # Gemini stream chunks expose the generated text via .text.
    for chunk in stream:
        yield chunk.text


def together_streamlit_streamer(stream):
    # Together streams use the OpenAI-style chat-completion chunk format.
    for chunk in stream:
        yield chunk.choices[0].delta.content
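
# Note: OpenAI streams are handed to st.write_stream unmodified because
# Streamlit can consume OpenAI chat-completion streams directly; the adapters
# above convert the Anthropic, Gemini, and Together stream objects into plain
# text generators, which st.write_stream also accepts.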


# Helper functions for LLM council and aggregator selection
def llm_council_selector():
    selected_council = st.radio(
        "Choose a council configuration", options=list(LLM_COUNCIL_MEMBERS.keys())
    )
    return LLM_COUNCIL_MEMBERS[selected_council]


def aggregator_selector():
    return st.radio("Choose an aggregator LLM", options=AGGREGATORS)


# API calls for different providers
def get_openai_response(model_name, prompt):
    return openai_client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )


# https://docs.anthropic.com/en/api/messages-streaming
def get_anthropic_response(model_name, prompt):
    return anthropic_client.messages.create(
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
        model=model_name,
        stream=True,
    )


def get_together_response(model_name, prompt):
    return together_client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )


# https://ai.google.dev/gemini-api/docs/text-generation?lang=python
def get_google_response(model_name, prompt):
    model = genai.GenerativeModel(model_name)
    return model.generate_content(prompt, stream=True)


def get_llm_response(model_identifier, prompt):
    provider, model_name = model_identifier.split("://")
    if provider == "openai":
        return get_openai_response(model_name, prompt)
    elif provider == "anthropic":
        return get_anthropic_response(model_name, prompt)
    elif provider == "together":
        return get_together_response(model_name, prompt)
    elif provider == "vertex":
        return get_google_response(model_name, prompt)
    else:
        return None
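

# Example: get_llm_response("vertex://gemini-1.5-flash-001", prompt) splits the
# identifier into provider="vertex" and model_name="gemini-1.5-flash-001", then
# returns that provider's native streaming object (or None for an unknown
# provider).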


# Main Streamlit App
def main():
    st.set_page_config(
        page_title="Language Model Council Sandbox", page_icon="🏛️", layout="wide"
    )

    # Custom CSS for the chat display
    center_css = """
    <style>
    h1, h2, h3, h6 { text-align: center; }
    .chat-container {
        display: flex;
        align-items: flex-start;
        margin-bottom: 10px;
    }
    .avatar {
        width: 50px;
        margin-right: 10px;
    }
    .message {
        background-color: #f1f1f1;
        padding: 10px;
        border-radius: 10px;
        width: 100%;
    }
    </style>
    """
    st.markdown(center_css, unsafe_allow_html=True)

    # App title and description
    st.title("Language Model Council Sandbox")
    st.markdown("###### Invoke a council of LLMs to generate and judge each other.")
    st.markdown("###### [ArXiv Paper](https://arxiv.org/abs/2406.08598)")

    # Authentication system
    if "authenticated" not in st.session_state:
        st.session_state.authenticated = False

    cols = st.columns([2, 1, 2])
    if not st.session_state.authenticated:
        with cols[1]:
            password = st.text_input("Password", type="password")
            if st.button("Login", use_container_width=True):
                if password == PASSWORD:
                    st.session_state.authenticated = True
                else:
                    st.error("Invalid credentials")

    if st.session_state.authenticated:
        st.success("Logged in successfully!")

        # Council and aggregator selection
        selected_models = llm_council_selector()
        st.write("Selected Models:", selected_models)
        selected_aggregator = aggregator_selector()
        st.write("Selected Aggregator:", selected_aggregator)

        # Prompt input
        prompt = st.text_area("Enter your prompt:")

        if st.button("Submit"):
            st.write("Responses:")

            # Fetch and stream responses from each selected model
            for model in selected_models:
                with st.chat_message(
                    model,
                    avatar=PROVIDER_TO_AVATAR_MAP[model],
                ):
                    message_placeholder = st.empty()
                    stream = get_llm_response(model, prompt)
                    if stream:
                        if model.startswith("anthropic"):
                            stream = anthropic_streamlit_streamer(stream)
                        elif model.startswith("vertex"):
                            stream = google_streamlit_streamer(stream)
                        elif model.startswith("together"):
                            stream = together_streamlit_streamer(stream)
                        # write_stream returns the full streamed text; store it
                        # so the aggregator prompt below can include it.
                        st.session_state[model] = message_placeholder.write_stream(
                            stream
                        )

            # Construct the aggregator prompt from the stored responses
            aggregator_prompt = f"User prompt: {prompt}\n\n"
            aggregator_prompt += "Responses from other LLMs:\n"
            aggregator_prompt += "\n".join(
                [
                    f"{model}: {st.session_state.get(model, '')}"
                    for model in selected_models
                ]
            )
            aggregator_prompt += "\n\nPlease provide an aggregated response."
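            # The assembled prompt has the form:
            #
            #   User prompt: <prompt>
            #
            #   Responses from other LLMs:
            #   openai://gpt-4o-mini: <stored response>
            #   ...
            #
            #   Please provide an aggregated response.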

            # Fetch and stream the response from the aggregator
            st.write(f"Aggregated response from {selected_aggregator}:")
            with st.chat_message(selected_aggregator):
                message_placeholder = st.empty()
                aggregator_stream = get_llm_response(
                    selected_aggregator, aggregator_prompt
                )
                if aggregator_stream:
                    message_placeholder.write_stream(aggregator_stream)
    else:
        with cols[1]:
            st.warning("Please log in to access this app.")


if __name__ == "__main__":
    main()


# import streamlit as st
# from components import llm_council_selector

# st.title("LLM Council Selector")

# selected_models = llm_council_selector()

# if selected_models is not None:
#     st.write("Selected Models:", selected_models)
# else:
#     st.write("No models selected or component didn't return a value.")


# Choose your council.
# Pre-selected:
#   Smalls: GPT-4o-mini, llama-3.1-70b, qwen-2.0-70b
#   Flagships: GPT-4o, llama-3.1-405b, qwen-2.0-110b, gemini, claude-3.5-sonnet
#   Best: chatgpt-4o-latest, gemini-1.5-pro-exp-0827, grok-2-2024-08-13, claude-3-5-sonnet-20240620, llama-3.1-405b-instruct
# Custom:
#   Choose from a list of available models.
# All:
#   All available models.

# Choose aggregator.
# Aggregators are models proficient in synthesizing responses from other models
# into a single, high-quality output. An effective aggregator should maintain
# or enhance output quality even when integrating inputs that are of lesser
# quality than its own.
# Choices:
# - 4o-latest
# - gemini-1.5
# - grok-2
# - claude-3.5-sonnet
# - llama-3.1-405b-instruct

# Provide a prompt. (Or pre-canned prompts.)
# Paste chat history.

# Checkbox: enable judging.
#
# If checked, judging config:
#   Single-sided:
#     Provide criteria (or default).
#   If pairwise, choose granularity (or default).
#     Choose criteria (or default).
#   Enable position swapping?

# Go button.
# Sections:
# 1. Model outputs.
# 2. Aggregated output.
# 3. Judging underneath each output.
#    Highlight in green the output that was best, as determined by the council.
#    Show graph breakdown of scores and justifications (by criteria, # wins and # losses).
#    Show final overall score.
#    Highlight in red the output that was worst, as determined by the council.
# Judging section:
#   Show agreement matrix.
#   Show bar graph of self-bias.
#   Plot contrarianism vs. conviction (scatter plot).
#   Show per-judge scores.

# Calculate total cost.
# Show total tokens used.
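

# A minimal sketch of how the judging options outlined above might be captured
# once they are built. Every name below is hypothetical and is not used
# anywhere in the app yet.
from dataclasses import dataclass, field


@dataclass
class JudgingConfig:
    enabled: bool = False  # the "enable judging" checkbox
    pairwise: bool = False  # False = single-sided judging
    criteria: list = field(default_factory=lambda: ["overall quality"])
    granularity: str = "response"  # only relevant for pairwise judging
    position_swapping: bool = True  # re-judge with response order swapped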

requirements.txt CHANGED
@@ -1,2 +1,6 @@
streamlit
python-dotenv
openai
anthropic
together
google-generativeai