import streamlit as st
import pandas as pd
import json
import http.client
from io import StringIO
from openai import AzureOpenAI
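# Builds the JSON request body for a chat-completions call from a user prompt
# and the model settings (e.g. temperature, max_tokens).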
class ContentFormatter:
    @staticmethod
    def chat_completions(text, settings_params):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text}
        ]
        data = {"messages": messages, **settings_params}
        return json.dumps(data)
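# Client for an Azure-hosted chat endpoint, called directly over HTTPS with a Bearer token.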
class AzureAgent:
    def __init__(self, api_key, azure_uri, deployment_name):
        self.azure_uri = azure_uri
        self.headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }
        self.deployment_name = deployment_name
        self.chat_formatter = ContentFormatter
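    # POST the formatted request to the endpoint and return the reply text.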
    def invoke(self, text, **kwargs):
        body = self.chat_formatter.chat_completions(text, {**kwargs})
        # HTTPSConnection expects a bare hostname (no "https://" scheme or path).
        conn = http.client.HTTPSConnection(self.azure_uri)
        conn.request("POST", "/v1/chat/completions", body=body, headers=self.headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        decoded_data = data.decode("utf-8")
        parsed_data = json.loads(decoded_data)
        content = parsed_data["choices"][0]["message"]["content"]
        return content
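# Client for an Azure OpenAI deployment, accessed through the official AzureOpenAI SDK client.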
class GPTAgent:
    def __init__(self, api_key, azure_endpoint, deployment_name, api_version):
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version=api_version,
            azure_endpoint=azure_endpoint
        )
        self.deployment_name = deployment_name
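    # Send the prompt through the SDK and return the assistant's reply text.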
    def invoke(self, text, **kwargs):
        response = self.client.chat.completions.create(
            model=self.deployment_name,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": text}
            ],
            **kwargs
        )
        return response.choices[0].message.content
# Streamlit app interface
st.title('JobFair: A Benchmark for Fairness in LLM Employment Decision')
st.sidebar.title('Model Settings')
model_type = st.sidebar.radio("Select the type of agent", ('AzureAgent', 'GPTAgent'))
api_key = st.sidebar.text_input("API Key", type="password")
endpoint_url = st.sidebar.text_input("Endpoint URL")
deployment_name = st.sidebar.text_input("Model Name")
if model_type == 'GPTAgent':
    api_version = st.sidebar.text_input("API Version", '2024-02-15-preview')  # Default API version
# Model invocation parameters
temperature = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
max_tokens = st.sidebar.number_input("Max Tokens", min_value=1, max_value=1000, value=150)
parameters = {"temperature": temperature, "max_tokens": max_tokens}
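# These settings are forwarded as keyword arguments to the selected agent's invoke() call below.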
# File upload and data display
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
    # Read data
    data = StringIO(uploaded_file.getvalue().decode("utf-8"))
    df = pd.read_csv(data)
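    # The uploaded CSV must contain a 'prompt' column (used below); illustrative layout:
    #   prompt
    #   "First prompt text..."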
    # Process data button
    if st.button('Process Data'):
        if model_type == 'AzureAgent':
            agent = AzureAgent(api_key, endpoint_url, deployment_name)
        else:
            agent = GPTAgent(api_key, endpoint_url, deployment_name, api_version)
        df['Response'] = df['prompt'].apply(lambda x: agent.invoke(x, **parameters))
        # Display processed data
        st.write('Processed Data:', df)