job-fair / app.py
import streamlit as st
import pandas as pd
import json
import http.client
from io import StringIO
from openai import AzureOpenAI

class ContentFormatter:
    """Builds the JSON request body for a chat-completions call."""

    @staticmethod
    def chat_completions(text, settings_params):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text}
        ]
        data = {"messages": messages, **settings_params}
        return json.dumps(data)
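
# For reference, ContentFormatter.chat_completions("Hi", {"temperature": 0.5}) returns a JSON
# string shaped roughly like this (illustrative only):
#   {"messages": [{"role": "system", "content": "You are a helpful assistant."},
#                 {"role": "user", "content": "Hi"}],
#    "temperature": 0.5}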

class AzureAgent:
    """Calls an Azure-hosted chat-completions endpoint over raw HTTPS."""

    def __init__(self, api_key, azure_uri, deployment_name, api_version):
        # http.client.HTTPSConnection expects a bare hostname here, not a full URL.
        self.azure_uri = azure_uri
        self.headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }
        self.deployment_name = deployment_name
        self.api_version = api_version
        self.chat_formatter = ContentFormatter

    def invoke(self, text, **kwargs):
        # Serialize the prompt plus any generation settings, then POST to the endpoint.
        body = self.chat_formatter.chat_completions(text, {**kwargs})
        conn = http.client.HTTPSConnection(self.azure_uri)
        conn.request("POST", f'/v{self.api_version}/chat/completions', body=body, headers=self.headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        decoded_data = data.decode("utf-8")
        parsed_data = json.loads(decoded_data)
        content = parsed_data["choices"][0]["message"]["content"]
        return content
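
# Illustrative usage (hostname, key, and version are placeholders, not real values):
#   agent = AzureAgent("my-api-key", "my-endpoint.inference.ai.azure.com", "my-deployment", "1")
#   print(agent.invoke("Evaluate this candidate...", temperature=0.5, max_tokens=150))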

class GPTAgent:
    """Calls Azure OpenAI chat completions through the official SDK client."""

    def __init__(self, api_key, azure_endpoint, deployment_name, api_version):
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version=api_version,
            azure_endpoint=azure_endpoint
        )
        self.deployment_name = deployment_name

    def invoke(self, text, **kwargs):
        response = self.client.chat.completions.create(
            model=self.deployment_name,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": text}
            ],
            **kwargs
        )
        return response.choices[0].message.content
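
# Illustrative usage (endpoint, deployment, and API version are placeholders):
#   agent = GPTAgent("my-api-key", "https://my-resource.openai.azure.com/", "my-gpt-deployment", "2024-02-01")
#   print(agent.invoke("Evaluate this candidate...", temperature=0.5, max_tokens=150))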

# Streamlit app interface
st.title('JobFair: A Benchmark for Fairness in LLM Employment Decision')

# Agent and model configuration
model_type = st.radio("Select the type of agent", ('AzureAgent', 'GPTAgent'))
api_key = st.text_input("API Key", type="password")
# For AzureAgent this should be a bare hostname (used by http.client);
# for GPTAgent it should be a full https:// Azure OpenAI endpoint.
endpoint_url = st.text_input("Endpoint URL")
deployment_name = st.text_input("Model Name")
api_version = st.text_input("API Version")

# Generation settings passed through to the selected agent
temperature = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
max_tokens = st.number_input("Max Tokens", min_value=1, max_value=1000, value=150)
stop_sequences = st.text_input("Stop Sequences", "")
parameters = {"temperature": temperature, "max_tokens": max_tokens, "stop": [stop_sequences] if stop_sequences else []}
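
# With the default widget values above, this yields:
#   parameters == {"temperature": 0.5, "max_tokens": 150, "stop": []}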

# File upload and data display
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
    # Read the uploaded CSV (it must contain a 'prompt' column, used below)
    data = StringIO(uploaded_file.getvalue().decode("utf-8"))
    df = pd.read_csv(data)
    st.write('Uploaded Data:', df.head())

    # Process data button
    if st.button('Process Data'):
        if model_type == 'AzureAgent':
            agent = AzureAgent(api_key, endpoint_url, deployment_name, api_version)
        else:
            agent = GPTAgent(api_key, endpoint_url, deployment_name, api_version)

        # Example processing step (adapt as needed)
        # Pass the generation settings as keyword arguments, since invoke() accepts **kwargs.
        df['Response'] = df['prompt'].apply(lambda x: agent.invoke(x, **parameters))
        st.write('Processed Data:', df.head())

        # Generate download link
        st.download_button(
            label="Download processed data",
            data=df.to_csv().encode('utf-8'),
            file_name='processed_data.csv',
            mime='text/csv',
        )
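
# Expected input: a CSV with a 'prompt' column, one prompt per row, e.g. (illustrative rows):
#   prompt
#   "Evaluate this candidate's resume for the software engineer role: ..."
#   "Evaluate this candidate's resume for the data analyst role: ..."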