# NOTE: the lines originally here were file-viewer residue scraped along with
# the source (page status text, commit hashes, and gutter line numbers), not
# program content; converted to this comment so the module parses.
import streamlit as st
import pandas as pd
import json
import http.client
from io import StringIO
from openai import AzureOpenAI
class ContentFormatter:
    """Builds JSON request bodies for a chat-completions endpoint."""

    @staticmethod
    def chat_completions(text, settings_params):
        """Wrap *text* as a user turn (after a fixed system prompt) and merge
        in *settings_params* (e.g. temperature, max_tokens); return the JSON
        string ready to POST."""
        payload = {
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": text},
            ],
        }
        payload.update(settings_params)
        return json.dumps(payload)
class AzureAgent:
    """Chat-completions client for an Azure-hosted model endpoint, using a raw
    HTTPS connection with bearer-token auth.

    Parameters mirror the UI fields: `azure_uri` is the endpoint host,
    `api_version` is interpolated into the request path as `/v{api_version}/...`.
    """

    def __init__(self, api_key, azure_uri, deployment_name, api_version):
        self.azure_uri = azure_uri
        self.headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }
        self.deployment_name = deployment_name
        self.api_version = api_version
        # Class (not instance) reference: chat_completions is a staticmethod.
        self.chat_formatter = ContentFormatter

    def invoke(self, text, **kwargs):
        """POST *text* to the chat-completions endpoint and return the
        assistant message content (str).

        Any keyword arguments (temperature, max_tokens, stop, ...) are merged
        into the request body. Raises on network errors, non-JSON responses,
        or an unexpected response shape.
        """
        body = self.chat_formatter.chat_completions(text, dict(kwargs))
        conn = http.client.HTTPSConnection(self.azure_uri)
        try:
            conn.request(
                "POST",
                f'/v{self.api_version}/chat/completions',
                body=body,
                headers=self.headers,
            )
            response = conn.getresponse()
            data = response.read()
        finally:
            # BUG FIX: close the connection even when request/getresponse/read
            # raises — the original leaked the socket on any network error.
            conn.close()
        parsed_data = json.loads(data.decode("utf-8"))
        return parsed_data["choices"][0]["message"]["content"]
class GPTAgent:
    """Chat-completions client backed by the official AzureOpenAI SDK."""

    def __init__(self, api_key, azure_endpoint, deployment_name, api_version):
        self.deployment_name = deployment_name
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version=api_version,
            azure_endpoint=azure_endpoint,
        )

    def invoke(self, text, **kwargs):
        """Send *text* as a user turn (after a fixed system prompt) and return
        the assistant reply content; extra kwargs are forwarded to the SDK."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": text},
        ]
        completion = self.client.chat.completions.create(
            model=self.deployment_name,
            messages=conversation,
            **kwargs,
        )
        return completion.choices[0].message.content
# --- Streamlit app interface ----------------------------------------------
st.title('JobFair: A Benchmark for Fairness in LLM Employment Decision')

# Backend selection and connection settings.
model_type = st.radio("Select the type of agent", ('AzureAgent', 'GPTAgent'))
api_key = st.text_input("API Key", type="password")
endpoint_url = st.text_input("Endpoint URL")
deployment_name = st.text_input("Model Name")
api_version = st.text_input("API Version")  # e.g. Azure OpenAI date-style version string

# Sampling parameters forwarded to the model on every call.
temperature = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
max_tokens = st.number_input("Max Tokens", min_value=1, max_value=1000, value=150)
stop_sequences = st.text_input("Stop Sequences", "")
parameters = {
    "temperature": temperature,
    "max_tokens": max_tokens,
    "stop": [stop_sequences] if stop_sequences else [],
}

# File upload and data display.
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
    # Decode the uploaded bytes and parse them as CSV.
    data = StringIO(uploaded_file.getvalue().decode("utf-8"))
    df = pd.read_csv(data)
    st.write('Uploaded Data:', df.head())

    # Process data button
    if st.button('Process Data'):
        if model_type == 'AzureAgent':
            agent = AzureAgent(api_key, endpoint_url, deployment_name, api_version)
        else:
            agent = GPTAgent(api_key, endpoint_url, deployment_name, api_version)

        if 'prompt' not in df.columns:
            # Guard: the original crashed with KeyError on files without
            # the expected 'prompt' column.
            st.error("Uploaded CSV must contain a 'prompt' column.")
        else:
            # BUG FIX: `parameters` must be splatted as keyword arguments —
            # both agents define invoke(self, text, **kwargs), so the original
            # positional call `agent.invoke(x, parameters)` raised TypeError.
            df['Response'] = df['prompt'].apply(lambda x: agent.invoke(x, **parameters))
            st.write('Processed Data:', df.head())

            # Offer the annotated dataframe for download.
            st.download_button(
                label="Download processed data",
                data=df.to_csv().encode('utf-8'),
                file_name='processed_data.csv',
                mime='text/csv',
            )