# Metaprofiling Career Insight Analyzer — Streamlit app (Hugging Face Space)
import time
import pandas as pd
import streamlit as st
import os
import json
from openai import AzureOpenAI
from model import create_models, configure_settings, load_documents_and_create_index, \
    create_chat_prompt_template, execute_query
from datasets import Dataset, DatasetDict, load_dataset, concatenate_datasets

# Azure OpenAI client for the deployed "personality_gpt4o" model used below;
# the API key comes from the environment, never hard-coded.
client = AzureOpenAI(azure_endpoint="https://personality-service.openai.azure.com/",
                     api_key=os.getenv("AZURE_OPENAI_KEY"), api_version="2024-02-15-preview")
# Hugging Face Hub token used to read and push the feedback datasets.
TOKEN = os.getenv('hf_token')
def store_feedback(user_input, response, feedback, rating, repo):
    """Append one feedback record to the Hugging Face dataset *repo*.

    Downloads the current dataset, appends a single-row record, and pushes
    the combined dataset back to the Hub (network I/O on every call).
    """
    existing = load_dataset(repo, token=TOKEN, download_mode="force_redownload",
                            ignore_verifications=True)
    record = {"user_input": [user_input], "response": [response],
              "feedback": [feedback], "rate": [rating]}
    addition = Dataset.from_pandas(pd.DataFrame(record))
    combined = concatenate_datasets([existing["train"], addition])
    combined.push_to_hub(repo, private=False, token=TOKEN)
def generate_one_completion(message, temperature):
    """Run a single non-streaming chat completion against the deployed model.

    Args:
        message: list of chat messages ({'role': ..., 'content': ...}).
        temperature: sampling temperature passed straight to the API.

    Returns:
        The raw completion response object from the OpenAI client.
    """
    request = dict(
        model="personality_gpt4o",
        temperature=temperature,
        max_tokens=1000,        # Adjust based on desired response length
        frequency_penalty=0.2,  # To avoid repetition
        presence_penalty=0.2,   # To introduce new topics
        messages=message,
        stream=False,
    )
    return client.chat.completions.create(**request)
import json | |
def get_profile_str(profile):
    """Render a profile dict as a plain-text summary for prompt embedding.

    Args:
        profile: dict with keys 'bio_information' (flat key/value dict) and
            'main_profile', 'red_flag', 'motivation' — each a dict of
            ``{trait: {'score': ..., 'summary': ...}}``.

    Returns:
        A multi-section string; every bullet line ends with a newline.
    """
    parts = ["Bio Information:\n"]
    for key, value in profile['bio_information'].items():
        parts.append(f"- {key.replace('_', ' ').title()}: {value}\n")
    # The three scored sections share an identical line format, so render
    # them with one loop instead of three copy-pasted blocks.
    for title, section_key in (("Main Profile", 'main_profile'),
                               ("Red Flags", 'red_flag'),
                               ("Motivation", 'motivation')):
        parts.append(f"\n{title}:\n")
        for key, value in profile[section_key].items():
            parts.append(f"- {key.title()}: {value['score']} - {value['summary']}\n")
    return "".join(parts)
def generate_prompt_from_profile(profile, version="TestTakersSummary"):
    """Build the chat-message list for the analysis completion call.

    Loads the prompt templates for *version* from ``prompts.json``, fills the
    ``{{...}}`` placeholders from *profile*, and returns an OpenAI-style
    message list.

    Args:
        profile: profile dict (see ``get_profile_str`` for the expected shape).
        version: key into the "Prompts" section of prompts.json.

    Returns:
        ``[{'role': 'system', ...}, {'role': 'user', ...}]`` on success, or a
        single system message describing the missing profile key on failure.
    """
    # Templates live next to the app, keyed by report version.
    with open('prompts.json', encoding='utf-8') as f:
        prompt_sets = json.load(f)['Prompts']
    prompt_templates = prompt_sets[version]

    def _scored_lines(section):
        # Shared "- Trait: score - summary" rendering for scored sections.
        return "\n".join(f"- {trait.title()}: {details['score']} - {details['summary']}"
                         for trait, details in section.items())

    try:
        individual_name = profile['bio_information'].get('Name', 'the individual')
        bio_section = "\n".join(f"- {k.replace('_', ' ').title()}: {v}"
                                for k, v in profile['bio_information'].items())
        # One placeholder->value table instead of five chained .replace calls
        # per template.
        replacements = {
            '{{INDIVIDUAL_NAME}}': individual_name,
            '{{BIO}}': bio_section,
            '{{PROFILE}}': _scored_lines(profile['main_profile']),
            '{{REDFLAGS_PROFILE}}': _scored_lines(profile['red_flag']),
            '{{MOTIVATION_PROFILE}}': _scored_lines(profile['motivation']),
        }
        prompts = []
        for template in prompt_templates:
            for placeholder, value in replacements.items():
                template = template.replace(placeholder, value)
            prompts.append(template)
        prompt = "\n".join(prompts)
    except KeyError as e:
        # Malformed profile: surface the missing key as a system message
        # instead of crashing the Streamlit run.
        return [{"role": "system", "content": f"Error processing profile data: missing {str(e)}"}]
    # NOTE(review): the first template is used both as the system message and
    # again inside the joined user prompt — confirm this duplication is intended.
    message = [
        {"role": "system", "content": prompt_templates[0]},
        {"role": "user", "content": prompt}
    ]
    return message
def display_profile_info(profile):
    """Render the profile's four sections as bullet lists in the sidebar."""
    def _scored_md(section):
        # Shared bullet format for the score/summary sections.
        return "\n".join(f"- **{attribute}**: {details['score']} - {details['summary']}"
                         for attribute, details in section.items())

    st.sidebar.markdown("### Bio Information: ")
    st.sidebar.markdown("\n".join(f"- **{key.replace('_', ' ')}**: {value}"
                                  for key, value in profile["bio_information"].items()))
    st.sidebar.markdown("### Main Profile: ")
    st.sidebar.markdown(_scored_md(profile["main_profile"]))
    st.sidebar.markdown("### Red Flags: ")
    st.sidebar.markdown(_scored_md(profile["red_flag"]))
    st.sidebar.markdown("### Motivation: ")
    st.sidebar.markdown(_scored_md(profile['motivation']))
def validate_json(profile):
    """Check that *profile* contains the four required dictionary sections.

    Returns:
        (True, ok-message) when valid, else (False, reason) for the first
        missing or non-dict key encountered.
    """
    for required in ('bio_information', 'main_profile', 'red_flag', 'motivation'):
        if required not in profile:
            return False, f"Key '{required}' is missing."
        if not isinstance(profile[required], dict):
            return False, f"'{required}' should be a dictionary."
    return True, "JSON structure is valid."
def logout():
    """Clear authentication and analysis state, then restart the script run."""
    st.session_state['authenticated'] = False
    for state_key in ('profile', 'show_chat', 'analysis'):
        st.session_state[state_key] = None
    st.rerun()
def main_app():
    """Authenticated main page: generate the profile analysis and collect feedback.

    Reads/writes st.session_state keys initialised at module level
    ('profile', 'analysis', 'show_chat', 'temperature', 'version',
    'coach_query', 'coach_response').
    """
    sidebar_components()
    if st.button('Logout'):
        logout()
    st.title('Metaprofiling\'s Career Insight Analyzer Demo')
    # Nothing to analyze until a profile has been uploaded or selected.
    if st.session_state['profile']:
        profile = st.session_state['profile']
        display_profile_info(profile)  # Show the raw profile in the sidebar
        st.markdown("""
        ### Generation Temperature
        Adjust the 'Generation Temperature' to control the creativity of the AI responses.
        - A *lower temperature* (closer to 0.0) generates more predictable, conservative responses.
        - A *higher temperature* (closer to 1.0) generates more creative, diverse responses.
        """)
        # Temperature slider (unlabeled; the markdown above explains it)
        st.session_state['temperature'] = st.slider("", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
        # Allow the user to choose between report audiences
        st.session_state['version'] = st.selectbox("Select Prompt Version", ["TestTakersSummary", "ManagersSummary"])
        # Generate and display the analysis
        if st.button(f'Analyze Profile ({st.session_state["version"]})'):
            prompt = generate_prompt_from_profile(profile, version=st.session_state['version'])
            with st.chat_message("assistant"):
                # Stream the completion so tokens render as they arrive.
                stream = client.chat.completions.create(
                    model="personality_gpt4o",
                    temperature=st.session_state['temperature'],
                    max_tokens=4096,  # Adjust based on desired response length
                    frequency_penalty=0.2,  # To avoid repetition
                    presence_penalty=0.2,  # To introduce new topics
                    messages=prompt,
                    stream=True)
                response = st.write_stream(stream)
            st.session_state['analysis'] = response
            st.session_state['show_chat'] = True
            st.rerun()
        # Display the stored analysis and gather feedback on it.
        if st.session_state['analysis']:
            st.markdown(st.session_state['analysis'])
            # Ask the user to choose the type of feedback
            feedback_type = st.selectbox(
                "Select the type of feedback:",
                ["Report", "Coach"]
            )
            # Feedback is stored in a separate HF dataset per feedback type.
            if feedback_type == "Report":
                dataset_id = "wu981526092/feedback_report"
            else:
                dataset_id = "wu981526092/feedback_coach"
            st.markdown(f"Provide feedback on the {feedback_type.lower()}:")
            criteria = {
                "Faithfulness": "Are all claims made in the answer inferred from the given context, i.e., not hallucinated?",
                "Answer Relevancy": "Is the answer relevant to the question?",
                "Context Relevancy": "Is the context relevant to the question?",
                "Correctness": "Is the answer factually correct, based on the context?",
                "Clarity": "Is the answer explained clearly without the extensive jargon of the original document?",
                "Completeness": "Is the question answered fully, with all parts and subquestions being addressed?",
            }
            ratings = {}
            for criterion, description in criteria.items():
                # Widget keys are namespaced by feedback type so the Report and
                # Coach sliders don't collide in session state.
                ratings[criterion] = st.slider(f"{criterion}: {description}", 0, 10, 5, key=f"{feedback_type} {criterion}")
            feedback = st.text_input("Provide additional feedback on the response:", key=f"{feedback_type} feedback")
            # FIX: the submit button was hard-coded to 'Submit Report Feedback'
            # even when 'Coach' feedback was selected.
            if st.button(f'Submit {feedback_type} Feedback'):
                if feedback_type == "Report":
                    store_feedback(str(generate_prompt_from_profile(profile, version=st.session_state['version'])), st.session_state['analysis'], feedback, str(ratings), dataset_id)
                else:
                    store_feedback(str(st.session_state['coach_query']), str(st.session_state['coach_response']), feedback, str(ratings), dataset_id)
                st.success("Feedback submitted successfully!")
    else:
        st.write("Please upload a profile JSON file or use the example profile.")
def verify_credentials():
    """Login-button callback: flip 'authenticated' when the entered
    credentials match the username_app/password_app environment variables."""
    expected = (os.getenv("username_app"), os.getenv("password_app"))
    supplied = (st.session_state['username'], st.session_state['password'])
    if supplied == expected:
        st.session_state['authenticated'] = True
    else:
        st.error("Invalid username or password")
def login_page():
    """Landing page for unauthenticated users: instructions plus a sidebar
    login form whose Login button triggers verify_credentials()."""
    st.title("Welcome to Metaprofiling's Career Insight Analyzer Demo")
    st.write(
        "This application provides in-depth analysis and insights into professional profiles. Please log in to continue.")
    st.markdown("""
    ## How to Use This Application
    - Enter your username and password in the sidebar.
    - Click on 'Login' to access the application.
    - Once logged in, you will be able to upload and analyze professional profiles.
    """)
    st.sidebar.write("Login:")
    # Credentials go straight into session state so the Login button's
    # on_click callback can read them.
    st.session_state['username'] = st.sidebar.text_input("Username")
    st.session_state['password'] = st.sidebar.text_input("Password", type="password")
    st.sidebar.button("Login", on_click=verify_credentials)
def sidebar_components():
    # Sidebar UI. Before an analysis exists (show_chat falsy) it handles
    # profile upload/selection; afterwards it switches to the career-advisor
    # chat backed by the RAG helpers from model.py.
    with st.sidebar:
        if st.button('Reset'):
            # Drop the current profile/analysis and start over.
            st.session_state['profile'] = None
            st.session_state['show_chat'] = None
            st.session_state['analysis'] = None
            st.rerun()
        if not st.session_state['show_chat']:
            # Instructions for JSON format
            st.markdown("### JSON File Requirements:")
            st.markdown("1. Must contain 'bio_information', 'main_profile', and 'red_flag' as top-level keys.")
            st.markdown("2. Both keys should have dictionary values.")
            st.markdown("### Choose the Definition:")
            # 'definition' selects which example file / prompt-template variant
            # to use; 'chat_context' picks what the coach chat is grounded on.
            st.session_state['definition'] = st.selectbox("Select Definition", [1, 2, 3])
            st.session_state['chat_context'] = st.selectbox("Select Chat Context", ["analysis", "profile"])
            # File uploader
            st.markdown("### Upload a profile JSON file")
            uploaded_file = st.file_uploader("", type=['json'])
            if uploaded_file is not None:
                try:
                    profile_data = json.load(uploaded_file)
                    # Only accept files that pass the structural check.
                    valid, message = validate_json(profile_data)
                    if valid:
                        st.session_state['profile'] = profile_data
                    else:
                        st.error(message)
                except json.JSONDecodeError:
                    st.error("Invalid JSON file. Please upload a valid JSON file.")
            # Button to load example profile (one bundled file per definition)
            if st.button('Use Example Profile'):
                if st.session_state['definition'] == 1:
                    file_name = "example_data_definition_1.json"
                elif st.session_state['definition'] == 2:
                    file_name = "example_data_definition_2.json"
                else:
                    file_name = "example_data_definition_3.json"
                with open(file_name, 'r') as file:
                    st.session_state['profile'] = json.load(file)
        else:
            st.sidebar.title("Chat with Our Career Advisor")
            #st.sidebar.markdown(
            #"Hello, we hope you learned something about yourself in this report. This chat is here so you can ask any questions you have about your report! It’s also a great tool to get ideas about how you can use the information in your report for your personal development and achieving your current goals.")
            # Name to be included in the questions
            # name = st.session_state['profile']['bio_information'].get('Name', 'the individual')
            # List of question templates where {} will be replaced with the name
            question_templates = [
                "What are the main risks associated with {}’s profile?",
                "What are the implications of {}’s profile for working with others?",
                # "What conclusions might we draw from his profile about {}’s style of leadership?",
                # "Looking specifically at {}'s Red Flags, are there any particular areas of concern?",
                # "Based on this profile, is {} better suited as a COO or a CEO?",
                # "If speed of execution is important, based on his profile, how likely is {} to be able to achieve this?",
                # "How is {} likely to react to business uncertainty and disruption?",
                # "Based on his profile, what should a coaching plan designed for {} focus on?"
            ]
            # Formatting each question template with the name
            questions_list = [question.format("Test Taker") for question in question_templates]
            # Prepare the questions for Markdown display
            questions_markdown = "\n\n".join(
                [f"Q{index + 1}: {question}" for index, question in enumerate(questions_list)])
            # Code to display in the app
            st.sidebar.markdown("### Suggest Questions")
            st.sidebar.markdown(questions_markdown)
            # st.sidebar.text_area("Suggested Questions", value=questions.choices[0].message.content, height=200, disabled=True)
            user_input = st.sidebar.text_input("Ask a question about the profile analysis:")
            # RAG plumbing from model.py: build LLM + embedding model, apply
            # global settings, then load/construct the document index.
            # NOTE(review): this runs on every rerun of the sidebar — looks
            # expensive; consider caching upstream (confirm in model.py).
            llm, embed_model = create_models()
            configure_settings(llm, embed_model)
            index = load_documents_and_create_index()
            if st.sidebar.button('Submit'):
                if user_input:
                    st.session_state['coach_query'] = str(user_input)
                    # Ground the chat either on the raw profile text or on the
                    # generated analysis, per the chat_context selection.
                    if st.session_state['chat_context'] == "profile":
                        chat_prompt_template = create_chat_prompt_template(get_profile_str(st.session_state['profile']),st.session_state['definition'])
                    else:
                        chat_prompt_template = create_chat_prompt_template(st.session_state['analysis'],st.session_state['definition'])
                    st.session_state['coach_response'] = execute_query(index, chat_prompt_template, user_input)
                st.sidebar.markdown(st.session_state['coach_response'])
# Seed st.session_state on first run so later lookups never raise KeyError.
session_defaults = {
    'show_chat': None,
    'definition': 1,
    'chat_context': "analysis",
    'profile': None,
    'analysis': None,
    'temperature': 0,
    'version': "",
    'username': '',
    'password': '',
    'authenticated': False,
    'coach_response': "",
    'coach_query': ""
}
for state_key, default_value in session_defaults.items():
    if state_key not in st.session_state:
        st.session_state[state_key] = default_value

# Route to the main app once authenticated; otherwise show the login page.
if st.session_state['authenticated']:
    main_app()
else:
    login_page()