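"""Streamlit page that summarizes a user's professional profile.

Collects the responses saved by the other sections of the app (core values,
strengths, partner assessment, network feedback, skills, priorities, and
preferences) from st.session_state, displays them, and sends them to the
Gemini API to generate a career summary with upskilling suggestions.
"""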
import streamlit as st
import google.generativeai as genai
import os

# Return True only when every section has a non-empty set of responses
def check_all_responses_filled(responses):
    return all(responses.values())

# Configure the Google Generative AI API if the API key is available
def configure_genai_api():
    api_key = "AIzaSyDidbVQLrcwKuNEryNTwZCaLGiVQGmi6g0"

    if api_key is None:
        st.error("API key not found. Please set the GENAI_API_KEY environment variable.")
        return None
    else:
        genai.configure(api_key=api_key)

        generation_config = {
            "temperature": 0.9,
            "top_p": 1,
            "top_k": 40,
            "max_output_tokens": 2048,
        }

        safety_settings = [
            {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        ]

        return genai.GenerativeModel(model_name="gemini-1.0-pro",
                                     generation_config=generation_config,
                                     safety_settings=safety_settings)

# Function to combine responses from all sections
def combine_responses(saved_data):
    def extract_strings(data):
        if isinstance(data, dict):
            return " ".join([extract_strings(value) for value in data.values()])
        elif isinstance(data, list):
            return " ".join([extract_strings(item) for item in data])
        elif isinstance(data, str):
            return data
        else:
            return ""

    combined_responses = []
    for section, responses in saved_data.items():
        # Sections may be stored as dicts, lists, or plain strings;
        # extract_strings flattens any of these into a single text blob.
        combined_responses.append(extract_strings(responses))
    return " ".join(combined_responses)


def app():
    st.header("Summary of Your Professional Profile")
    
    # Check for environment variable and configure the API
    model = configure_genai_api()
    if not model:
        return

    # Display and collect all responses
    all_responses = {
        'core_values': st.session_state.get('core_values_responses'),
        'strengths': st.session_state.get('strength_responses'),
        'partner_assessment': st.session_state.get('stories_feedback'),
        'network_feedback': st.session_state.get('network_feedback'),
        'skills': st.session_state.get('core_skills_responses'),
        'priorities': st.session_state.get('priorities_data_saved'),
        'preferences': st.session_state.get('preferences_responses'), 
    }
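    # Each entry above is whatever the corresponding section page stored in
    # st.session_state: typically a dict mapping question text to the user's
    # answer, or a list of free-form responses (both shapes are handled below).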

    # Display all collected responses
    for section, responses in all_responses.items():
        if responses:
            st.subheader(f"Your {section.replace('_', ' ').title()}:")
            if isinstance(responses, dict):
                for question, response in responses.items():
                    st.text(f"Q: {question}")
                    st.text(f"A: {response}")
            elif isinstance(responses, list):
                for response in responses:
                    st.text(response)
            st.write("---")  # Separator line

    # Check if all sections are completed
    all_responses_filled = check_all_responses_filled(all_responses)

    if st.button("Generate My Career Summary") and all_responses_filled:
        combined_responses_text = combine_responses(all_responses)
        prompt_template = """Make a summary of skills and experience. And future projection of what job a person can do next
Considering all above question responses including values, strengths, weaknesses
Recommend the best next career option this person can do and what upskilling they require
Suggest best certifications, best courses for this person
"""
        prompt = prompt_template + combined_responses_text
        
        try:
            # Attempt to generate a comprehensive career summary and future projection
            response = model.generate_content([prompt])
            if response and hasattr(response, 'parts'):
                career_summary = ''.join(part.text for part in response.parts)
                st.subheader("Career Summary and Projection")
                st.write("Based on all the information you've provided, here's a comprehensive summary and future career projection tailored to you:")
                st.info(career_summary)
            else:
                st.error("The response from the API was not in the expected format.")
        except Exception as e:
            st.error("An error occurred while generating your career summary. Please try again later.")
            st.error(f"Error: {e}")
    elif not all_responses_filled:
        st.error("Please ensure all sections are completed to receive your comprehensive career summary.")

if __name__ == "__main__":
    app()