Sa-m committed on
Commit
d20645e
·
verified ·
1 Parent(s): 3171354

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +137 -0
  2. evaluation.py +49 -0
app.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import PyPDF2
3
+ import os
4
+ import google.generativeai as genai
5
+ import tensorflow as tf
6
+ from transformers import BertTokenizer, TFBertModel
7
+ import numpy as np
8
+ import math
9
+ import speech_recognition as sr
10
+ import gtts
11
+ from streamlit.components.v1 import html
12
+ import time
13
+
14
+
15
+ from dotenv import load_dotenv
16
+ load_dotenv()
17
+ # no wide mode
18
+ st.set_page_config(page_title="Streamlit App", page_icon=":shark:", layout="centered", initial_sidebar_state="auto")
19
+
20
+ st.title("Mock Interview")
21
+
22
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
23
+ text_model= genai.GenerativeModel("gemini-pro")
24
+
25
+ st.write("Welcome to the mock interview app. This app will help you prepare for your next interview. You can practice your responses to common interview questions and receive feedback on your responses.")
26
+
27
+ def getallinfo(data):
28
+ text = f"{data} is not properly formatted for this model. Please try again and format the whole in a single paragraph covering all the information."
29
+ response = text_model.generate_content(text)
30
+ response.resolve()
31
+ return response.text
32
+
33
+ def file_processing(uploaded_file):
34
+ # upload pdf of resume
35
+ reader = PyPDF2.PdfReader(uploaded_file)
36
+ text = ""
37
+ for page in reader.pages:
38
+ text += page.extract_text()
39
+ return text
40
+
41
+
42
+ # Load the pre-trained BERT model
43
+ model = TFBertModel.from_pretrained("bert-base-uncased")
44
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
45
+
46
+ # Function to preprocess text and get embeddings
47
+ def get_embedding(text):
48
+ encoded_text = tokenizer(text, return_tensors="tf")
49
+ output = model(encoded_text)
50
+ embedding = output.last_hidden_state[:, 0, :]
51
+ return embedding
52
+
53
+ # Function to generate feedback (replace with your logic)
54
+ def generate_feedback(question, answer):
55
+ # Ensure correct variable name (case-sensitive)
56
+ question_embedding = get_embedding(question)
57
+ answer_embedding = get_embedding(answer)
58
+
59
+ # Enable NumPy-like behavior for transpose
60
+ tf.experimental.numpy.experimental_enable_numpy_behavior()
61
+
62
+ # Calculate similarity score (cosine similarity)
63
+ similarity_score = np.dot(question_embedding, answer_embedding.T) / (np.linalg.norm(question_embedding) * np.linalg.norm(answer_embedding))
64
+
65
+ # Generate basic feedback based on similarity score
66
+ corrected_string = f"Feedback: {np.array2string(similarity_score, precision=2)}"
67
+ # print(corrected_string)
68
+ return np.array2string(similarity_score, precision=2)
69
+
70
+ def generate_questions(roles, data):
71
+ questions = []
72
+ text = f"If this is not a resume then return text uploaded pdf is not a resume. this is a resume overview of the candidate. The candidate details are in {data}. The candidate has applied for the role of {roles}. Generate questions for the candidate based on the role applied and on the Resume of the candidate. Not always necceassary to ask only technical questions related to the role. Ask some personal questions too. Ask no additional questions. Dont categorize the questions. No of questions should range from 1-3 questions only. Ask one question at a time only."
73
+ response = text_model.generate_content(text)
74
+ response.resolve()
75
+ # slipt the response into questions either by \n or by ? or by . or by !
76
+ questions = response.text.split("\n")
77
+
78
+ return questions
79
+
80
+
81
+ def generate_overall_feedback(data, percent, answer, questions):
82
+ percent = float(percent)
83
+ if percent > 0.5:
84
+ test = f"Here is the overview of the candidate {data}. In the interview the questions asked were {questions}. The candidate has answered the questions as follows: {answer}. Based on the answers provided, the candidate has scored {percent}. The candidate has done well in the interview. The candidate has answered the questions well and has a good understanding of the concepts. The candidate has scored well in the interview. The candidate has scored {percent} in the interview. The candidate has done well in the interview. The candidate has answered the questions well and has a good understanding of the concepts. The candidate has scored well in the interview. The candidate has scored {percent} in the interview."
85
+ else:
86
+ test = f"Here is the overview of the candidate {data}. In the interview the questions asked were {questions}. The candidate has answered the questions as follows: {answer}. Based on the answers provided, the candidate has scored {percent}. tell the average percent and rate the interview out of 10. Give the feedback to the candidate about the interview and areas of improvements. While talking to candidate always take their name. give the candidate various ways to improve their interview skills. The candidate needs to know about where they are going wrong and the solution to the issues they are having during the interview."
87
+ # st.write(test)
88
+ response = text_model.generate_content(test)
89
+ response.resolve()
90
+ return response.text
91
+
92
+ def store_audio_text():
93
+ r = sr.Recognizer()
94
+ with sr.Microphone() as source:
95
+ st.error("Speak now")
96
+ audio = r.listen(source)
97
+ try:
98
+ text = r.recognize_google(audio)
99
+ # st.success(f"Your Answer: {text}")
100
+ return text
101
+ except:
102
+ st.error("Sorry could not recognize your voice")
103
+ return " "
104
+
105
+ uploaded_file = st.file_uploader("Upload your resume in simple Document Format", type=["pdf"])
106
+ roles_applied = []
107
+ if uploaded_file is not None:
108
+ st.write("File uploaded successfully!")
109
+ data = file_processing(uploaded_file)
110
+ # st.write(data)
111
+ # st.write(getallinfo(data))
112
+ updated_data = getallinfo(data)
113
+ # st.write(updated_data)
114
+ roles = st.multiselect("Select your job role:", ["Data Scientist", "Software Engineer", "Product Manager", "Data Analyst", "Business Analyst"])
115
+ if roles:
116
+ roles_applied.append(roles)
117
+ st.write(f"Selected roles: {roles}")
118
+ questions = generate_questions(roles, updated_data)
119
+ feedback = []
120
+ answers = []
121
+ ans = ""
122
+ interaction = {}
123
+ for i in range(len(questions)):
124
+ st.write(questions[i])
125
+ ans = store_audio_text()
126
+ st.success(ans)
127
+ answers.append(ans)
128
+ percent = 0.0
129
+ percent = generate_feedback(questions[i], answers[i])
130
+ print(percent)
131
+ feedback.append(generate_overall_feedback(data, percent, answers[i], questions[i]))
132
+ interaction[questions[i]] = answers[i]
133
+ if st.button("Submit"):
134
+ for i in range(len(questions)):
135
+ st.write(interaction[questions[i]])
136
+ st.write(feedback[i])
137
+ # st.write("Thank you for your responses!")
evaluation.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import matplotlib.pyplot as plt
3
+ st.set_page_config(layout="wide")
4
+ # Define metrics and random ratings (customize if needed)
5
+ metrics = {
6
+ "Communication skills": 7,
7
+ "Teamwork and collaboration": 8,
8
+ "Problem-solving and critical thinking": 9,
9
+ "Time management and organization": 6,
10
+ "Adaptability and resilience": 8,
11
+ }
12
+ col1, col2 = st.columns(2)
13
+
14
+ # Calculate overall average rating
15
+ average_rating = sum(metrics.values()) / len(metrics)
16
+
17
+ # Option 1: Full width containers
18
+ with col1:
19
+ st.header("Hey Deven, we have evaluated your interview:")
20
+ # Display metrics and progress bars
21
+ for metric, rating in metrics.items():
22
+ st.subheader(metric)
23
+ st.write(f"Rating: {rating}")
24
+ progress_bar_width = int(200 * rating / 10)
25
+ st.markdown(f"<div style='background-color: lightblue; width: {progress_bar_width}px; height: 10px;'></div>", unsafe_allow_html=True)
26
+
27
+ with col2:
28
+ st.header("Areas for improvement based on your answers:")
29
+ # Create and display pie chart
30
+ plt.figure(figsize=(4, 4))
31
+ plt.pie(metrics.values(), labels=metrics.keys(), autopct="%1.1f%%")
32
+ plt.axis("equal")
33
+ st.pyplot(use_container_width=True)
34
+
35
+ st.subheader(f"Overall average rating: {average_rating:.2f}")
36
+ # Use Markdown for rich text and flexibility
37
+ improvement_content = """
38
+
39
+ * **Communication:** Focus on clarity, conciseness, and tailoring your responses to the audience. Use examples and evidence to support your points.
40
+ * **Teamwork and collaboration:** Highlight your teamwork skills through specific examples and demonstrate your ability to work effectively with others.
41
+ * **Problem-solving and critical thinking:** Clearly explain your problem-solving approach and thought process. Show your ability to analyze information and arrive at logical solutions.
42
+ * **Time management and organization:** Emphasize your ability to manage time effectively and stay organized during challenging situations.
43
+ * **Adaptability and resilience:** Demonstrate your ability to adapt to new situations and overcome challenges. Share examples of how you have handled unexpected situations or setbacks in the past.
44
+
45
+ **Remember:** This is just a starting point. Customize the feedback based on the specific strengths and weaknesses identified in your interview.
46
+
47
+ """
48
+
49
+ st.markdown(improvement_content)