Ntabukiraniro committed on
Commit
ffcd1d5
1 Parent(s): ac019e9

Update initialization.py

Browse files
Files changed (1) hide show
  1. initialization.py +51 -50
initialization.py CHANGED
@@ -27,70 +27,71 @@ def resume_reader(resume):
27
 
28
  def initialize_session_state(template=None, position=None):
29
  """ initialize session states """
 
 
30
  if 'jd' in st.session_state:
 
31
  st.session_state.docsearch = embedding(st.session_state.jd)
32
  else:
 
33
  st.session_state.docsearch = embedding(resume_reader(st.session_state.resume))
34
 
35
- #if 'retriever' not in st.session_state:
36
  st.session_state.retriever = st.session_state.docsearch.as_retriever(search_type="similarity")
37
- #if 'chain_type_kwargs' not in st.session_state:
38
  if 'jd' in st.session_state:
 
39
  Interview_Prompt = PromptTemplate(input_variables=["context", "question"],
40
- template=template)
41
  st.session_state.chain_type_kwargs = {"prompt": Interview_Prompt}
42
  else:
43
- st.session_state.chain_type_kwargs = prompt_sector(position, templates)
44
- #if 'memory' not in st.session_state:
 
 
45
  st.session_state.memory = ConversationBufferMemory()
46
- # interview history
47
- #if "history" not in st.session_state:
48
  st.session_state.history = []
49
- # token count
50
- #if "token_count" not in st.session_state:
51
  st.session_state.token_count = 0
52
- #if "guideline" not in st.session_state:
53
- llm = ChatOpenAI(
54
- model_name="gpt-3.5-turbo",
55
- temperature=0.6, )
56
- st.session_state.guideline = RetrievalQA.from_chain_type(
57
- llm=llm,
58
- chain_type_kwargs=st.session_state.chain_type_kwargs, chain_type='stuff',
59
- retriever=st.session_state.retriever, memory=st.session_state.memory).run(
60
- "Create an interview guideline and prepare only one questions for each topic. Make sure the questions tests the technical knowledge")
61
- # llm chain and memory
62
- #if "screen" not in st.session_state:
63
- llm = ChatOpenAI(
64
- model_name="gpt-3.5-turbo",
65
- temperature=0.8, )
66
- PROMPT = PromptTemplate(
67
- input_variables=["history", "input"],
68
- template="""I want you to act as an interviewer strictly following the guideline in the current conversation.
69
 
70
- Ask me questions and wait for my answers like a real person.
71
- Do not write explanations.
72
- Ask question like a real person, only one question at a time.
73
- Do not ask the same question.
74
- Do not repeat the question.
75
- Do ask follow-up questions if necessary.
76
- You name is GPTInterviewer.
77
- I want you to only reply as an interviewer.
78
- Do not write all the conversation at once.
79
- If there is an error, point it out.
80
 
81
- Current Conversation:
82
- {history}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
- Candidate: {input}
85
- AI: """)
86
- st.session_state.screen = ConversationChain(prompt=PROMPT, llm=llm,
87
- memory=st.session_state.memory)
88
- #if "feedback" not in st.session_state:
89
- llm = ChatOpenAI(
90
- model_name = "gpt-3.5-turbo",
91
- temperature = 0.5,)
92
  st.session_state.feedback = ConversationChain(
93
- prompt=PromptTemplate(input_variables = ["history", "input"], template = templates.feedback_template),
94
- llm=llm,
95
- memory = st.session_state.memory,
96
- )
 
 
 
27
 
28
  def initialize_session_state(template=None, position=None):
29
  """ initialize session states """
30
+ print("Initializing session state...")
31
+
32
  if 'jd' in st.session_state:
33
+ print("Using job description for embedding...")
34
  st.session_state.docsearch = embedding(st.session_state.jd)
35
  else:
36
+ print("Using resume for embedding...")
37
  st.session_state.docsearch = embedding(resume_reader(st.session_state.resume))
38
 
39
+ print("Creating retriever...")
40
  st.session_state.retriever = st.session_state.docsearch.as_retriever(search_type="similarity")
41
+
42
  if 'jd' in st.session_state:
43
+ print("Using job description prompt template...")
44
  Interview_Prompt = PromptTemplate(input_variables=["context", "question"],
45
+ template=template)
46
  st.session_state.chain_type_kwargs = {"prompt": Interview_Prompt}
47
  else:
48
+ print("Using position-specific prompt template...")
49
+ st.session_state.chain_type_kwargs = prompt_sector(position, templates)
50
+
51
+ print("Creating memory...")
52
  st.session_state.memory = ConversationBufferMemory()
53
+
54
+ print("Initializing history and token count...")
55
  st.session_state.history = []
 
 
56
  st.session_state.token_count = 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
+ print("Creating guideline...")
59
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.6)
60
+ st.session_state.guideline = RetrievalQA.from_chain_type(
61
+ llm=llm,
62
+ chain_type_kwargs=st.session_state.chain_type_kwargs,
63
+ chain_type='stuff',
64
+ retriever=st.session_state.retriever,
65
+ memory=st.session_state.memory
66
+ ).run("Create an interview guideline and prepare only one questions for each topic. Make sure the questions tests the technical knowledge")
 
67
 
68
+ print("Creating screen chain...")
69
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.8)
70
+ PROMPT = PromptTemplate(
71
+ input_variables=["history", "input"],
72
+ template="""I want you to act as an interviewer strictly following the guideline in the current conversation.
73
+ Ask me questions and wait for my answers like a real person.
74
+ Do not write explanations.
75
+ Ask question like a real person, only one question at a time.
76
+ Do not ask the same question.
77
+ Do not repeat the question.
78
+ Do ask follow-up questions if necessary.
79
+ You name is GPTInterviewer.
80
+ I want you to only reply as an interviewer.
81
+ Do not write all the conversation at once.
82
+ If there is an error, point it out.
83
+ Current Conversation:
84
+ {history}
85
+ Candidate: {input}
86
+ AI: """)
87
+ st.session_state.screen = ConversationChain(prompt=PROMPT, llm=llm, memory=st.session_state.memory)
88
 
89
+ print("Creating feedback chain...")
90
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.5)
 
 
 
 
 
 
91
  st.session_state.feedback = ConversationChain(
92
+ prompt=PromptTemplate(input_variables=["history", "input"], template=templates.feedback_template),
93
+ llm=llm,
94
+ memory=st.session_state.memory,
95
+ )
96
+
97
+ print("Session state initialized.")