IliaLarchenko committed
Commit f02aeda
1 Parent(s): 324d83a

Fixed tests

Files changed (2)
  1. tests/candidate.py +1 -1
  2. tests/grader.py +2 -2
tests/candidate.py CHANGED
@@ -19,7 +19,7 @@ load_dotenv()
 
 
 def complete_interview(interview_type, exp_name, requirements="", difficulty="", topic="", model="gpt-3.5-turbo"):
-    client = OpenAI(url="https://api.openai.com/v1")
+    client = OpenAI(base_url="https://api.openai.com/v1")
     llm = LLMManager(config, prompts)
     llm_name = config.llm.name
 
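Note on the tests/candidate.py change: the openai>=1.0 Python client is configured via the base_url keyword; url is not an accepted parameter, which is presumably why the test setup failed. A minimal sketch of the corrected construction (the URL shown is just the public API default):

from openai import OpenAI

# base_url is the supported keyword in the openai>=1.0 client; an unknown
# keyword such as url would be rejected with a TypeError at construction.
# The API key is read from the OPENAI_API_KEY environment variable, which
# the tests load via load_dotenv().
client = OpenAI(base_url="https://api.openai.com/v1")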
tests/grader.py CHANGED
@@ -6,7 +6,7 @@ from tests.testing_prompts import grader_prompt
 
 
 def grade(json_file_path, model="gpt-4-turbo"):
-    client = OpenAI(url="https://api.openai.com/v1")
+    client = OpenAI(base_url="https://api.openai.com/v1")
 
     with open(json_file_path) as file:
         interview_data = json.load(file)
@@ -32,7 +32,7 @@ def grade(json_file_path, model="gpt-4-turbo"):
     scores = [
         feedback[x]
         for x in feedback
-        if x.startswith("interviewer_") or x.startswith("feedback_") or x.startswith("problem_") and feedback[x] is not None
+        if (x.startswith("interviewer_") or x.startswith("feedback_") or x.startswith("problem_")) and feedback[x] is not None
     ]
     feedback["overall_score"] = sum(scores) / len(scores)
 
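Note on the second tests/grader.py change: in Python, and binds more tightly than or, so the original filter only applied the feedback[x] is not None check to keys starting with "problem_", letting None scores reach sum(). The added parentheses make the check apply to every matching key. A small illustration with made-up feedback keys (not the project's actual feedback schema):

# Hypothetical feedback dict, only to show the precedence difference.
feedback = {"interviewer_pace": None, "problem_clarity": 4, "feedback_tone": 5}

# Buggy filter: `and` only guards the last startswith(), so None slips through.
buggy = [
    feedback[x]
    for x in feedback
    if x.startswith("interviewer_") or x.startswith("feedback_") or x.startswith("problem_") and feedback[x] is not None
]

# Fixed filter: the parenthesized `or` chain is evaluated first.
fixed = [
    feedback[x]
    for x in feedback
    if (x.startswith("interviewer_") or x.startswith("feedback_") or x.startswith("problem_")) and feedback[x] is not None
]

print(buggy)  # [None, 4, 5] -> sum(buggy) would raise TypeError
print(fixed)  # [4, 5]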