Kuberwastaken committed on
Commit
02b8759
·
1 Parent(s): 4ac5b26

Fixing model glitching out

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -22,17 +22,18 @@ def create_agent():
22
  final_answer = FinalAnswerTool()
23
  resume_scraper = ResumeScraperTool()
24
 
25
- # Initialize a dedicated InferenceClient using the public endpoint.
26
- client = InferenceClient("https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud")
27
-
28
- # Instantiate HfApiModel with our dedicated client.
29
  model = HfApiModel(
30
  max_tokens=2096,
31
  temperature=0.5,
32
  model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
33
  custom_role_conversions=None,
34
- client=client
35
  )
 
 
 
 
 
36
 
37
  with open("prompts.yaml", 'r') as stream:
38
  prompt_templates = yaml.safe_load(stream)
@@ -45,4 +46,4 @@ def create_agent():
45
  prompt_templates=prompt_templates
46
  )
47
 
48
- return agent
 
22
  final_answer = FinalAnswerTool()
23
  resume_scraper = ResumeScraperTool()
24
 
25
+ # Instantiate HfApiModel using Qwen/Qwen2.5-Coder-32B-Instruct for roasting.
 
 
 
26
  model = HfApiModel(
27
  max_tokens=2096,
28
  temperature=0.5,
29
  model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
30
  custom_role_conversions=None,
 
31
  )
32
+
33
+ # Create a dedicated InferenceClient using your public endpoint
34
+ client = InferenceClient("https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud")
35
+ # Override the model's client with our dedicated client
36
+ model.client = client
37
 
38
  with open("prompts.yaml", 'r') as stream:
39
  prompt_templates = yaml.safe_load(stream)
 
46
  prompt_templates=prompt_templates
47
  )
48
 
49
+ return agent