IamRulo committed (verified)
Commit 08eb95e · 1 Parent(s): 5f0599c

Update agent.py

Files changed (1): agent.py (+3 -4)
agent.py CHANGED
@@ -684,6 +684,7 @@ def build_graph(provider: str = "huggingface"):
 #endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
 #endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-30B-A3B",
 endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B.Instruct",
+#endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-4B",
 temperature=0,
 ),
 )
@@ -692,10 +693,8 @@ def build_graph(provider: str = "huggingface"):
 llm=HuggingFaceEndpoint(
 #repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
 #endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
-#endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-4B",
-endpoint_url="https://api-inference.huggingface.co/models/microsoft/phi-4",
-token=https://huggingface.co/spaces/IamRulo/Final_Assignment_Template/settings/HF_TOKEN,
-
+#endpoint_url="https://api-inference.huggingface.co/models/microsoft/phi-4",
+endpoint_url="https://api-inference.huggingface.co/models/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
 #task="text-generation", # for chat‐style use "text-generation"
 #max_new_tokens=1024,
 #do_sample=False,
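
Note on the removed token= line: it was doubly broken. The bare URL is a Python syntax error (it is not quoted), and it points at the Space's settings page rather than at an actual token value. A minimal sketch of the conventional fix, assuming the HF_TOKEN secret configured on that settings page is exposed to the Space as an environment variable, and that huggingfacehub_api_token is the token parameter of langchain_huggingface's HuggingFaceEndpoint (a sketch under those assumptions, not the author's code):

import os

from langchain_huggingface import HuggingFaceEndpoint

# Read the secret from the environment instead of hardcoding it in agent.py.
# HF_TOKEN is assumed to be the Space secret that the removed line's URL
# pointed to; adjust the name to whatever the Space actually defines.
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    huggingfacehub_api_token=os.environ["HF_TOKEN"],
    temperature=0,
)

HuggingFaceEndpoint can also resolve a token from the standard Hugging Face environment variables on its own, which is likely why the commit simply drops the argument rather than repairing it.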