jafhaponiuk committed on
Commit
864653e
·
verified ·
1 Parent(s): 63938a3

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +6 -7
agent.py CHANGED
@@ -11,8 +11,7 @@ from langchain_core.output_parsers import StrOutputParser # Explicitly imported
11
 
12
  from langgraph.graph import StateGraph, END
13
 
14
- # --- Revert to Hugging Face LLM imports ---
15
- from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace # Correct imports for Hugging Face LLMs
16
 
17
  # Import tools_for_llm from your tools.py file
18
  from tools import tools_for_llm
@@ -24,13 +23,13 @@ load_dotenv()
24
  # --- Initialize the language model (using Hugging Face Inference Endpoint) ---
25
  # Ensure that your HF_TOKEN is available as an environment variable
26
  # If using a specific endpoint, verify it's correct.
27
- endpoint = HuggingFaceEndpoint(
28
- # endpoint_url="https://api-inference.huggingface.co/models/meta-llama/Llama-3-70b-instruct", # Using Llama-3-70b-instruct as per your original code
29
- # endpoint_url="https://api-inference.huggingface.co/models/meta-llama/Llama-3-8b-instruct",
30
  endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
31
  temperature=0.01, # Keep a low temperature for consistent tool-use decisions
 
 
32
  )
33
- llm = ChatHuggingFace(llm=endpoint)
34
 
35
  # Load the system prompt from file
36
  with open("system_prompt.txt", "r", encoding="utf-8") as f:
@@ -60,7 +59,7 @@ def get_tool_descriptions(tool_list):
60
  for k, v in pydantic_args.items():
61
  arg_type = v.get('type', 'any')
62
  arg_description = v.get('description', '')
63
- args_str_list.append(f"{k}: {arg_type} ({arg_description})")
64
  if args_str_list:
65
  args_schema_str = f"Arguments: ({', '.join(args_str_list)})"
66
  except Exception:
 
11
 
12
  from langgraph.graph import StateGraph, END
13
 
14
+ from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings, HuggingFaceTextGenInference
 
15
 
16
  # Import tools_for_llm from your tools.py file
17
  from tools import tools_for_llm
 
23
  # --- Initialize the language model (using Hugging Face Inference Endpoint) ---
24
  # Ensure that your HF_TOKEN is available as an environment variable
25
  # If using a specific endpoint, verify it's correct.
26
+
27
+ llm = HuggingFaceTextGenInference(
 
28
  endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
29
  temperature=0.01, # Keep a low temperature for consistent tool-use decisions
30
+ # If your HF_TOKEN is not being loaded automatically, you can add it here:
31
+ # huggingfacehub_api_token=os.getenv("HF_TOKEN")
32
  )
 
33
 
34
  # Load the system prompt from file
35
  with open("system_prompt.txt", "r", encoding="utf-8") as f:
 
59
  for k, v in pydantic_args.items():
60
  arg_type = v.get('type', 'any')
61
  arg_description = v.get('description', '')
62
+ args_str_list.append(f"{k}: {arg_type}: {arg_description}") # Typo fixed here: arg_type: arg_description
63
  if args_str_list:
64
  args_schema_str = f"Arguments: ({', '.join(args_str_list)})"
65
  except Exception: