JarvisChan630 committed
Commit e71eca9
1 Parent(s): af2994b
.gitignore CHANGED
@@ -1,4 +1,5 @@
 /config/config.yaml
+.env
 __pycache__/
 *.pyc
 /.vscode
README.md CHANGED
@@ -26,6 +26,10 @@ Thanks John Adeojo, who brings this wonderful project to open source community!
 - Hugging Face deploy
 
 ## TODO
+[] fix local docker bugs,local test
+[] deploy to Huggingface
+
+
 [] Long-term memory.
 [] Full Ollama and vLLM integration.
 [] Integrations to RAG platforms for more intelligent document processing and faster RAG.
agents/meta_agent.py CHANGED
@@ -473,7 +473,7 @@ if __name__ == "__main__":
 break
 
 # current_time = datetime.now()
-recursion_limit = 15
+recursion_limit = 30
 state["recursion_limit"] = recursion_limit
 state["user_input"] = query
 limit = {"recursion_limit": recursion_limit}
chat.py CHANGED
@@ -19,10 +19,11 @@ from agents.jar3d_agent import (State,
 from agents.base_agent import BaseAgent
 from utils.read_markdown import read_markdown_file
 from config.load_configs import load_config
+from dotenv import load_dotenv
 
 
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
 
 # Load environment variables from .env file
 load_dotenv()
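Together with the new `.env` entry in .gitignore, the intent is that API keys come from an untracked `.env` file instead of `config/config.yaml`. A minimal sketch of that pattern, assuming a `.env` file in the working directory holding the keys referenced elsewhere in this commit:

    # Sketch, assuming a local .env file such as:
    #   OPENAI_API_KEY=sk-...
    #   MISTRAL_API_KEY=...
    import os
    from dotenv import load_dotenv

    load_dotenv()  # reads .env from the current working directory into os.environ

    openai_key = os.getenv("OPENAI_API_KEY")          # read by OpenAIModel in models/llms.py
    mistral_key = os.environ.get("MISTRAL_API_KEY")   # read by MistralModel in models/llms.py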
models/llms.py CHANGED
@@ -7,6 +7,8 @@ from typing import List, Dict
 from utils.logging import log_function, setup_logging
 from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
 from config.load_configs import load_config
+from dotenv import load_dotenv
+
 
 setup_logging(level=logging.DEBUG)
 logger = logging.getLogger(__name__)
@@ -30,6 +32,7 @@ class MistralModel(BaseModel):
 super().__init__(temperature, model, json_response, max_retries, retry_delay)
 config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
 load_config(config_path)
+# load_config()
 self.api_key = os.environ.get("MISTRAL_API_KEY")
 self.headers = {
 'Content-Type': 'application/json',
@@ -389,8 +392,9 @@ class VllmModel(BaseModel):
 class OpenAIModel(BaseModel):
 def __init__(self, temperature: float, model: str, json_response: bool, max_retries: int = 3, retry_delay: int = 1):
 super().__init__(temperature, model, json_response, max_retries, retry_delay)
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
+load_dotenv()
 self.model_endpoint = 'https://api.302.ai/v1/chat/completions'
 self.api_key = os.getenv('OPENAI_API_KEY')
 self.headers = {
tools/google_serper.py CHANGED
@@ -6,6 +6,7 @@ sys.path.insert(0, root_dir)
 import requests
 from typing import Dict, Any
 from config.load_configs import load_config
+from dotenv import load_dotenv
 
 def format_results(organic_results: str) -> str:
 result_strings = []
@@ -33,8 +34,9 @@ def format_shopping_results(shopping_results: list) -> str:
 return '\n'.join(result_strings)
 
 def serper_search(query: str, location: str) -> Dict[str, Any]:
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
+load_dotenv()
 search_url = "https://google.serper.dev/search"
 headers = {
 'Content-Type': 'application/json',
@@ -63,8 +65,9 @@ def serper_search(query: str, location: str) -> Dict[str, Any]:
 return f"JSON decoding error occurred: {json_err}"
 
 def serper_shopping_search(query: str, location: str) -> Dict[str, Any]:
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
+load_dotenv()
 search_url = "https://google.serper.dev/shopping"
 headers = {
 'Content-Type': 'application/json',
@@ -89,8 +92,9 @@ def serper_shopping_search(query: str, location: str) -> Dict[str, Any]:
 return f"JSON decoding error occurred: {json_err}"
 
 def serper_scholar_search(query: str, location: str) -> Dict[str, Any]:
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
+load_dotenv()
 search_url = "https://google.serper.dev/scholar"
 headers = {
 'Content-Type': 'application/json',
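Each search helper now calls `load_dotenv()` on entry instead of loading `config.yaml`. A sketch of the resulting request pattern; the `SERPER_API_KEY` variable name and the `X-API-KEY` header are assumptions based on serper.dev's public API, since the hunks above do not show how the key is attached:

    # Sketch, not the repo's exact code: a Serper search whose key comes from .env.
    import os
    import requests
    from dotenv import load_dotenv

    def serper_search_sketch(query: str, location: str) -> dict:
        load_dotenv()  # mirrors the per-function call added in this commit
        headers = {
            'Content-Type': 'application/json',
            'X-API-KEY': os.getenv('SERPER_API_KEY', ''),  # assumed env var name
        }
        payload = {"q": query, "gl": location}
        response = requests.post("https://google.serper.dev/search", headers=headers, json=payload)
        response.raise_for_status()
        return response.json()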
tools/legacy/offline_rag_tool.py CHANGED
@@ -24,13 +24,15 @@ from config.load_configs import load_config
 from langchain_community.docstore.in_memory import InMemoryDocstore
 from fake_useragent import UserAgent
 from multiprocessing import Pool, cpu_count
+from dotenv import load_dotenv
 
 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, root_dir)
 
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
 
+load_dotenv()
 ua = UserAgent()
 os.environ["USER_AGENT"] = ua.random
 os.environ["FAISS_OPT_LEVEL"] = "generic"
tools/legacy/rag_tool.py CHANGED
@@ -23,13 +23,15 @@ from langchain.schema import Document
 from config.load_configs import load_config
 from langchain_community.docstore.in_memory import InMemoryDocstore
 from fake_useragent import UserAgent
+from dotenv import load_dotenv
 
 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, root_dir)
 
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
 
+load_dotenv()
 ua = UserAgent()
 os.environ["USER_AGENT"] = ua.random
 os.environ["FAISS_OPT_LEVEL"] = "generic"
tools/offline_graph_rag_tool_with_async.py CHANGED
@@ -26,12 +26,14 @@ from config.load_configs import load_config
 from langchain_community.docstore.in_memory import InMemoryDocstore
 from fake_useragent import UserAgent
 import asyncio
+from dotenv import load_dotenv
 
 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, root_dir)
 
-config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
-load_config(config_path)
+# config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.yaml')
+# load_config(config_path)
+load_dotenv()
 
 ua = UserAgent()
 os.environ["USER_AGENT"] = ua.random