Mehmet-Ali committed on
Commit
4bb79bc
·
verified ·
1 Parent(s): 282d80e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -5,12 +5,14 @@ import pytz
5
  import yaml
6
  import os
7
  from tools.final_answer import FinalAnswerTool
 
8
  # import mapbox methods
9
  from tools.mapbox_tools import geocode_location, search_nearby_places
10
 
11
  from Gradio_UI import GradioUI
12
 
13
-
 
14
  final_answer = FinalAnswerTool()
15
 
16
  # Create a DuckDuckGo search tool
@@ -19,15 +21,15 @@ search_tool = DuckDuckGoSearchTool()
19
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
20
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
21
 
22
- model = HfApiModel(
23
- max_tokens=2096,
24
- temperature=0.5,
25
- model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
26
  #model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
27
- custom_role_conversions=None,
28
- )
29
  # Use this as an alternative
30
- #model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=os.getenv(key="GEMINI_API_KEY"))
31
 
32
  with open("prompts.yaml", 'r') as stream:
33
  prompt_templates = yaml.safe_load(stream)
 
5
  import yaml
6
  import os
7
  from tools.final_answer import FinalAnswerTool
8
+ from dotenv import load_dotenv
9
  # import mapbox methods
10
  from tools.mapbox_tools import geocode_location, search_nearby_places
11
 
12
  from Gradio_UI import GradioUI
13
 
14
+ # Load environment variables from .env file
15
+ load_dotenv()
16
  final_answer = FinalAnswerTool()
17
 
18
  # Create a DuckDuckGo search tool
 
21
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
22
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
23
 
24
+ #model = HfApiModel(
25
+ #max_tokens=2096,
26
+ #temperature=0.5,
27
+ #model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
28
  #model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
29
+ #custom_role_conversions=None,
30
+ #)
31
  # Use this as an alternative
32
+ model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=os.getenv(key="GEMINI_API_KEY"))
33
 
34
  with open("prompts.yaml", 'r') as stream:
35
  prompt_templates = yaml.safe_load(stream)