Renamed HF env vars
llm_graph.py  CHANGED  (+4, -4)
@@ -16,8 +16,8 @@ from lightrag.kg.shared_storage import initialize_pipeline_status
 load_dotenv()
 
 # Load the environment variables
-
-
+HF_API_TOKEN = os.environ["HF_TOKEN"]
+HF_API_ENDPOINT = os.environ["HF_API_ENDPOINT"]
 
 AZURE_OPENAI_API_VERSION = os.environ["AZURE_OPENAI_API_VERSION"]
 AZURE_OPENAI_DEPLOYMENT = os.environ["AZURE_OPENAI_DEPLOYMENT"]
@@ -81,8 +81,8 @@ class LLMGraph:
 
         # Hugging Face Inference API for Phi-3-mini-128k-instruct-graph
         self.hf_client = InferenceClient(
-            model=
-            token=
+            model=HF_API_ENDPOINT,
+            token=HF_API_TOKEN
         )
 
         self.rag = None  # Lazy loading of RAG instance
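For anyone applying this change locally, here is a minimal sketch of how the renamed variables are expected to flow end to end. The env var names (HF_TOKEN, HF_API_ENDPOINT) and the InferenceClient call come from the diff above; the example .env values and the endpoint URL format are hypothetical placeholders, not part of the commit.

import os
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Example .env contents (values are hypothetical placeholders):
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx
#   HF_API_ENDPOINT=https://my-endpoint.endpoints.huggingface.cloud

load_dotenv()

# The renamed variables from this commit; os.environ[...] fails fast
# with a KeyError if either key is missing from the environment.
HF_API_TOKEN = os.environ["HF_TOKEN"]
HF_API_ENDPOINT = os.environ["HF_API_ENDPOINT"]

# InferenceClient accepts either a Hub model id or a full Inference
# Endpoint URL as `model`, so passing HF_API_ENDPOINT works for a
# dedicated endpoint serving Phi-3-mini-128k-instruct-graph.
hf_client = InferenceClient(model=HF_API_ENDPOINT, token=HF_API_TOKEN)

On a running Space, HF_TOKEN and HF_API_ENDPOINT would typically be set as Space secrets/variables rather than read from a checked-in .env file; load_dotenv() is a no-op when no .env file is present, so both setups work with the same code.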