Update README.md
Browse files
README.md
CHANGED
@@ -94,8 +94,13 @@ To load the quantized model and perform inference, you will need the `llama_cpp`
 94 |   from llama_cpp import Llama
 95 |   import os
 96 |
 97 |   # Ensure the model path exists
 98 | - MODEL_PATH = "/content/unsloth.Q4_K_M.gguf"
 99 |   assert os.path.exists(MODEL_PATH), f"Model path {MODEL_PATH} does not exist."
100 |
101 |   # Create the prompt for SQL query generation
 94 |   from llama_cpp import Llama
 95 |   import os
 96 |
 97 | + # Get the current directory
 98 | + current_directory = os.getcwd()
 99 | +
100 | + # Construct the full model path
101 | + MODEL_PATH = os.path.join(current_directory, "unsloth.Q4_K_M.gguf")
102 | +
103 |   # Ensure the model path exists
104 |   assert os.path.exists(MODEL_PATH), f"Model path {MODEL_PATH} does not exist."
105 |
106 |   # Create the prompt for SQL query generation