Update llm/llamacpp/lc_model.py

llm/llamacpp/lc_model.py (+16 −14)
@@ -32,8 +32,9 @@ try:
 except FileNotFoundError:
     print("Error: Could not move up. You might be at the root directory.")

-
-
+work_dir = os.getcwd()
+
+models_dir = os.path.join(work_dir, "models")


 class LC_TinyLlama(LCInterface, ABC):
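A note on this hunk: the new module-level work_dir and models_dir resolve against whatever directory the process was launched from, and the diff does not show models/ being created. A minimal sketch of a CWD-independent alternative; anchoring to __file__ and the makedirs call are suggestions, not part of the commit:

    import os

    # Hypothetical alternative: resolve paths relative to lc_model.py itself,
    # so imports from another working directory still find models/ and prompts.yaml.
    work_dir = os.path.dirname(os.path.abspath(__file__))
    models_dir = os.path.join(work_dir, "models")
    os.makedirs(models_dir, exist_ok=True)  # make sure the download target exists
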
@@ -42,11 +43,12 @@ class LC_TinyLlama(LCInterface, ABC):
         self.prompt_id = prompt_id

         self.model_config = config["LC_TinyLlama-1.1B-Chat-v1.0-GGUF"]
+

         try:
             get_file = requests.get(self.model_config["model_url"])
             if get_file.status_code == 200:
-                path_to_model = os.path.join(
+                path_to_model = os.path.join(models_dir, self.model_config["model_name"])
                 with open(path_to_model, "wb") as f:
                     f.write(get_file.content)
                 logger.info("Model file successfully recorded")
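In this hunk, get_file.content still buffers the entire GGUF file in memory before the write, which can mean several gigabytes for chat models. A hedged sketch of a streamed download using requests' stream/iter_content API; the function name and chunk size are illustrative, not from the commit:

    import requests

    def download_model(model_url, path_to_model, chunk_size=1 << 20):
        # Stream the response so the model file is written in chunks
        # instead of being held in memory all at once.
        with requests.get(model_url, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            with open(path_to_model, "wb") as f:
                for chunk in resp.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
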
@@ -61,7 +63,7 @@ class LC_TinyLlama(LCInterface, ABC):
     @staticmethod
     def __read_yaml():
         try:
-            yaml_file = os.path.join(
+            yaml_file = os.path.join(work_dir, 'prompts.yaml')
             with open(yaml_file, 'r') as file:
                 data = yaml.safe_load(file)
                 return data
@@ -78,7 +80,7 @@ class LC_TinyLlama(LCInterface, ABC):
         prompt = PromptTemplate(template=template, input_variables=["entity"])

         llm = LlamaCpp(
-            model_path=os.path.join(
+            model_path=os.path.join(models_dir, self.model_config["model_name"]),
             temperature=self.model_config["temperature"],
             max_tokens=self.model_config["max_tokens"],
             top_p=self.model_config["top_p"],
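For orientation, LlamaCpp here is LangChain's llama.cpp wrapper, which loads the model from model_path at construction time, so a wrong models_dir path should fail fast. A usage sketch under that assumption; the file name and sampling values are placeholders rather than values from the config:

    from langchain_community.llms import LlamaCpp  # assumed import path

    llm = LlamaCpp(
        model_path="models/tinyllama-1.1b-chat.Q4_K_M.gguf",  # hypothetical file
        temperature=0.3,
        max_tokens=256,
        top_p=0.9,
        verbose=False,
    )
    print(llm.invoke("One sentence on what GGUF is."))
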
@@ -111,11 +113,11 @@ class LC_TinyLlama(LCInterface, ABC):

     def get_unused(self, current_lc):

-        if len(os.listdir(
-            file_names = [os.path.basename(md) for md in os.listdir(
+        if len(os.listdir(models_dir)) > 1:
+            file_names = [os.path.basename(md) for md in os.listdir(models_dir)]
             for item in file_names:
                 if item != current_lc:
-                    unused_model_file = os.path.join(
+                    unused_model_file = os.path.join(models_dir, item)
                     return {item: unused_model_file}
         else:
             return None
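As written, get_unused returns on the first file name that differs from current_lc, so when models/ holds three or more files only one unused model is reported per call. A sketch of a variant that collects every unused file; only models_dir and the return contract are taken from the diff:

    import os

    def get_unused(models_dir, current_lc):
        # Map every model file except the active one to its full path;
        # an empty dict collapses to None to match the original contract.
        unused = {
            name: os.path.join(models_dir, name)
            for name in os.listdir(models_dir)
            if name != current_lc
        }
        return unused or None
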
@@ -137,7 +139,7 @@ class LC_Phi3(LCInterface, ABC):
         try:
             get_file = requests.get(self.model_config["model_url"])
             if get_file.status_code == 200:
-                path_to_model = os.path.join(
+                path_to_model = os.path.join(models_dir, self.model_config["model_name"])
                 with open(path_to_model, "wb") as f:
                     f.write(get_file.content)
                 logger.info("Model file successfully recorded")
@@ -151,7 +153,7 @@ class LC_Phi3(LCInterface, ABC):
     @staticmethod
     def __read_yaml():
         try:
-            yaml_file = os.path.join(
+            yaml_file = os.path.join(work_dir, 'prompts.yaml')
             with open(yaml_file, 'r') as file:
                 data = yaml.safe_load(file)
                 return data
@@ -168,7 +170,7 @@ class LC_Phi3(LCInterface, ABC):
         prompt = PromptTemplate(template=template, input_variables=["entity"])

         llm = LlamaCpp(
-            model_path=os.path.join(
+            model_path=os.path.join(models_dir, self.model_config["model_name"]),
             temperature=self.model_config["temperature"],
             max_tokens=self.model_config["max_tokens"],
             top_p=self.model_config["top_p"],
@@ -201,11 +203,11 @@ class LC_Phi3(LCInterface, ABC):

     def get_unused(self, current_lc):

-        if len(os.listdir(
-            file_names = [os.path.basename(md) for md in os.listdir(
+        if len(os.listdir(models_dir)) > 1:
+            file_names = [os.path.basename(md) for md in os.listdir(models_dir)]
             for item in file_names:
                 if item != current_lc:
-                    unused_model_file = os.path.join(
+                    unused_model_file = os.path.join(models_dir, item)
                     return {item: unused_model_file}
         else:
             return None
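The LC_Phi3 hunks are line-for-line identical to their LC_TinyLlama counterparts apart from the config key, so the new path handling now lives in four copies. A hypothetical consolidation, not part of this commit; BaseLC is a made-up name, LCInterface is the module's existing interface, and LC_Phi3 would subclass the same way with its own key:

    from abc import ABC

    class BaseLC(LCInterface, ABC):  # hypothetical shared parent
        config_key: str  # each subclass names its own config entry

        def __init__(self, prompt_id, config):
            self.prompt_id = prompt_id
            self.model_config = config[self.config_key]

    class LC_TinyLlama(BaseLC):
        config_key = "LC_TinyLlama-1.1B-Chat-v1.0-GGUF"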