zetavg committed on
Commit
329aa8d
·
1 Parent(s): 61dd8eb

support defining models that actually live on hf

Browse files
Files changed (1) hide show
  1. llama_lora/models.py +12 -0
llama_lora/models.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import sys
3
  import gc
 
4
 
5
  import torch
6
  from transformers import LlamaForCausalLM, LlamaTokenizer
@@ -106,6 +107,17 @@ def get_model(
106
  if os.path.isdir(possible_lora_model_path):
107
  peft_model_name_or_path = possible_lora_model_path
108
 
 
 
 
 
 
 
 
 
 
 
 
109
  Global.loaded_models.prepare_to_set()
110
  clear_cache()
111
 
 
1
  import os
2
  import sys
3
  import gc
4
+ import json
5
 
6
  import torch
7
  from transformers import LlamaForCausalLM, LlamaTokenizer
 
107
  if os.path.isdir(possible_lora_model_path):
108
  peft_model_name_or_path = possible_lora_model_path
109
 
110
+ possible_model_info_json_path = os.path.join(possible_lora_model_path, "info.json")
111
+ if os.path.isfile(possible_model_info_json_path):
112
+ try:
113
+ with open(possible_model_info_json_path, "r") as file:
114
+ json_data = json.load(file)
115
+ possible_hf_model_name = json_data.get("hf_model_name")
116
+ if possible_hf_model_name:
117
+ peft_model_name_or_path = possible_hf_model_name
118
+ except Exception as e:
119
+ raise ValueError(f"Error reading model info from {possible_model_info_json_path}: {e}")
120
+
121
  Global.loaded_models.prepare_to_set()
122
  clear_cache()
123