Ethan Chang committed on
Commit
d1afae8
1 Parent(s): 6665451

Added repo_id and filename to config.yml

Browse files
code/modules/chat/chat_model_loader.py CHANGED
@@ -18,8 +18,8 @@ class ChatModelLoader:
18
 
19
  def _verify_model_cache(self, model_cache_path):
20
  hf_hub_download(
21
- repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
22
- filename="tinyllama-1.1b-chat-v1.0.Q5_0.gguf",
23
  cache_dir=model_cache_path
24
  )
25
  return str(list(Path(model_cache_path).glob("*/snapshots/*/*.gguf"))[0])
 
18
 
19
  def _verify_model_cache(self, model_cache_path):
20
  hf_hub_download(
21
+ repo_id=self.config["llm_params"]["local_llm_params"]["repo_id"],
22
+ filename=self.config["llm_params"]["local_llm_params"]["filename"],
23
  cache_dir=model_cache_path
24
  )
25
  return str(list(Path(model_cache_path).glob("*/snapshots/*/*.gguf"))[0])
code/modules/config/config.yml CHANGED
@@ -1,6 +1,6 @@
1
  log_dir: '../storage/logs' # str
2
  log_chunk_dir: '../storage/logs/chunks' # str
3
- device: 'cpu' # str [cuda, cpu]
4
 
5
  vectorstore:
6
  embedd_files: False # bool
@@ -32,6 +32,8 @@ llm_params:
32
  local_llm_params:
33
  model: 'tiny-llama'
34
  temperature: 0.7
 
 
35
 
36
  chat_logging:
37
  log_chat: False # bool
 
1
  log_dir: '../storage/logs' # str
2
  log_chunk_dir: '../storage/logs/chunks' # str
3
+ device: 'cuda' # str [cuda, cpu]
4
 
5
  vectorstore:
6
  embedd_files: False # bool
 
32
  local_llm_params:
33
  model: 'tiny-llama'
34
  temperature: 0.7
35
+ repo_id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF' # HuggingFace repo id
36
+ filename: 'tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Specific name of gguf file in the repo
37
 
38
  chat_logging:
39
  log_chat: False # bool