ffreemt committed on
Commit 9b6a4ab
1 Parent(s): 8d030a2
Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -1,15 +1,17 @@
+"""Test various models."""
+# pylint: disable=invalid-name, line-too-long,broad-exception-caught, protected-access
 import os
 import time
 
 # ruff: noqa: E402
 # os.system("pip install --upgrade torch transformers sentencepiece scipy cpm_kernels accelerate bitsandbytes loguru")
 
-os.system("pip install torch transformers sentencepiece loguru")
+# os.system("pip install torch transformers sentencepiece loguru")
 
 from pathlib import Path
 
 import torch
-from logru import logger
+from loguru import logger
 from transformers import AutoModel, AutoTokenizer
 
 # fix timezone in Linux
@@ -49,7 +51,7 @@ logger.debug("done load")
 # model = AutoModelForCausalLM.from_pretrained("openchat/openchat_v2_w", load_in_8bit_fp32_cpu_offload=True, load_in_8bit=True)
 
 model_path = model.config._dict["model_name_or_path"]
-logger.debug(f"{model_path=}")
+logger.debug(f"{model.config=} {type(model.config)=} {model_path=}")
 
 model_size_gb = Path(model_path).stat().st_size / 2**30
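For context, the edited logger.debug line uses Python 3.8+ f-string `=` specifiers (which print both the expression and its value), and the size calculation a few lines below reports the checkpoint file in GiB. A minimal standalone sketch of both, assuming only loguru is installed; the model_path value here is hypothetical and merely stands in for the value read from model.config:

    from pathlib import Path

    from loguru import logger

    # Hypothetical checkpoint path, standing in for model.config's model_name_or_path.
    model_path = "/tmp/pytorch_model.bin"

    # The `=` specifier renders as: model_path='/tmp/pytorch_model.bin'
    logger.debug(f"{model_path=}")

    # File size in GiB (2**30 bytes), mirroring the calculation in app.py; guard against a missing file.
    model_size_gb = Path(model_path).stat().st_size / 2**30 if Path(model_path).exists() else 0.0
    logger.debug(f"{model_size_gb=:.2f}")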