xuxw98 committed
Commit: fe3c026
1 Parent(s): 9cfe815

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -8,8 +8,8 @@ import lightning as L
 import torch
 
 # support running without installing as a package
-# wd = Path(__file__).parent.parent.resolve()
-# sys.path.append(str(wd))
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
 
 from generate import generate
 from lit_llama import Tokenizer
@@ -32,7 +32,7 @@ torch.set_float32_matmul_precision("high")
 def model_load(
     adapter_path: Path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned_15k.pth"),
     pretrained_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
-    quantize: Optional[str] = "llm.int8",
+    quantize: Optional[str] = None,
 ):
 
     fabric = L.Fabric(devices=1)
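
For context, a minimal sketch of the bootstrap pattern the first hunk enables (this is not the full app.py, which the commit does not show; it assumes app.py sits one directory below the repository root, as the path arithmetic implies). The second hunk simply changes the model_load default so no llm.int8 quantization is applied unless requested.

    # Sketch of the path bootstrap uncommented by this change.
    import sys
    from pathlib import Path

    wd = Path(__file__).parent.parent.resolve()  # repository root (two levels up from this file)
    sys.path.append(str(wd))                     # lets sibling modules resolve without installing the package

    from generate import generate                # now found in the repo, not site-packages
    from lit_llama import Tokenizer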