xuxw98 committed on
Commit
1047033
1 Parent(s): c8ac827

Upload app.py

Files changed (1)
  1. app.py +2 -24
app.py CHANGED
@@ -23,38 +23,16 @@ import gradio as gr
 import os
 import glob
 import json
-from setuptools import setup, find_packages
-
-_PATH_ROOT = os.path.dirname(__file__)
-
-with open(os.path.join(_PATH_ROOT, "README.md"), encoding="utf-8") as fo:
-    readme = fo.read()
-
-setup(
-    name='lit-llama',
-    version='0.1.0',
-    description='Implementation of the LLaMA language model',
-    author='Lightning AI',
-    url='https://github.com/lightning-AI/lit-llama',
-    install_requires=[
-        "torch>=2.0.0",
-        "lightning @ git+https://github.com/Lightning-AI/lightning@master",
-        "sentencepiece",
-        "bitsandbytes",
-    ],
-    packages=find_packages(),
-    long_description=readme,
-    long_description_content_type="text/markdown",
-)
 
 
 # os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 torch.set_float32_matmul_precision("high")
+# quantize: Optional[str] = "llm.int8",
 
 def model_load(
     adapter_path: Path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned_15k.pth"),
     pretrained_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
-    quantize: Optional[str] = "llm.int8",
+    quantize: Optional[str] = "",
 ):
 
     fabric = L.Fabric(devices=1)
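
For orientation only, a minimal, hypothetical sketch of what the changed default means for a caller: quantization is now opt-in rather than the default. The import line and call sites below are assumptions for illustration; the diff shows only the signature of model_load and says nothing about its return value.

# Hypothetical usage (not part of this commit): with quantize defaulting to "",
# model_load() loads the checkpoint without quantization unless "llm.int8" is
# requested explicitly.
from app import model_load  # assumption: app.py is importable from the repo root

model_load()                      # new default: quantize="" -> no quantization
model_load(quantize="llm.int8")   # restore the pre-commit llm.int8 behaviour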