Upload app.py
app.py CHANGED
@@ -23,6 +23,30 @@ import gradio as gr
 import os
 import glob
 import json
+from setuptools import setup, find_packages
+
+_PATH_ROOT = os.path.dirname(__file__)
+
+with open(os.path.join(_PATH_ROOT, "README.md"), encoding="utf-8") as fo:
+    readme = fo.read()
+
+setup(
+    name='lit-llama',
+    version='0.1.0',
+    description='Implementation of the LLaMA language model',
+    author='Lightning AI',
+    url='https://github.com/lightning-AI/lit-llama',
+    install_requires=[
+        "torch>=2.0.0",
+        "lightning @ git+https://github.com/Lightning-AI/lightning@master",
+        "sentencepiece",
+        "bitsandbytes",
+    ],
+    packages=find_packages(),
+    long_description=readme,
+    long_description_content_type="text/markdown",
+)
+
 
 # os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 torch.set_float32_matmul_precision("high")
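
For reference, the setup() metadata added above is what pip records when the project is installed. Below is a minimal sketch of reading that metadata back with the standard library, assuming the lit-llama distribution has actually been installed (e.g. via an editable install), which the diff itself does not show:

# Read back the packaging metadata that the setup() call above registers at install time.
# Assumes the `lit-llama` distribution is installed; the diff alone does not install anything.
from importlib.metadata import metadata, version

print(version("lit-llama"))   # "0.1.0", per the setup() arguments in the diff
meta = metadata("lit-llama")
print(meta["Summary"])        # "Implementation of the LLaMA language model"
print(meta["Author"])         # "Lightning AI"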