drakosfire committed on
Merge branch 'main' of hf.co:spaces/TheDrakosfire/CollectibleCardGenerator
- .gitattributes +1 -0
- Dockerfile +46 -0
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
 models/starling-lm-7b-alpha.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+cuda_12.4.0_550.54.14_linux.run filter=lfs diff=lfs merge=lfs -text
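The added rule stores the CUDA installer as a Git LFS object instead of a regular blob, which is why the large .run file can live in the repository at all. As a sketch of how a rule in this form is typically produced (assuming Git LFS is already installed and initialized in the clone):

    # Register the installer with Git LFS; this appends the matching filter line to .gitattributes
    git lfs track "cuda_12.4.0_550.54.14_linux.run"
    # Stage both the updated .gitattributes and the installer itself
    git add .gitattributes cuda_12.4.0_550.54.14_linux.run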
Dockerfile CHANGED
@@ -2,6 +2,42 @@
 FROM ubuntu:22.04 as cuda-setup
 
 
+ARG DEBIAN_FRONTEND=noninteractive
+
+# Install necessary libraries including libxml2
+RUN apt-get update && \
+apt-get install -y gcc libxml2 && \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/*
+
+COPY cuda_12.4.0_550.54.14_linux.run .
+
+# Install wget, download cuda-toolkit and run
+RUN chmod +x cuda_12.4.0_550.54.14_linux.run && \
+./cuda_12.4.0_550.54.14_linux.run --silent --toolkit --override
+
+# Second Stage: Copy necessary CUDA directories install flash-attn
+FROM ubuntu:22.04 as base-layer
+
+# Copy the CUDA toolkit from the first stage
+COPY --from=cuda-setup /usr/local/cuda-12.4 /usr/local/cuda-12.4
+
+# Set environment variables to enable CUDA commands
+ENV PATH=/usr/local/cuda-12.4/bin:${PATH}
+ENV LD_LIBRARY_PATH=/usr/local/cuda-12.4/lib64:${LD_LIBRARY_PATH}
+
+# Install Python, pip, and virtualenv
+RUN apt-get update && \
+apt-get install -y python3 python3-pip python3-venv git && \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/*
+
+# Create a virtual environment and install dependencies
+RUN python3 -m venv /venv
+ENV PATH="/venv/bin:$PATH"
+FROM ubuntu:22.04 as cuda-setup
+
+
 ARG DEBIAN_FRONTEND=noninteractive
 
 # Install necessary libraries including libxml2
@@ -38,10 +74,13 @@ ENV PATH="/venv/bin:$PATH"
 
 # Llama.cpp requires the ENV variable be set to signal the CUDA build and be built with the CMAKE variables from pip for python use
 ENV LLAMA_CUBLAS=1
+RUN pip install --no-cache-dir torch packaging wheel && \
+pip install flash-attn && \
 RUN pip install --no-cache-dir torch packaging wheel && \
 pip install flash-attn && \
 pip install gradio && \
 CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama_cpp_python==0.2.55 && \
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama_cpp_python==0.2.55 && \
 pip install pillow && \
 pip install diffusers && \
 pip install accelerate && \
@@ -60,6 +99,11 @@ ENV LD_LIBRARY_PATH=/usr/local/cuda-12.4/lib64:${LD_LIBRARY_PATH}
 ENV LLAMA_CPP_LIB=/venv/lib/python3.10/site-packages/llama_cpp/libllama.so
 ENV VIRTUAL_ENV=/venv
 
+# Install Python and create a user
+RUN apt-get update && apt-get install -y python3 python3-venv && apt-get clean && rm -rf /var/lib/apt/lists/* && \
+useradd -m -u 1000 user
+
+
 # Install Python and create a user
 RUN apt-get update && apt-get install -y python3 python3-venv && apt-get clean && rm -rf /var/lib/apt/lists/* && \
 useradd -m -u 1000 user
@@ -67,6 +111,8 @@ RUN apt-get update && apt-get install -y python3 python3-venv && apt-get clean &
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
 # Set working directory and user
 COPY . /home/user/app
+# Set working directory and user
+COPY . /home/user/app
 WORKDIR /home/user/app
 RUN chown -R user:user /home/user/app/ && \
 mkdir -p /home/user/app/output && \
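The comment above ENV LLAMA_CUBLAS=1 is why llama_cpp_python is built from source inside the image: without that variable and the matching CMAKE_ARGS, pip would install a CPU-only build. A minimal smoke test of the resulting environment could look like the sketch below; it assumes the CUDA 12.4 toolkit from the first stage is on PATH, the model path is taken from the LFS rules in .gitattributes, and n_gpu_layers=-1 (full GPU offload) is an illustrative choice rather than something shown in this diff:

    # Rebuild the same CUDA-enabled wheel outside Docker (assumes nvcc from CUDA 12.4 is on PATH, as in the base-layer stage)
    LLAMA_CUBLAS=1 CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install --no-cache-dir llama_cpp_python==0.2.55
    # Hypothetical check that the LFS-tracked GGUF model loads with GPU offload
    python3 -c "from llama_cpp import Llama; Llama(model_path='models/starling-lm-7b-alpha.Q8_0.gguf', n_gpu_layers=-1)"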