dockerfile big update cuda

Changed files:
- Dockerfile (+22 -3)
- api/main.py (+6 -6)
- requirements.txt (+18 -0)
Dockerfile
CHANGED
@@ -1,11 +1,30 @@
-FROM
+FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt update && \
+    apt install -y bash \
+    build-essential \
+    git \
+    git-lfs \
+    curl \
+    ca-certificates \
+    libsndfile1-dev \
+    libgl1 \
+    python3.8 \
+    python3-pip \
+    python3.8-venv && \
+    rm -rf /var/lib/apt/lists
+
+RUN python3 -m venv /opt/venv
 WORKDIR /code
 COPY ./requirements.txt /code/requirements.txt
-RUN pip install --no-cache-dir --upgrade
+RUN python3 -m pip install --no-cache-dir --upgrade pip && \
+    python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
 RUN useradd -m -u 1000 user
 USER user
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:$PATH
 WORKDIR $HOME/server
 COPY --chown=user . $HOME/server
-
+
+CMD ["uvicorn", "api.main:api", "--host", "0.0.0.0", "--port", "7860"]
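The switch to the nvidia/cuda:11.7.1 runtime base only pays off if the PyTorch stack pulled in by requirements.txt can actually see the GPU at run time, which also requires starting the container with GPU access. A minimal sanity-check sketch, assuming the bare `torch` pin resolves to a CUDA-enabled wheel (the script name and layout below are illustrative, not part of this repo):

# check_cuda.py - hypothetical sanity check, not part of this repo
import torch

if __name__ == "__main__":
    # True only when an NVIDIA driver is exposed to the container
    # (i.e. the container was started with GPU access enabled).
    print("CUDA available:", torch.cuda.is_available())
    if torch.cuda.is_available():
        print("Device:", torch.cuda.get_device_name(0))
        print("Built against CUDA:", torch.version.cuda)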
api/main.py
CHANGED
@@ -6,7 +6,7 @@ api = __main.api()
 def get_text_from_url(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'url' in data:
             __response['texto']=__main.obtener_texto(from_url=data.get('url'))
         else:
             raise __main.exception(status_code = 401, datail=f"Datos mal formados:\n{data}")
@@ -21,7 +21,7 @@ def get_text_from_url(data:dict) -> dict:
 def get_text_from_pdf(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'pdf' in data:
             __response['texto']=__main.obtener_texto(from_pdf=data.get('pdf'))
         else:
             raise __main.exception(status_code = 401, datail=f"Datos mal formados:\n{data}")
@@ -36,7 +36,7 @@ def get_text_from_pdf(data:dict) -> dict:
 def get_blocks(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'texto' in data and 'size' in data:
             __response['original']=data.get('texto')
             __response['bloques']=__main.generar_bloques(texto=data.get('texto'),
                                                          size=data.get('size'))
@@ -53,7 +53,7 @@ def get_blocks(data:dict) -> dict:
 def get_traduccion(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'texto' in data and 'idioma' in data:
             __response['original']= data.get('texto')
             __response['traduccion']= __main.traducir(texto=data.get('texto'),
                                                       idioma=data.get('idioma'))
@@ -70,7 +70,7 @@ def get_traduccion(data:dict) -> dict:
 def get_resumen(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'texto' in data:
             __response['original']= data.get('texto')
             __response['resumen']= __main.resumir(texto=data.get('texto'))
         else:
@@ -86,7 +86,7 @@ def get_resumen(data:dict) -> dict:
 def get_text2img(data:dict) -> dict:
     __response=dict({"request_data":data})
     try:
-        if data:
+        if data and 'texto' in data and 'modelo' in data:
             __response['original']= data.get('texto')
             __response['image']= __main.text_to_img(texto=data.get('texto'),
                                                     model=data.get('modelo'))
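With these guards, each endpoint now rejects payloads that are missing the specific keys it reads, instead of failing deeper inside the handler. A client sketch against the port from the new CMD; the route path and response shape are assumptions, only the payload keys and port 7860 come from this commit:

# hypothetical client; '/traduccion' is an assumed route name
import requests

BASE = "http://localhost:7860"

# get_traduccion now requires both 'texto' and 'idioma' in the body
ok = requests.post(f"{BASE}/traduccion",
                   json={"texto": "Hola mundo", "idioma": "en"})
print(ok.status_code, ok.json())

# a body missing a required key now falls through to the else branch
# and triggers the 401 "Datos mal formados" exception
bad = requests.post(f"{BASE}/traduccion", json={"texto": "Hola mundo"})
print(bad.status_code)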
requirements.txt
CHANGED
@@ -1,3 +1,19 @@
+torch
+torchvision
+torchaudio
+invisible_watermark
+datasets
+hf-doc-builder
+huggingface-hub
+Jinja2
+librosa
+numpy
+scipy
+tensorboard
+omegaconf
+pytorch-lightning
+xformers
+
 fastapi
 pydantic
 uvicorn
@@ -9,3 +25,5 @@ transformers[sentencepiece]
 diffusers
 diffusers[torch]
 diffusers[flax]
+accelerate
+safetensors