LogicGoInfotechSpaces committed
Commit e5f91f5 · 1 Parent(s): 83f52c2

chore(space): Dockerize FastAPI app; avoid Gradio/xformers on server; add README

Files changed (4)
  1. Dockerfile +32 -0
  2. README.md +24 -0
  3. app.py +23 -21
  4. infer_full.py +0 -1
Dockerfile ADDED
@@ -0,0 +1,32 @@
+ # syntax=docker/dockerfile:1
+
+ FROM python:3.10-slim
+
+ ENV PIP_NO_CACHE_DIR=1 \
+     PYTHONDONTWRITEBYTECODE=1 \
+     PYTHONUNBUFFERED=1 \
+     HF_HUB_ENABLE_HF_TRANSFER=1
+
+ WORKDIR /app
+
+ # System deps
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     git \
+     gcc \
+     libgl1 \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Copy code
+ COPY . /app
+
+ # Install deps
+ RUN python -m pip install --upgrade pip && \
+     python -m pip install -r requirements.txt && \
+     # CPU-only torch to avoid CUDA/xformers issues on Spaces CPU images
+     python -m pip install --index-url https://download.pytorch.org/whl/cpu torch torchvision torchaudio --upgrade --force-reinstall && \
+     # Ensure xformers is not installed
+     python -m pip uninstall -y xformers || true
+
+ EXPOSE 7860
+
+ CMD ["python", "-m", "uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -12,3 +12,27 @@ short_description: hair stable
  ---
  
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ 
+ # Hair_stable_new
+ 
+ ## API (FastAPI)
+ 
+ Base URL (Space): <your Space URL>
+ 
+ Bearer Token: `logicgo@123`
+ 
+ Endpoints:
+ - GET /health
+ - POST /upload (form-data: image=File) [Bearer]
+ - POST /get-hairswap (JSON) [Bearer]
+ - GET /download/{filename} [Bearer]
+ - GET /logs [Bearer]
+ 
+ Run locally:
+ ```bash
+ python3 -m uvicorn server:app --host 0.0.0.0 --port 7861
+ ```
+ 
+ On Hugging Face Spaces (Docker):
+ - Space type: Docker
+ - This repo includes a Dockerfile which starts the FastAPI app on port 7860.
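For reference alongside the endpoint list added above, a hedged sketch of calling the API with curl; the request body for `/get-hairswap` is not documented in this commit, so the JSON field names below are assumptions rather than the confirmed schema, and the example filenames are placeholders:

```bash
TOKEN="logicgo@123"
BASE="https://<your-space>.hf.space"   # replace with your Space URL

# Health check (listed without a Bearer requirement).
curl "$BASE/health"

# Upload an image as multipart form-data with the field name "image".
curl -X POST "$BASE/upload" \
  -H "Authorization: Bearer $TOKEN" \
  -F "image=@face.jpg"

# Request a hair swap; these JSON keys are illustrative assumptions.
curl -X POST "$BASE/get-hairswap" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"id_image": "face.jpg", "ref_hair": "hair.jpg"}'

# Download a result by filename (the filename value is illustrative).
curl -H "Authorization: Bearer $TOKEN" "$BASE/download/result.png" -o result.png
```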
app.py CHANGED
@@ -1,4 +1,7 @@
- import gradio as gr
+ try:
+     import gradio as gr
+ except Exception:
+     gr = None
  import torch
  from PIL import Image
  import numpy as np
@@ -161,23 +164,22 @@ def model_call(id_image, ref_hair, converter_scale, scale, guidance_scale, contr
  return id_image_bald, image
  
  # Create a Gradio interface
- iface = gr.Interface(
-     fn=model_call,
-     inputs=[
-         gr.Image(label="ID Image"),
-         gr.Image(label="Reference Hair"),
-         gr.Slider(minimum=0.5, maximum=1.5, value=1, label="Converter Scale"),
-         gr.Slider(minimum=0.0, maximum=3.0, value=1.0, label="Hair Encoder Scale"),
-         gr.Slider(minimum=1.1, maximum=3.0, value=1.5, label="CFG"),
-         gr.Slider(minimum=0.1, maximum=2.0, value=1, label="Latent IdentityNet Scale"),
-     ],
-     outputs=[
-         gr.Image(type="pil", label="Bald Result"),
-         gr.Image(type="pil", label="Transfer Result"),
-     ],
-     title="Hair Transfer Demo",
-     description="In general, aligned faces work well, but can also be used on non-aligned faces, and you need to resize to 512 * 512"
- )
-
- # Launch the Gradio interface
- iface.queue().launch(server_name='0.0.0.0', server_port=7860, share=True)
+ if gr is not None:
+     iface = gr.Interface(
+         fn=model_call,
+         inputs=[
+             gr.Image(label="ID Image"),
+             gr.Image(label="Reference Hair"),
+             gr.Slider(minimum=0.5, maximum=1.5, value=1, label="Converter Scale"),
+             gr.Slider(minimum=0.0, maximum=3.0, value=1.0, label="Hair Encoder Scale"),
+             gr.Slider(minimum=1.1, maximum=3.0, value=1.5, label="CFG"),
+             gr.Slider(minimum=0.1, maximum=2.0, value=1, label="Latent IdentityNet Scale"),
+         ],
+         outputs=[
+             gr.Image(type="pil", label="Bald Result"),
+             gr.Image(type="pil", label="Transfer Result"),
+         ],
+         title="Hair Transfer Demo",
+         description="In general, aligned faces work well, but can also be used on non-aligned faces, and you need to resize to 512 * 512"
+     )
+     iface.queue().launch(server_name='0.0.0.0', server_port=7860, share=True)
 
infer_full.py CHANGED
@@ -1,4 +1,3 @@
- import gradio as gr
  import torch
  from PIL import Image
  import numpy as np