Gradio app
- Dockerfile +9 -4
- predict.py +61 -0
- requirements_hf.txt +1 -0
Dockerfile
CHANGED
@@ -1,10 +1,15 @@
 FROM docker.io/jackrio/bae_repo

-
+COPY ./requirements.txt /code/requirements.txt
+
+
 USER root

-
+COPY ./requirements.txt /bae/requirements_hf.txt
+RUN pip install --no-cache-dir --upgrade -r /code/requirements_hf.txt
+
+COPY predict.py /bae/
+
 WORKDIR /bae
 EXPOSE 8080
-CMD ["
-"--workers=1", "--threads=4", "--bind=0.0.0.0:8080"]
+CMD ["python", "predict.py"]
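Note that as committed, the install step reads /code/requirements_hf.txt, while the COPY lines write /code/requirements.txt and /bae/requirements_hf.txt; unless the jackrio/bae_repo base image already ships /code/requirements_hf.txt, the RUN step fails at build time. Pointing pip at one of the paths the COPY lines actually create (e.g. -r /code/requirements.txt) would make the stage self-consistent.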
predict.py
ADDED
@@ -0,0 +1,61 @@
+import albumentations as A
+import gradio as gr
+import numpy as np
+import torch
+from albumentations.pytorch import ToTensorV2
+from models.model_zoo import BoneAgeEstModelZoo
+
+device = "cpu"
+def initialize_model():
+    # Load model
+    model = BoneAgeEstModelZoo(branch="gender", pretrained=False, lr=0.001).load_from_checkpoint(
+        "output/inception_1024/epoch14_inception_1024_kaggle.ckpt")
+    model.model.eval()
+    print("Loaded model")
+
+    # Check for GPU
+    model = model.to(device)
+
+    return model
+
+
+# Preprocessing and postprocessing
+transform = A.Compose([
+    A.Resize(width=1024, height=1024),
+    A.CLAHE(),
+    A.Normalize(),
+    ToTensorV2(),
+])
+
+
+def predict(image, gender):
+    model = initialize_model()
+
+    processed_image = transform(image=np.array(image, dtype=np.uint8))['image']
+    processed_image = processed_image.unsqueeze(0)
+    processed_image = processed_image.to(device)
+    gender = torch.tensor(int(gender)).unsqueeze(0).unsqueeze(1).to(device)
+
+    scans = {
+        'image': processed_image,
+        'gender': gender
+    }
+    preds = model(scans)
+    return int(preds)
+
+
+def run():
+    image_input = gr.inputs.Image(type="pil", label="Input PNG image")
+    gender_input = gr.inputs.Checkbox(label="Gender 0 Male, 1 Female")
+    output = gr.outputs.Textbox(label="Predicted Age")
+    demo = gr.Interface(
+        fn=predict,
+        inputs=[image_input, gender_input],
+        outputs=output,
+    )
+
+    demo.launch(server_name="0.0.0.0", server_port=8080)
+
+
+if __name__ == "__main__":
+    run()
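As committed, predict() calls initialize_model() on every request, so the checkpoint is re-read from disk for each prediction, and inference runs without torch.no_grad(). Below is a minimal load-once sketch, reusing the checkpoint path, preprocessing, and tensor shapes from the file above; it assumes BoneAgeEstModelZoo is a PyTorch Lightning module, which the .ckpt file and the load_from_checkpoint call suggest.

import albumentations as A
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from models.model_zoo import BoneAgeEstModelZoo

device = "cpu"

# Same preprocessing pipeline as the committed predict.py.
transform = A.Compose([
    A.Resize(width=1024, height=1024),
    A.CLAHE(),
    A.Normalize(),
    ToTensorV2(),
])

# Load the checkpoint once at import time. load_from_checkpoint is a
# classmethod in PyTorch Lightning, so calling it on the class forwards
# the hyperparameters to __init__ and avoids building (and discarding)
# the throwaway instance the committed code constructs first.
model = BoneAgeEstModelZoo.load_from_checkpoint(
    "output/inception_1024/epoch14_inception_1024_kaggle.ckpt",
    branch="gender", pretrained=False, lr=0.001,
)
model.model.eval()
model = model.to(device)


def predict(image, gender):
    # Same tensor shapes as the original: image [1, C, H, W], gender [1, 1].
    processed_image = transform(image=np.array(image, dtype=np.uint8))["image"]
    processed_image = processed_image.unsqueeze(0).to(device)
    gender = torch.tensor([[int(gender)]]).to(device)

    # no_grad avoids building an autograd graph during inference.
    with torch.no_grad():
        preds = model({"image": processed_image, "gender": gender})
    return int(preds)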
requirements_hf.txt
ADDED
@@ -0,0 +1 @@
+gradio
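requirements_hf.txt leaves gradio unpinned, so every rebuild of the Space pulls the latest release. The gr.inputs and gr.outputs namespaces used in predict.py are the Gradio 2.x-era API; they were deprecated in Gradio 3.x and removed in 4.x, so a fresh build raises AttributeError as soon as run() executes. Pinning the version the app was written against would keep the committed code working; alternatively, here is a sketch of the same interface against the current top-level components (with predict as defined above):

import gradio as gr

def run():
    demo = gr.Interface(
        fn=predict,  # the predict() defined in predict.py above
        inputs=[
            gr.Image(type="pil", label="Input PNG image"),
            gr.Checkbox(label="Gender 0 Male, 1 Female"),
        ],
        outputs=gr.Textbox(label="Predicted Age"),
    )
    demo.launch(server_name="0.0.0.0", server_port=8080)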