Kroy committed on
Commit
63b4b93
1 Parent(s): e818e42

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +30 -0
  2. app.py +141 -0
  3. requirements.txt +10 -0
Dockerfile ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use the official Python 3.8 image (must match the FROM line below)
FROM python:3.8

# Set the working directory to /code
WORKDIR /code

# Copy only the requirements file first so the dependency-install layer
# is cached and not invalidated by every source-code change
COPY ./requirements.txt /code/requirements.txt

# Install requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Set up a new user named "user" with user ID 1000 (avoid running as root)
RUN useradd -m -u 1000 user
# Switch to the "user" user
USER user
# Set home to the user's home directory and put user-local binaries on PATH
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the application source into the container at $HOME/app,
# setting the owner to the unprivileged user
COPY --chown=user . $HOME/app

# Start the FastAPI app on port 7860, the default port expected by Spaces.
# Plain-uvicorn alternative:
# CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]

CMD gunicorn -k uvicorn.workers.UvicornWorker --workers 2 --threads=2 --max-requests 512 --bind 0.0.0.0:7860 app:app
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import base64
import io
import logging
import os
from typing import Any, Union, Dict, List

import cv2
import numpy as np
import requests
from fastapi import FastAPI
from PIL import Image
11
+
12
# Module-level logger used by the request handlers below.
# (The original file referenced `logger` in generate() without ever
# defining it, which raised NameError on every request.)
logger = logging.getLogger(__name__)

# Create a new FastAPI app instance
app = FastAPI()

# Load the OpenCV DNN face detector once at import time so every request
# reuses the same network instead of re-reading the model files from disk.
# The model files are expected under ./face_detector relative to the CWD.
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["face_detector",
	"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)

# Anonymization settings shared by the /generate endpoint:
#   method     - "simple" selects Gaussian blur; any other value selects
#                the pixelation method
#   blocks     - grid size used by the pixelation method
#   confidence - minimum detection confidence for a face to be anonymized
args = {
    "method": "simple",
    "blocks": 20,
    "confidence": 0.5
}
32
def anonymize_face_simple(image, factor=3.0):
    """Anonymize a face ROI by Gaussian-blurring it.

    The kernel size is derived from the spatial dimensions of the input,
    so larger faces get proportionally stronger blurring.

    Parameters
    ----------
    image : numpy.ndarray
        BGR face region to blur (as produced by NumPy slicing).
    factor : float, optional
        Divisor for the kernel size; larger values give a smaller kernel
        and therefore less blur.

    Returns
    -------
    numpy.ndarray
        The blurred image.
    """
    # automatically determine the size of the blurring kernel based
    # on the spatial dimensions of the input image
    (h, w) = image.shape[:2]
    kW = int(w / factor)
    kH = int(h / factor)

    # ensure the width of the kernel is odd
    if kW % 2 == 0:
        kW -= 1

    # ensure the height of the kernel is odd
    if kH % 2 == 0:
        kH -= 1

    # Guard against degenerate ROIs: for very small faces the computed
    # size can be 0 (then -1 after the odd adjustment), which makes
    # cv2.GaussianBlur raise. A 1x1 kernel is a valid no-op blur.
    kW = max(kW, 1)
    kH = max(kH, 1)

    # apply a Gaussian blur to the input image using our computed
    # kernel size
    return cv2.GaussianBlur(image, (kW, kH), 0)
50
+
51
def anonymize_face_pixelate(image, blocks=3):
    """Anonymize a face ROI with a mosaic/pixelation effect.

    The image is divided into a ``blocks`` x ``blocks`` grid and every
    cell is painted with its mean BGR color. The input array is mutated
    in place and also returned.
    """
    height, width = image.shape[:2]
    x_edges = np.linspace(0, width, blocks + 1, dtype="int")
    y_edges = np.linspace(0, height, blocks + 1, dtype="int")

    # Visit every grid cell by pairing each edge with its successor.
    for y0, y1 in zip(y_edges[:-1], y_edges[1:]):
        for x0, x1 in zip(x_edges[:-1], x_edges[1:]):
            # Mean BGR color of the cell, painted back over the cell as
            # a filled rectangle.
            cell = image[y0:y1, x0:x1]
            (B, G, R) = [int(c) for c in cv2.mean(cell)[:3]]
            cv2.rectangle(image, (x0, y0), (x1, y1), (B, G, R), -1)

    # return the pixelated blurred image
    return image
77
+
78
@app.get("/generate")
def generate(path: str):
    """
    Download the image at *path* (a URL), detect faces with the OpenCV
    SSD face detector loaded at module level, anonymize every detected
    face (Gaussian blur or pixelation, per ``args["method"]``), and
    return the result as a base64-encoded PNG string under the key
    "output".
    """
    # Download the image; a timeout prevents a dead URL from hanging the
    # worker indefinitely.
    r = requests.get(path, stream=True, timeout=30)
    img = Image.open(io.BytesIO(r.content)).convert('RGB')
    open_cv_image = np.array(img)
    # Convert RGB to BGR -> numpy array of shape (height, width, 3)
    image = open_cv_image[:, :, ::-1].copy()
    (h, w) = image.shape[:2]

    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    # (the original referenced an undefined `logger` here, raising
    # NameError on every request; use the logging module directly)
    logging.getLogger(__name__).info("computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is greater
        # than the minimum confidence
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for the
            # object, clipped to the image so slicing can never produce
            # an empty or invalid ROI
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            startX, endX = max(0, startX), min(w, endX)
            startY, endY = max(0, startY), min(h, endY)
            if endX <= startX or endY <= startY:
                continue

            # extract the face ROI
            face = image[startY:endY, startX:endX]

            # "simple" selects Gaussian blurring; anything else selects
            # the pixelated anonymization method
            if args["method"] == "simple":
                face = anonymize_face_simple(face, factor=3.0)
            else:
                face = anonymize_face_pixelate(face, blocks=args["blocks"])

            # store the blurred face in the output image
            image[startY:endY, startX:endX] = face

    # Re-encode the anonymized image as a base64 PNG for the JSON response.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(image)

    im_file = io.BytesIO()
    img.save(im_file, format="PNG")
    im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")

    # Return the generated image in a JSON response
    return {"output": im_bytes}
141
+
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ opencv-python-headless==4.7.0.72
2
+ imageio==2.9.0
3
+ requests==2.27.*
4
+ pandas
5
+ Pillow==7.2.0
6
+ uvloop==0.15.2
7
+ uvicorn==0.13.4
8
+ httptools==0.2.0
9
+ fastapi==0.74.*
10
+ gunicorn==20.1.0