Jon Solow committed
Commit 8e681e8
1 Parent(s): 401d73f

Copy setup from yahoo-ff-dev and grubguesser-api

.gitignore ADDED
@@ -0,0 +1,165 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+
+ *.json
+ *.png
+ *.csv
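Note: the last three patterns (*.json, *.png, *.csv) are additions on top of the stock GitHub Python template, presumably to keep downloaded images and generated data files out of the repo.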
Dockerfile CHANGED
@@ -1,14 +1,27 @@
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
- # you will also find guides on how best to write your Dockerfile
-
- FROM python:3.9
-
- WORKDIR /code
-
- COPY ./requirements.txt /code/requirements.txt
-
- RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
- COPY . .
-
- CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
+ FROM python:3.8.13 as base
+
+ # RUN apt-get update && \
+ #    apt-get -y install curl gunicorn python3
+
+ FROM base as python-base
+ RUN curl -sSL https://bootstrap.pypa.io/get-pip.py | python3 -
+ ENV APP_BASE_PATH="/opt/src"
+ RUN mkdir -p ${APP_BASE_PATH}
+ ENV PYTHONPATH="${APP_BASE_PATH}:${PYTHONPATH}"
+
+
+ FROM python-base as pip-tools-install
+ RUN python3 -m pip install pip-tools
+ WORKDIR ${APP_BASE_PATH}
+
+ FROM python-base as pip-install
+ COPY src/requirements.txt ./requirements.txt
+ RUN python3 -m pip install -r requirements.txt
+
+ FROM pip-install as copy-src
+ COPY ./src /opt/src
+ WORKDIR /opt/src
+
+
+ FROM copy-src as production
+ CMD ["python3", "-m", "uvicorn", "main:app", "--workers", "1", "--host", "0.0.0.0", "--port", "7860"]
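The replacement Dockerfile is a multi-stage build: python-base bootstraps pip and puts /opt/src on PYTHONPATH, pip-tools-install exists only as a build target for the dependency-compilation script below, pip-install bakes the pinned requirements into the image, and production serves the app with uvicorn on port 7860, the default port Hugging Face Spaces expects a Docker app to listen on.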
run_dev_container.sh ADDED
@@ -0,0 +1,10 @@
+ #!/bin/bash
+ docker buildx build \
+     -f Dockerfile \
+     --tag dev-$(basename `git rev-parse --show-toplevel`) \
+     . \
+     && docker run -it \
+     --rm \
+     --mount type=bind,source="$(pwd)/src",target=/opt/src \
+     dev-$(basename `git rev-parse --show-toplevel`) \
+     bash
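Because the run step bind-mounts ./src over /opt/src, host-side edits are visible inside the running dev container immediately, with no rebuild.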
run_pip_tools.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+ CONTAINER_NAME=dev-pip-tools-$(basename `git rev-parse --show-toplevel`)
+
+ docker build \
+     -f Dockerfile \
+     --target pip-tools-install \
+     --tag $CONTAINER_NAME \
+     . \
+     && docker run -it \
+     --rm \
+     --mount type=bind,source="$(pwd)/src",target=/opt/src \
+     $CONTAINER_NAME \
+     pip-compile requirements.in --resolver=backtracking
+
+
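This builds only up to the pip-tools-install stage and runs pip-compile against the mounted src/ directory, so the pinned src/requirements.txt is regenerated on the host whenever src/requirements.in changes.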
src/config.py ADDED
@@ -0,0 +1,4 @@
+ import os
+
+ # URL of the trained model weights; downloaded at startup by src/model.py.
+ MODEL_HDF5_PATH = os.getenv("MODEL_HDF5_PATH")
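MODEL_HDF5_PATH is never set in this commit, so it presumably has to be supplied through the container or Space environment (for example as a Space secret); src/model.py will fail at import time if it is unset.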
src/handler.py ADDED
@@ -0,0 +1,42 @@
+ from io import BytesIO
+ import urllib.request
+
+ import numpy as np
+ from skimage.io import imread
+ from keras.preprocessing.image import array_to_img, img_to_array
+ from PIL import Image
+
+
+ def preprocess(img: np.ndarray) -> np.ndarray:
+     # Resize to the model's 224x224 input and scale pixel values to [0, 1].
+     img = array_to_img(img, scale=False)
+     img = img.resize((224, 224))
+     img = img_to_array(img)
+     return img / 255.0
+
+
+ def handle_url(url: str) -> np.ndarray:
+     try:
+         img_data = imread(url)
+     except Exception:
+         # Some hosts reject bare requests; retry with a browser-like User-Agent.
+         req = urllib.request.Request(url, headers={"User-Agent": "Magic Browser"})
+         con = urllib.request.urlopen(req)
+         img_data = imread(con)
+     processed_img = preprocess(img_data)
+     img_array = np.array([processed_img])
+     return img_array
+
+
+ def read_imagefile(file: bytes) -> Image.Image:
+     file_bytes = BytesIO(file)
+     image = Image.open(file_bytes)
+     return image
+
+
+ def handle_file(file: bytes) -> np.ndarray:
+     # `file` is the raw upload body (main.py passes `await upload_file.read()`).
+     img_data = read_imagefile(file)
+     processed_img = preprocess(img_data)
+     img_array = np.array([processed_img])
+     return img_array
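For a quick local sanity check of these handlers, a minimal sketch like the following can be run from inside src/ with the dependencies installed; the image URL is a placeholder, not part of the project:

# Minimal sketch: run from src/ with requirements installed.
# The URL is illustrative; substitute any direct link to an image.
from handler import handle_url

batch = handle_url("https://example.com/some-food-photo.jpg")
print(batch.shape)               # expected: (1, 224, 224, 3), one RGB image
print(batch.min(), batch.max())  # pixel values scaled into [0.0, 1.0] by preprocess()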
src/labels.txt ADDED
@@ -0,0 +1,101 @@
+ Apple pie
+ Baby back ribs
+ Baklava
+ Beef carpaccio
+ Beef tartare
+ Beet salad
+ Beignets
+ Bibimbap
+ Bread pudding
+ Breakfast burrito
+ Bruschetta
+ Caesar salad
+ Cannoli
+ Caprese salad
+ Carrot cake
+ Ceviche
+ Cheesecake
+ Cheese plate
+ Chicken curry
+ Chicken quesadilla
+ Chicken wings
+ Chocolate cake
+ Chocolate mousse
+ Churros
+ Clam chowder
+ Club sandwich
+ Crab cakes
+ Creme brulee
+ Croque madame
+ Cup cakes
+ Deviled eggs
+ Donuts
+ Dumplings
+ Edamame
+ Eggs benedict
+ Escargots
+ Falafel
+ Filet mignon
+ Fish and chips
+ Foie gras
+ French fries
+ French onion soup
+ French toast
+ Fried calamari
+ Fried rice
+ Frozen yogurt
+ Garlic bread
+ Gnocchi
+ Greek salad
+ Grilled cheese sandwich
+ Grilled salmon
+ Guacamole
+ Gyoza
+ Hamburger
+ Hot and sour soup
+ Hot dog
+ Huevos rancheros
+ Hummus
+ Ice cream
+ Lasagna
+ Lobster bisque
+ Lobster roll sandwich
+ Macaroni and cheese
+ Macarons
+ Miso soup
+ Mussels
+ Nachos
+ Omelette
+ Onion rings
+ Oysters
+ Pad thai
+ Paella
+ Pancakes
+ Panna cotta
+ Peking duck
+ Pho
+ Pizza
+ Pork chop
+ Poutine
+ Prime rib
+ Pulled pork sandwich
+ Ramen
+ Ravioli
+ Red velvet cake
+ Risotto
+ Samosa
+ Sashimi
+ Scallops
+ Seaweed salad
+ Shrimp and grits
+ Spaghetti bolognese
+ Spaghetti carbonara
+ Spring rolls
+ Steak
+ Strawberry shortcake
+ Sushi
+ Tacos
+ Takoyaki
+ Tiramisu
+ Tuna tartare
+ Waffles
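These appear to be the 101 Food-101 class names, one per line; src/model.py loads this file at import time, and its classes = 101 output layer matches the list length.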
src/main.py ADDED
@@ -0,0 +1,43 @@
+ from fastapi import FastAPI, UploadFile, File, HTTPException, status
+
+ from handler import handle_file, handle_url
+ from predict import predict_model
+
+ app = FastAPI()
+
+
+ @app.get("/")
+ async def root():
+     return {"message": "Hello World"}
+
+
+ @app.get("/predict_url")
+ async def predict_url(url: str):
+     model_input = handle_url(url)
+     model_output = predict_model(model_input)
+     return model_output
+
+
+ @app.get("/healthcheck")
+ async def healthcheck():
+     return {"status": "alive"}
+
+
+ def validate_image_content(content_type: str):
+     """Require the request MIME type to be image/*."""
+
+     content_main_type = content_type.split("/")[0]
+     if content_main_type != "image":
+         raise HTTPException(
+             status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
+             f"Unsupported media type: {content_type}."
+             " It must be image/*",
+         )
+
+
+ @app.post("/predict_file")
+ async def predict_file(upload_file: UploadFile = File(...)):
+     validate_image_content(upload_file.content_type)
+     model_input = handle_file(await upload_file.read())
+     model_output = predict_model(model_input)
+     return model_output
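A hypothetical client sketch for the two prediction endpoints, assuming the API is running locally on port 7860 and the requests package is available on the client side (it is not part of this repo's requirements; the base URL, filename, and image URL are placeholders):

# Hypothetical client sketch; BASE, the filename, and the URL are placeholders.
import requests

BASE = "http://localhost:7860"

# /predict_url takes the image location as a query parameter.
resp = requests.get(f"{BASE}/predict_url", params={"url": "https://example.com/pizza.jpg"})
print(resp.json())  # expected: a list of the top-10 label guesses

# /predict_file uploads the image itself; the multipart field must be named
# "upload_file" to match the endpoint, and its content type must be image/*.
with open("pizza.jpg", "rb") as f:
    resp = requests.post(
        f"{BASE}/predict_file",
        files={"upload_file": ("pizza.jpg", f, "image/jpeg")},
    )
print(resp.json())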
src/model.py ADDED
@@ -0,0 +1,60 @@
+ import numpy as np
+ from keras.utils import get_file
+ from config import MODEL_HDF5_PATH
+
+
+ # One label per line; dtype=object keeps each full line (labels contain spaces).
+ LABELS = np.loadtxt("labels.txt", dtype=object, delimiter="\n")
+
+
+ def initialize_model():
+     # import the necessary packages
+     from keras.models import Sequential
+     from keras.layers import BatchNormalization, Conv2D, MaxPooling2D, Flatten, Dropout, Dense
+
+     # CONV => RELU => POOL
+     cnn = Sequential()
+     inputShape = (224, 224, 3)
+     chanDim = -1
+     classes = 101
+     # Sequence of Convolution (scan filters), BatchNormalization (normalize numbers),
+     # MaxPooling (shrink tensor down), Dropout (prevent overfit)
+     cnn.add(
+         Conv2D(32, (3, 3), padding="same", input_shape=inputShape, activation="relu")
+     )
+     cnn.add(BatchNormalization(axis=chanDim))
+     cnn.add(MaxPooling2D(pool_size=(3, 3)))
+     cnn.add(Dropout(rate=0.25))
+     cnn.add(Conv2D(64, (3, 3), padding="same", activation="relu"))
+     cnn.add(BatchNormalization(axis=chanDim))
+     cnn.add(Conv2D(64, (3, 3), padding="same", activation="relu"))
+     cnn.add(BatchNormalization(axis=chanDim))
+     cnn.add(MaxPooling2D(pool_size=(2, 2)))
+     cnn.add(Dropout(rate=0.25))
+     cnn.add(Conv2D(128, (3, 3), padding="same", activation="relu"))
+     cnn.add(BatchNormalization(axis=chanDim))
+     cnn.add(Conv2D(128, (3, 3), padding="same", activation="relu"))
+     cnn.add(BatchNormalization(axis=chanDim))
+     cnn.add(MaxPooling2D(pool_size=(2, 2)))
+     cnn.add(Dropout(rate=0.25))
+     cnn.add(Flatten())
+     cnn.add(Dense(1024, activation="relu"))
+     cnn.add(BatchNormalization())
+     cnn.add(Dropout(rate=0.5))
+     # softmax classifier
+     cnn.add(Dense(classes, activation="softmax"))
+
+     return cnn
+
+
+ CNN = initialize_model()
+
+
+ # Download the trained weights once and cache them locally for reuse.
+ CNN.load_weights(
+     get_file(
+         "weights.hdf5",
+         MODEL_HDF5_PATH,
+         cache_dir=".",
+     )
+ )
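get_file fetches weights.hdf5 from the MODEL_HDF5_PATH URL the first time the module is imported and caches it (with cache_dir=".", under ./datasets/), so later imports in the same container reuse the local copy instead of re-downloading.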
src/predict.py ADDED
@@ -0,0 +1,10 @@
+ from model import CNN, LABELS
+
+
+ def predict_model(img_array):
+     # Sort class probabilities in descending order and keep the ten best guesses.
+     class_prob = CNN.predict(img_array)
+     top_values_index = (-class_prob).argsort()[0][:10]
+     top_guesses = [LABELS[i].title() for i in top_values_index]
+
+     return top_guesses
src/pyproject.toml ADDED
@@ -0,0 +1,2 @@
+ [tool.black]
+ line-length = 120
src/requirements.in ADDED
@@ -0,0 +1,7 @@
+ keras
+ tensorflow-cpu
+ fastapi
+ uvicorn
+ scikit-image
+ numpy
+ python-multipart
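These are the unpinned top-level dependencies; run_pip_tools.sh compiles them into the fully pinned src/requirements.txt below, which is what the Docker image actually installs.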
src/requirements.txt ADDED
@@ -0,0 +1,176 @@
+ #
+ # This file is autogenerated by pip-compile with Python 3.8
+ # by the following command:
+ #
+ #    pip-compile requirements.in
+ #
+ absl-py==1.4.0
+     # via
+     #   tensorboard
+     #   tensorflow-cpu
+ anyio==3.6.2
+     # via starlette
+ astunparse==1.6.3
+     # via tensorflow-cpu
+ cachetools==5.3.0
+     # via google-auth
+ certifi==2023.5.7
+     # via requests
+ charset-normalizer==3.1.0
+     # via requests
+ click==8.1.3
+     # via uvicorn
+ fastapi==0.95.2
+     # via -r requirements.in
+ flatbuffers==23.5.9
+     # via tensorflow-cpu
+ gast==0.4.0
+     # via tensorflow-cpu
+ google-auth==2.18.1
+     # via
+     #   google-auth-oauthlib
+     #   tensorboard
+ google-auth-oauthlib==1.0.0
+     # via tensorboard
+ google-pasta==0.2.0
+     # via tensorflow-cpu
+ grpcio==1.54.2
+     # via
+     #   tensorboard
+     #   tensorflow-cpu
+ h11==0.14.0
+     # via uvicorn
+ h5py==3.8.0
+     # via tensorflow-cpu
+ idna==3.4
+     # via
+     #   anyio
+     #   requests
+ imageio==2.28.1
+     # via scikit-image
+ importlib-metadata==6.6.0
+     # via markdown
+ jax==0.4.10
+     # via tensorflow-cpu
+ keras==2.12.0
+     # via
+     #   -r requirements.in
+     #   tensorflow-cpu
+ lazy-loader==0.2
+     # via scikit-image
+ libclang==16.0.0
+     # via tensorflow-cpu
+ markdown==3.4.3
+     # via tensorboard
+ markupsafe==2.1.2
+     # via werkzeug
+ ml-dtypes==0.1.0
+     # via jax
+ networkx==3.1
+     # via scikit-image
+ numpy==1.23.5
+     # via
+     #   -r requirements.in
+     #   h5py
+     #   imageio
+     #   jax
+     #   ml-dtypes
+     #   opt-einsum
+     #   pywavelets
+     #   scikit-image
+     #   scipy
+     #   tensorboard
+     #   tensorflow-cpu
+     #   tifffile
+ oauthlib==3.2.2
+     # via requests-oauthlib
+ opt-einsum==3.3.0
+     # via
+     #   jax
+     #   tensorflow-cpu
+ packaging==23.1
+     # via
+     #   scikit-image
+     #   tensorflow-cpu
+ pillow==9.5.0
+     # via
+     #   imageio
+     #   scikit-image
+ protobuf==4.23.1
+     # via
+     #   tensorboard
+     #   tensorflow-cpu
+ pyasn1==0.5.0
+     # via
+     #   pyasn1-modules
+     #   rsa
+ pyasn1-modules==0.3.0
+     # via google-auth
+ pydantic==1.10.7
+     # via fastapi
+ python-multipart==0.0.6
+     # via -r requirements.in
+ pywavelets==1.4.1
+     # via scikit-image
+ requests==2.30.0
+     # via
+     #   requests-oauthlib
+     #   tensorboard
+ requests-oauthlib==1.3.1
+     # via google-auth-oauthlib
+ rsa==4.9
+     # via google-auth
+ scikit-image==0.20.0
+     # via -r requirements.in
+ scipy==1.9.1
+     # via
+     #   jax
+     #   scikit-image
+ six==1.16.0
+     # via
+     #   astunparse
+     #   google-auth
+     #   google-pasta
+     #   tensorflow-cpu
+ sniffio==1.3.0
+     # via anyio
+ starlette==0.27.0
+     # via fastapi
+ tensorboard==2.12.3
+     # via tensorflow-cpu
+ tensorboard-data-server==0.7.0
+     # via tensorboard
+ tensorflow-cpu==2.12.0
+     # via -r requirements.in
+ tensorflow-estimator==2.12.0
+     # via tensorflow-cpu
+ tensorflow-io-gcs-filesystem==0.32.0
+     # via tensorflow-cpu
+ termcolor==2.3.0
+     # via tensorflow-cpu
+ tifffile==2023.4.12
+     # via scikit-image
+ typing-extensions==4.5.0
+     # via
+     #   pydantic
+     #   starlette
+     #   tensorflow-cpu
+ urllib3==1.26.15
+     # via
+     #   google-auth
+     #   requests
+ uvicorn==0.22.0
+     # via -r requirements.in
+ werkzeug==2.3.4
+     # via tensorboard
+ wheel==0.40.0
+     # via
+     #   astunparse
+     #   tensorboard
+ wrapt==1.14.1
+     # via tensorflow-cpu
+ zipp==3.15.0
+     # via importlib-metadata
+
+ # The following packages are considered to be unsafe in a requirements file:
+ # setuptools