leandro committed
Commit · ff98df2 · 1 Parent(s): fd82673

cleanup

Browse files:
- Dockerfile  +1 -3
- jupyter/Dockerfile  +0 -16
- jupyter/jupyter_server.py  +0 -176
- jupyter/jupyter_kernel.py → jupyter_kernel.py  +0 -0
- test.py  +10 -0
Dockerfile CHANGED
@@ -18,6 +18,4 @@ RUN pip install pandas scikit-learn matplotlib seaborn
 
 
 COPY --chown=user . /app
-
-# CMD ["pwd", "&&", "gunicorn", "-w", "1", "-b", "0.0.0.0:7860", "jupyter.jupyter_server:app"]
-CMD cd jupyter && gunicorn -w 1 -b 0.0.0.0:7860 jupyter_kernel:app
+CMD gunicorn -w 1 -b 0.0.0.0:7860 jupyter_kernel:app
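With jupyter_kernel.py moved to the repository root (see the rename below), gunicorn can import jupyter_kernel:app directly, so the "cd jupyter" workaround in the old CMD is no longer needed. The kernel module itself is renamed without changes and its contents are not part of this diff; purely for orientation, a minimal sketch of the kind of Flask app that jupyter_kernel:app must expose could look like the following, with the endpoints inferred from its callers (the deleted jupyter_server.py and the new test.py), not from the actual file:

# Hypothetical sketch only: the real jupyter_kernel.py is renamed without
# changes in this commit and its contents are not shown. The endpoints below
# are inferred from its callers (jupyter_server.py and test.py).
from flask import Flask, jsonify, request

app = Flask(__name__)  # gunicorn resolves this object as jupyter_kernel:app

@app.route('/health', methods=['GET'])
def health():
    # polled by the orchestration server and by test.py to check readiness
    return jsonify({'status': 'ok'})

@app.route('/execute', methods=['POST'])
def execute():
    code = request.json.get('code', '')
    # the real module runs `code` in an IPython kernel (ipykernel/jupyter-client);
    # echoing it back here keeps the sketch self-contained and runnable
    return jsonify({'result': 'success', 'output': f'received {len(code)} characters'})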
jupyter/Dockerfile DELETED
@@ -1,16 +0,0 @@
-FROM python:3.9-slim
-
-WORKDIR /app
-
-# Minimal requirements
-RUN pip install --upgrade pip flask docker requests ipython jupyter-client ipykernel
-RUN ipython kernel install --name "python3" --user
-
-# Extra requirements
-RUN pip install pandas scikit-learn matplotlib seaborn
-
-COPY jupyter_kernel.py .
-
-EXPOSE 5000
-
-ENTRYPOINT ["python", "jupyter_kernel.py"]
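This was the image for the per-kernel sandbox containers: a slim Python base with the kernel dependencies, the kernel server listening on port 5000, and jupyter_kernel.py as entrypoint. It was built and launched by the deleted jupyter_server.py shown next; condensed into a standalone sketch (host port 50000 is an arbitrary illustrative choice), that interaction was roughly:

# Condensed from the deleted jupyter_server.py below; illustration only.
import docker

client = docker.from_env()
client.images.build(path="jupyter", tag="jupyter-kernel:latest")
container = client.containers.run(
    "jupyter-kernel:latest",
    detach=True,
    remove=True,
    ports={"5000/tcp": 50000},              # EXPOSE 5000 mapped to a host port
    environment={"EXECUTION_TIMEOUT": 10},  # presumably read by jupyter_kernel.py
)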
jupyter/jupyter_server.py DELETED
@@ -1,176 +0,0 @@
-from flask import Flask, request, jsonify
-import os
-import uuid
-import time
-import docker
-import requests
-import atexit
-import socket
-import argparse
-import logging
-from pydantic import BaseModel, Field, ValidationError
-
-current_dir = os.path.dirname(os.path.abspath(__file__))
-
-app = Flask(__name__)
-app.logger.setLevel(logging.INFO)
-
-
-# CLI function to parse arguments
-def parse_args():
-    parser = argparse.ArgumentParser(description="Jupyter server.")
-    parser.add_argument('--n_instances', type=int, help="Number of Jupyter instances.")
-    parser.add_argument('--n_cpus', type=int, default=2, help="Number of CPUs per Jupyter instance.")
-    parser.add_argument('--mem', type=str, default="2g", help="Amount of memory per Jupyter instance.")
-    parser.add_argument('--execution_timeout', type=int, default=10, help="Timeout period for a code execution.")
-    parser.add_argument('--port', type=int, default=5001, help="Port of main server")
-    return parser.parse_args()
-
-
-def get_unused_port(start=50000, end=65535, exclusion=[]):
-    for port in range(start, end + 1):
-        if port in exclusion:
-            continue
-        try:
-            sock = socket.socket()
-            sock.bind(("", port))
-            sock.listen(1)
-            sock.close()
-            return port
-        except OSError:
-            continue
-    raise IOError("No free ports available in range {}-{}".format(start, end))
-
-
-def create_kernel_containers(n_instances, n_cpus=2, mem="2g", execution_timeout=10):
-
-    docker_client = docker.from_env()
-    app.logger.info("Building docker image...")
-    image, logs = docker_client.images.build(path=current_dir, tag='jupyter-kernel:latest')
-    app.logger.info("Building docker image complete.")
-
-    containers = []
-    port_exclusion = []
-    for i in range(n_instances):
-
-        free_port = get_unused_port(exclusion=port_exclusion)
-        port_exclusion.append(free_port)  # containers take a while to start up, so we don't hand out the same port twice
-        app.logger.info(f"Starting container {i} on port {free_port}...")
-        container = docker_client.containers.run(
-            "jupyter-kernel:latest",
-            detach=True,
-            mem_limit=mem,
-            cpuset_cpus=f"{i*n_cpus}-{(i+1)*n_cpus-1}",  # pin each instance to its own block of n_cpus cores
-            remove=True,
-            ports={'5000/tcp': free_port},
-            environment={"EXECUTION_TIMEOUT": execution_timeout},
-        )
-
-        containers.append({"container": container, "port": free_port})
-
-    start_time = time.time()
-
-    containers_ready = []
-
-    while len(containers_ready) < n_instances:
-        app.logger.info("Pinging Jupyter containers to check readiness.")
-        if time.time() - start_time > 60:
-            raise TimeoutError("Container took too long to start up.")
-        for i in range(n_instances):
-            if i in containers_ready:
-                continue
-            url = f"http://localhost:{containers[i]['port']}/health"
-            try:
-                # TODO: dedicated health endpoint
-                response = requests.get(url)
-                if response.status_code == 200:
-                    containers_ready.append(i)
-            except Exception as e:
-                # container not reachable yet; keep polling
-                pass
-        time.sleep(0.5)
-    app.logger.info("Containers ready!")
-    return containers
-
-def shutdown_cleanup():
-    app.logger.info("Shutting down. Stopping and removing all containers...")
-    for instance in app.containers:
-        try:
-            instance['container'].stop()
-            instance['container'].remove()
-        except Exception as e:
-            app.logger.info(f"Error stopping/removing container: {str(e)}")
-    app.logger.info("All containers stopped and removed.")
-
-
-class ServerRequest(BaseModel):
-    code: str = Field(..., example="print('Hello World!')")
-    instance_id: int = Field(0, example=0)
-    restart: bool = Field(False, example=False)
-
-
-@app.route('/execute', methods=['POST'])
-def execute_code():
-    try:
-        input = ServerRequest(**request.json)
-    except ValidationError as e:
-        return jsonify(e.errors()), 400
-
-
-    port = app.containers[input.instance_id]["port"]
-
-    app.logger.info(f"Received request for instance {input.instance_id} (port={port}).")
-
-    try:
-        if input.restart:
-            response = requests.post(f'http://localhost:{port}/restart', json={})
-            if response.status_code == 200:
-                app.logger.info(f"Kernel for instance {input.instance_id} restarted.")
-            else:
-                app.logger.info(f"Error when restarting kernel of instance {input.instance_id}: {response.json()}.")
-
-        response = requests.post(f'http://localhost:{port}/execute', json={'code': input.code})
-        result = response.json()
-        return result
-
-    except Exception as e:
-        app.logger.info(f"Error in execute_code: {str(e)}")
-        return jsonify({
-            'result': 'error',
-            'output': str(e)
-        }), 500
-
-
-def init_app(app, args=None):
-    if args is None:
-        # When run through Gunicorn, use environment variables
-        args = argparse.Namespace(
-            n_instances=int(os.getenv('N_INSTANCES', 1)),
-            n_cpus=int(os.getenv('N_CPUS', 1)),
-            mem=os.getenv('MEM', '1g'),
-            execution_timeout=int(os.getenv('EXECUTION_TIMEOUT', 60))
-        )
-
-    app.containers = create_kernel_containers(
-        args.n_instances,
-        n_cpus=args.n_cpus,
-        mem=args.mem,
-        execution_timeout=args.execution_timeout
-    )
-    return app, args
-
-atexit.register(shutdown_cleanup)
-
-if __name__ == '__main__':
-    args = parse_args()
-    app, args = init_app(app, args=args)
-    # don't use debug=True --> it will run main twice and thus start double the containers
-    app.run(debug=False, host='0.0.0.0', port=args.port)
-else:
-    app, args = init_app(app)
-
-
-# TODO:
-# how to mount data at runtime into the container? idea: mount a (read only)
-# folder into the container at startup and copy the data in there. before starting
-# the kernel we could cp the necessary data into the pwd.
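Before this commit, clients went through the orchestration server above rather than calling a kernel container directly. For reference, a request against its /execute route, matching the ServerRequest model, would have looked roughly like this (port 5001 is the parse_args default; illustration only, not part of the commit):

import requests

# fields mirror the ServerRequest pydantic model of the deleted server
payload = {
    "code": "print('Hello World!')",  # code to run in the kernel
    "instance_id": 0,                 # which kernel container to route to
    "restart": False,                 # set True to restart the kernel first
}
print(requests.post("http://localhost:5001/execute", json=payload).json())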
jupyter/jupyter_kernel.py → jupyter_kernel.py RENAMED
File without changes
test.py ADDED
@@ -0,0 +1,10 @@
+import requests
+
+org_or_user = "lvwerra"
+space_name = "executor"
+
+url = f"https://{org_or_user}-{space_name}.hf.space/health"
+print(requests.get(url).json())
+
+url = f"https://{org_or_user}-{space_name}.hf.space/execute"
+print(requests.post(url, json={'code': 'print(1+1)'}).json())