python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import Tuple
from nvflare.apis.controller_spec import ClientTask, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
class TaskCheckStatus(Enum):
SEND = 1 # send the task to the client
BLOCK = 2 # do not send the task, and block other tasks
NO_BLOCK = 3 # do not send the task, and continue checking
class TaskManager(object):
def __init__(self):
"""Manages tasks for clients.
Programming Conventions:
A TaskManager should be implemented as a state-free object.
All task processing state info should be stored in the Task's props dict.
Name the keys in the props dict with prefix "__" to avoid potential conflict with
app-defined props.
"""
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
def check_task_send(self, client_task: ClientTask, fl_ctx: FLContext) -> TaskCheckStatus:
"""Determine whether the task should be sent to the client.
Default logic:
If the client already did the task, don't send again (NO_BLOCK).
If the client is in the task's target list, or the task's target
list is None (meaning all clients), then send the task (SEND). Otherwise, do not block the
task checking (NO_BLOCK), so the next task will be checked.
Args:
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
Returns:
TaskCheckStatus: NO_BLOCK for not sending the task, BLOCK for waiting, SEND for OK to send
"""
if client_task.result_received_time:
# the task was already sent to the client AND result was already received
# do not send again
return TaskCheckStatus.NO_BLOCK
client_name = client_task.client.name
if client_task.task.targets is None or client_name in client_task.task.targets:
return TaskCheckStatus.SEND
else:
return TaskCheckStatus.NO_BLOCK
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine whether the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
pass
def check_task_result(self, result: Shareable, client_task: ClientTask, fl_ctx: FLContext):
"""Check the result received from the client.
The manager can set appropriate headers into the result to indicate certain conditions (e.g.
late response).
Args:
result (Shareable): the result to be checked
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
"""
pass
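# A minimal sketch of a concrete TaskManager, assuming the hosting controller calls
# check_task_send() for each client request and check_task_exit() from its monitor loop.
# The "__max_resps" key and the CountBasedTaskManager class are illustrative only; they
# follow the "__" prefix convention for Task.props described in the docstring above.
class CountBasedTaskManager(TaskManager):
    def __init__(self, task: Task, max_responses: int):
        TaskManager.__init__(self)
        task.props["__max_resps"] = max_responses  # keep state in the task, not in self

    def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
        # count clients whose results have arrived
        received = sum(1 for ct in task.client_tasks if ct.result_received_time is not None)
        if received >= task.props["__max_resps"]:
            return True, TaskCompletionStatus.OK
        return False, TaskCompletionStatus.IGNORED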
| NVFlare-main | nvflare/apis/impl/task_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Tuple
from nvflare.apis.controller_spec import ClientTask, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable
from .task_manager import TaskCheckStatus, TaskManager
_KEY_DYNAMIC_TARGETS = "__dynamic_targets"
_KEY_TASK_ASSIGN_TIMEOUT = "__task_assignment_timeout"
_KEY_TASK_RESULT_TIMEOUT = "__task_result_timeout"
_KEY_LAST_SEND_IDX = "__last_send_idx"
_PENDING_CLIENT_TASK = "__pending_client_task"
class SequentialRelayTaskManager(TaskManager):
def __init__(self, task: Task, task_assignment_timeout, task_result_timeout, dynamic_targets: bool):
"""Task manager for relay controller on SendOrder.SEQUENTIAL.
Args:
task (Task): an instance of Task
task_assignment_timeout (int): timeout value on a client requesting its task
task_result_timeout (int): timeout value on reply of one client
dynamic_targets (bool): allow clients to join after this task starts
"""
TaskManager.__init__(self)
if task_assignment_timeout is None:
task_assignment_timeout = 0
if task_result_timeout is None:
task_result_timeout = 0
task.props[_KEY_DYNAMIC_TARGETS] = dynamic_targets
task.props[_KEY_TASK_ASSIGN_TIMEOUT] = task_assignment_timeout
task.props[_KEY_TASK_RESULT_TIMEOUT] = task_result_timeout
task.props[_KEY_LAST_SEND_IDX] = -1 # client index of last send
task.props[_PENDING_CLIENT_TASK] = None
def check_task_send(self, client_task: ClientTask, fl_ctx: FLContext) -> TaskCheckStatus:
"""Determine whether the task should be sent to the client.
Args:
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
Returns:
TaskCheckStatus: NO_BLOCK for not sending the task, BLOCK for waiting, SEND for OK to send
"""
client_name = client_task.client.name
task = client_task.task
if task.props[_KEY_DYNAMIC_TARGETS]:
if task.targets is None:
task.targets = []
if client_name not in task.targets:
self.logger.debug("client_name: {} added to task.targets".format(client_name))
task.targets.append(client_name)
# is this client eligible?
if client_name not in task.targets:
# this client is not a target
return TaskCheckStatus.NO_BLOCK
# adjust client window
win_start_idx, win_end_idx = self._determine_window(task)
self.logger.debug("win_start_idx={}, win_end_idx={}".format(win_start_idx, win_end_idx))
if win_start_idx < 0:
# wait for this task to end by the monitor
return TaskCheckStatus.BLOCK
# see whether this client is in the window
for i in range(win_start_idx, win_end_idx):
if client_name == task.targets[i]:
# this client is in the window!
self.logger.debug("last_send_idx={}".format(i))
task.props[_KEY_LAST_SEND_IDX] = i
return TaskCheckStatus.SEND
# this client is not in the window
return TaskCheckStatus.NO_BLOCK
def _determine_window(self, task: Task) -> Tuple[int, int]:
"""Returns two indexes (starting/ending) of a window of client candidates.
When starting is negative and ending is 0, the window is closed and the task should exit
When both starting and ending are negative, there is no client candidate as current client task has not returned
Args:
task (Task): an instance of Task
Returns:
Tuple[int, int]: starting and ending indices of a window of client candidates.
"""
# adjust client window
task_result_timeout = task.props[_KEY_TASK_RESULT_TIMEOUT]
last_send_idx = task.props[_KEY_LAST_SEND_IDX]
last_send_target = task.targets[last_send_idx]
if last_send_idx >= 0 and last_send_target in task.last_client_task_map:
# see whether the result has been received
last_task = task.last_client_task_map[last_send_target]
self.logger.debug("last_task={}".format(last_task))
if last_task.result_received_time is None:
# result has not been received
# should this client timeout?
if task_result_timeout and time.time() - last_task.task_sent_time > task_result_timeout:
# timeout!
# we give up on this client and move to the next target
win_start_idx = last_send_idx + 1
win_start_time = last_task.task_sent_time + task_result_timeout
self.logger.debug(
"client task result timed out. win_start_idx={}, win_start_time={}".format(
win_start_idx, win_start_time
)
)
else:
# continue to wait
self.logger.debug("keep waiting on task={}".format(task))
return -1, -1
else:
# result has been received!
win_start_idx = last_send_idx + 1
win_start_time = last_task.result_received_time
self.logger.debug(
"result received. win_start_idx={}, win_start_time={}".format(win_start_idx, win_start_time)
)
else:
# nothing has been sent
win_start_idx = 0
win_start_time = task.schedule_time
self.logger.debug(
"nothing has been sent. win_start_idx={}, win_start_time={}".format(win_start_idx, win_start_time)
)
num_targets = 0 if task.targets is None else len(task.targets)
if num_targets and win_start_idx >= num_targets:
# we reached the end of targets
# so task should exit
return -1, 0
task_assignment_timeout = task.props[_KEY_TASK_ASSIGN_TIMEOUT]
if task_assignment_timeout:
win_size = int((time.time() - win_start_time) / task_assignment_timeout) + 1
else:
win_size = 1
self.logger.debug("win_size={}".format(win_size))
win_end_idx = win_start_idx + win_size
# Should exit if win extends past the entire target list + 1
if task_assignment_timeout and win_end_idx > num_targets + 1:
return -1, 0
if win_end_idx > num_targets:
win_end_idx = num_targets
self.logger.debug("win_end_idx={}".format(win_end_idx))
return win_start_idx, win_end_idx
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine whether the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
# are we waiting for any client?
win_start_idx, win_end_idx = self._determine_window(task)
self.logger.debug("check_task_exit: win_start_idx={}, win_end_idx={}".format(win_start_idx, win_end_idx))
if win_start_idx < 0 and win_end_idx == 0:
last_send_idx = task.props[_KEY_LAST_SEND_IDX]
last_send_target = task.targets[last_send_idx]
if last_send_idx >= 0 and last_send_target in task.last_client_task_map:
# see whether the result has been received
last_client_task = task.last_client_task_map[last_send_target]
if last_client_task.result_received_time is not None:
return True, TaskCompletionStatus.OK
return True, TaskCompletionStatus.TIMEOUT
else:
return False, TaskCompletionStatus.IGNORED
def check_task_result(self, result: Shareable, client_task: ClientTask, fl_ctx: FLContext):
"""Check the result received from the client.
See whether the client_task is the last one in the task's list
If not, then it is a late response and ReservedHeaderKey.REPLY_IS_LATE is
set to True in result's header.
Args:
result (Shareable): an instance of Shareable
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
"""
# see whether the client_task is the last one in the task's list
# If not, then it is a late response
task = client_task.task
if client_task != task.client_tasks[-1]:
result.set_header(key=ReservedHeaderKey.REPLY_IS_LATE, value=True)
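# Worked example of the window arithmetic used in _determine_window() above: with a
# task_assignment_timeout of 10 seconds, the window of eligible targets grows by one
# every 10 seconds counted from win_start_time. The helper below is a plain-number
# sketch, not part of the manager.
def _example_window(win_start_idx: int, win_start_time: float, now: float,
                    task_assignment_timeout: float, num_targets: int) -> Tuple[int, int]:
    if task_assignment_timeout:
        win_size = int((now - win_start_time) / task_assignment_timeout) + 1
    else:
        win_size = 1
    win_end_idx = min(win_start_idx + win_size, num_targets)
    return win_start_idx, win_end_idx
# _example_window(0, 0.0, 25.0, 10.0, 4) -> (0, 3): targets[0], [1] and [2] are eligible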
| NVFlare-main | nvflare/apis/impl/seq_relay_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Tuple
from nvflare.apis.controller_spec import ClientTask, SendOrder, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from .task_manager import TaskCheckStatus, TaskManager
_KEY_ORDER = "__order"
_KEY_TASK_ASSIGN_TIMEOUT = "__task_assignment_timeout"
class SendTaskManager(TaskManager):
def __init__(self, task: Task, send_order: SendOrder, task_assignment_timeout):
"""Task manager for send controller.
Args:
task (Task): an instance of Task
send_order (SendOrder): the order of clients to receive task
task_assignment_timeout (int): timeout value on a client requesting its task
"""
TaskManager.__init__(self)
if task_assignment_timeout is None or task_assignment_timeout <= 0:
task_assignment_timeout = 0
task.props[_KEY_ORDER] = send_order
task.props[_KEY_TASK_ASSIGN_TIMEOUT] = task_assignment_timeout
def check_task_send(self, client_task: ClientTask, fl_ctx: FLContext) -> TaskCheckStatus:
"""Determine whether the task should be sent to the client.
Args:
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
Returns:
TaskCheckStatus: NO_BLOCK for not sending the task, BLOCK for waiting, SEND for OK to send
"""
task = client_task.task
if len(task.client_tasks) > 0: # already sent to one client
if client_task.task_sent_time is not None: # the task was sent to this client!
if client_task.result_received_time is not None:
# the task result was already received
# this task is actually done - waiting to end by the monitor
return TaskCheckStatus.NO_BLOCK
else:
return TaskCheckStatus.SEND
else: # the task was sent to someone else
return TaskCheckStatus.NO_BLOCK
# in SEQUENTIAL mode - targets must be explicitly specified
# is this client eligible?
try:
client_idx = task.targets.index(client_task.client.name)
except ValueError:
client_idx = -1
if client_idx < 0:
# this client is not a target
return TaskCheckStatus.NO_BLOCK
if task.props[_KEY_ORDER] == SendOrder.ANY:
return TaskCheckStatus.SEND
task_assignment_timeout = task.props[_KEY_TASK_ASSIGN_TIMEOUT]
if task_assignment_timeout == 0:
# no client timeout - can only send to the first target
eligible_client_idx = 0
else:
elapsed = time.time() - task.create_time
eligible_client_idx = int(elapsed / task_assignment_timeout)
if client_idx <= eligible_client_idx:
return TaskCheckStatus.SEND
else:
# this client is currently not eligible but could be later
# since this client is involved in the task, we need to wait until this task is resolved!
return TaskCheckStatus.BLOCK
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine whether the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
if len(task.client_tasks) > 0:
# there should be only a single item in the task's client status list
# because only a single client is sent the task!
for s in task.client_tasks:
if s.result_received_time is not None:
# this task is done!
return True, TaskCompletionStatus.OK
# no one is working on this task yet or the task is not done
return False, TaskCompletionStatus.IGNORED
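# Worked example of the SEQUENTIAL eligibility rule above: with task_assignment_timeout=30,
# the first target is eligible immediately, the second after 30s, the third after 60s, and
# so on; a target whose turn has not come yet causes BLOCK rather than being skipped.
# This helper only illustrates the arithmetic and is not used by SendTaskManager.
def _example_eligible_client_idx(elapsed_secs: float, task_assignment_timeout: float) -> int:
    if task_assignment_timeout == 0:
        return 0  # no timeout: only the first target may take the task
    return int(elapsed_secs / task_assignment_timeout)
# _example_eligible_client_idx(75.0, 30.0) -> 2, so targets[0..2] may now be sent the task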
| NVFlare-main | nvflare/apis/impl/send_manager.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pathlib
import shutil
import tempfile
import time
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Union
from nvflare.apis.client_engine_spec import ClientEngineSpec
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import Job, JobDataKey, JobMetaKey, job_from_meta, new_job_id
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec, RunStatus
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.apis.storage import WORKSPACE, StorageException, StorageSpec
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.zip_utils import unzip_all_from_bytes, zip_directory_to_bytes
class _JobFilter(ABC):
@abstractmethod
def filter_job(self, meta: dict) -> bool:
pass
class _StatusFilter(_JobFilter):
def __init__(self, status_to_check):
self.result = []
self.status_to_check = status_to_check
def filter_job(self, meta: dict):
if meta[JobMetaKey.STATUS] == self.status_to_check:
self.result.append(job_from_meta(meta))
return True
class _AllJobsFilter(_JobFilter):
def __init__(self):
self.result = []
def filter_job(self, meta: dict):
self.result.append(job_from_meta(meta))
return True
class _ReviewerFilter(_JobFilter):
def __init__(self, reviewer_name, fl_ctx: FLContext):
"""Not used yet, for use in future implementations."""
self.result = []
self.reviewer_name = reviewer_name
def filter_job(self, meta: dict):
approvals = meta.get(JobMetaKey.APPROVALS)
if not approvals or self.reviewer_name not in approvals:
self.result.append(job_from_meta(meta))
return True
# TODO:: use try block around storage calls
class SimpleJobDefManager(JobDefManagerSpec):
def __init__(self, uri_root: str = "jobs", job_store_id: str = "job_store"):
super().__init__()
self.uri_root = uri_root
os.makedirs(uri_root, exist_ok=True)
self.job_store_id = job_store_id
def _get_job_store(self, fl_ctx):
engine = fl_ctx.get_engine()
if not (isinstance(engine, ServerEngineSpec) or isinstance(engine, ClientEngineSpec)):
raise TypeError(f"engine should be of type ServerEngineSpec or ClientEngineSpec, but got {type(engine)}")
store = engine.get_component(self.job_store_id)
if not isinstance(store, StorageSpec):
raise TypeError(f"engine should have a job store component of type StorageSpec, but got {type(store)}")
return store
def job_uri(self, jid: str):
return os.path.join(self.uri_root, jid)
def create(self, meta: dict, uploaded_content: bytes, fl_ctx: FLContext) -> Dict[str, Any]:
# validate meta to make sure it has:
jid = meta.get(JobMetaKey.JOB_ID.value, None)
if not jid:
jid = new_job_id()
meta[JobMetaKey.JOB_ID.value] = jid
now = time.time()
meta[JobMetaKey.SUBMIT_TIME.value] = now
meta[JobMetaKey.SUBMIT_TIME_ISO.value] = datetime.datetime.fromtimestamp(now).astimezone().isoformat()
meta[JobMetaKey.START_TIME.value] = ""
meta[JobMetaKey.DURATION.value] = "N/A"
meta[JobMetaKey.STATUS.value] = RunStatus.SUBMITTED.value
# write it to the store
stored_data = {JobDataKey.JOB_DATA.value: uploaded_content, JobDataKey.WORKSPACE_DATA.value: None}
store = self._get_job_store(fl_ctx)
store.create_object(self.job_uri(jid), fobs.dumps(stored_data), meta, overwrite_existing=True)
return meta
def delete(self, jid: str, fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
store.delete_object(self.job_uri(jid))
def _validate_meta(self, meta):
"""Validate meta
Args:
meta: meta to validate
Returns:
"""
pass
def _validate_uploaded_content(self, uploaded_content) -> bool:
"""Validate uploaded content for creating a run config. (THIS NEEDS TO HAPPEN BEFORE CONTENT IS PROVIDED NOW)
Internally used by create and update.
1. check all sites in deployment are in resources
2. each site in deployment need to have resources (each site in resource need to be in deployment ???)
"""
pass
def get_job(self, jid: str, fl_ctx: FLContext) -> Optional[Job]:
store = self._get_job_store(fl_ctx)
try:
job_meta = store.get_meta(self.job_uri(jid))
return job_from_meta(job_meta)
except StorageException:
return None
def set_results_uri(self, jid: str, result_uri: str, fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
updated_meta = {JobMetaKey.RESULT_LOCATION.value: result_uri}
store.update_meta(self.job_uri(jid), updated_meta, replace=False)
return self.get_job(jid, fl_ctx)
def get_app(self, job: Job, app_name: str, fl_ctx: FLContext) -> bytes:
temp_dir = tempfile.mkdtemp()
job_id_dir = self._load_job_data_from_store(job.job_id, temp_dir, fl_ctx)
job_folder = os.path.join(job_id_dir, job.meta[JobMetaKey.JOB_FOLDER_NAME.value])
fullpath_src = os.path.join(job_folder, app_name)
result = zip_directory_to_bytes(fullpath_src, "")
shutil.rmtree(temp_dir)
return result
def get_apps(self, job: Job, fl_ctx: FLContext) -> Dict[str, bytes]:
temp_dir = tempfile.mkdtemp()
job_id_dir = self._load_job_data_from_store(job.job_id, temp_dir, fl_ctx)
job_folder = os.path.join(job_id_dir, job.meta[JobMetaKey.JOB_FOLDER_NAME.value])
result_dict = {}
for app in job.get_deployment():
fullpath_src = os.path.join(job_folder, app)
result_dict[app] = zip_directory_to_bytes(fullpath_src, "")
shutil.rmtree(temp_dir)
return result_dict
def _load_job_data_from_store(self, jid: str, temp_dir: str, fl_ctx: FLContext):
data_bytes = self.get_content(jid, fl_ctx)
job_id_dir = os.path.join(temp_dir, jid)
if os.path.exists(job_id_dir):
shutil.rmtree(job_id_dir)
os.mkdir(job_id_dir)
unzip_all_from_bytes(data_bytes, job_id_dir)
return job_id_dir
def get_content(self, jid: str, fl_ctx: FLContext) -> Optional[bytes]:
store = self._get_job_store(fl_ctx)
try:
stored_data = store.get_data(self.job_uri(jid))
except StorageException:
return None
return fobs.loads(stored_data).get(JobDataKey.JOB_DATA.value)
def get_job_data(self, jid: str, fl_ctx: FLContext) -> dict:
store = self._get_job_store(fl_ctx)
stored_data = store.get_data(self.job_uri(jid))
return fobs.loads(stored_data)
def set_status(self, jid: str, status: RunStatus, fl_ctx: FLContext):
meta = {JobMetaKey.STATUS.value: status.value}
store = self._get_job_store(fl_ctx)
if status == RunStatus.RUNNING.value:
meta[JobMetaKey.START_TIME.value] = str(datetime.datetime.now())
elif status in [
RunStatus.FINISHED_ABORTED.value,
RunStatus.FINISHED_COMPLETED.value,
RunStatus.FINISHED_EXECUTION_EXCEPTION.value,
RunStatus.FINISHED_CANT_SCHEDULE.value,
]:
job_meta = store.get_meta(self.job_uri(jid))
if job_meta[JobMetaKey.START_TIME.value]:
start_time = datetime.datetime.strptime(
job_meta.get(JobMetaKey.START_TIME.value), "%Y-%m-%d %H:%M:%S.%f"
)
meta[JobMetaKey.DURATION.value] = str(datetime.datetime.now() - start_time)
store.update_meta(uri=self.job_uri(jid), meta=meta, replace=False)
def update_meta(self, jid: str, meta, fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
store.update_meta(uri=self.job_uri(jid), meta=meta, replace=False)
def refresh_meta(self, job: Job, meta_keys: list, fl_ctx: FLContext):
"""Refresh meta of the job as specified in the meta keys
Save the values of the specified keys into job store
Args:
job: job object
meta_keys: meta keys that need to be updated
fl_ctx: FLContext
"""
if meta_keys:
meta = {}
for k in meta_keys:
if k in job.meta:
meta[k] = job.meta[k]
else:
meta = job.meta
if meta:
self.update_meta(job.job_id, meta, fl_ctx)
def get_all_jobs(self, fl_ctx: FLContext) -> List[Job]:
job_filter = _AllJobsFilter()
self._scan(job_filter, fl_ctx)
return job_filter.result
def _scan(self, job_filter: _JobFilter, fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
jid_paths = store.list_objects(self.uri_root)
if not jid_paths:
return
for jid_path in jid_paths:
jid = pathlib.PurePath(jid_path).name
meta = store.get_meta(self.job_uri(jid))
if meta:
ok = job_filter.filter_job(meta)
if not ok:
break
def get_jobs_by_status(self, status, fl_ctx: FLContext) -> List[Job]:
job_filter = _StatusFilter(status)
self._scan(job_filter, fl_ctx)
return job_filter.result
def get_jobs_waiting_for_review(self, reviewer_name: str, fl_ctx: FLContext) -> List[Job]:
job_filter = _ReviewerFilter(reviewer_name, fl_ctx)
self._scan(job_filter, fl_ctx)
return job_filter.result
def set_approval(
self, jid: str, reviewer_name: str, approved: bool, note: str, fl_ctx: FLContext
) -> Dict[str, Any]:
meta = self.get_job(jid, fl_ctx).meta
if meta:
approvals = meta.get(JobMetaKey.APPROVALS)
if not approvals:
approvals = {}
meta[JobMetaKey.APPROVALS.value] = approvals
approvals[reviewer_name] = (approved, note)
updated_meta = {JobMetaKey.APPROVALS.value: approvals}
store = self._get_job_store(fl_ctx)
store.update_meta(self.job_uri(jid), updated_meta, replace=False)
return meta
def save_workspace(self, jid: str, data: Union[bytes, str], fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
store.update_object(self.job_uri(jid), data, WORKSPACE)
def get_storage_component(self, jid: str, component: str, fl_ctx: FLContext):
store = self._get_job_store(fl_ctx)
return store.get_data(self.job_uri(jid), component)
def get_storage_for_download(
self, jid: str, download_dir: str, component: str, download_file: str, fl_ctx: FLContext
):
store = self._get_job_store(fl_ctx)
os.makedirs(os.path.join(download_dir, jid), exist_ok=True)
destination_file = os.path.join(download_dir, jid, download_file)
store.get_data_for_download(self.job_uri(jid), component, destination_file)
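# Sketch of the metadata that create() fills in before writing to the job store, using a
# plain dict. The literal key strings below are illustrative stand-ins for the
# JobMetaKey.*.value constants imported above and may not match them exactly.
import uuid

def _example_initial_meta(submitted_meta: dict) -> dict:
    meta = dict(submitted_meta)
    now = time.time()
    meta.setdefault("job_id", uuid.uuid4().hex)  # stand-in for new_job_id()
    meta["submit_time"] = now
    meta["submit_time_iso"] = datetime.datetime.fromtimestamp(now).astimezone().isoformat()
    meta["start_time"] = ""
    meta["duration"] = "N/A"
    meta["status"] = "SUBMITTED"  # stand-in for RunStatus.SUBMITTED.value
    return meta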
| NVFlare-main | nvflare/apis/impl/job_def_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Tuple
from nvflare.apis.controller_spec import ClientTask, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from .task_manager import TaskCheckStatus, TaskManager
_KEY_MIN_RESPS = "__min_responses"
_KEY_WAIT_TIME_AFTER_MIN_RESPS = "__wait_time_after_min_received"
_KEY_MIN_RESPS_RCV_TIME = "__min_resps_received_time"
class BcastTaskManager(TaskManager):
def __init__(self, task: Task, min_responses: int = 0, wait_time_after_min_received: int = 0):
"""Task manager for broadcast controller.
Args:
task (Task): an instance of Task
min_responses (int, optional): the minimum number of responses so this task is considered finished. Defaults to 0.
wait_time_after_min_received (int, optional): additional wait time for late clients to contribute their results. Defaults to 0.
"""
TaskManager.__init__(self)
task.props[_KEY_MIN_RESPS] = min_responses
task.props[_KEY_WAIT_TIME_AFTER_MIN_RESPS] = wait_time_after_min_received
task.props[_KEY_MIN_RESPS_RCV_TIME] = None
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine if the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
if len(task.client_tasks) == 0:
# nothing has been sent - continue to wait
return False, TaskCompletionStatus.IGNORED
clients_responded = 0
clients_not_responded = 0
for s in task.client_tasks:
if s.result_received_time is None:
clients_not_responded += 1
else:
clients_responded += 1
if clients_responded >= len(task.targets):
# all clients have responded!
return True, TaskCompletionStatus.OK
# if min_responses is 0, need to have all client tasks responded
if task.props[_KEY_MIN_RESPS] == 0 and clients_not_responded > 0:
return False, TaskCompletionStatus.IGNORED
# check if minimum responses are received
if clients_responded == 0 or clients_responded < task.props[_KEY_MIN_RESPS]:
# continue to wait
return False, TaskCompletionStatus.IGNORED
# minimum responses received
min_resps_received_time = task.props[_KEY_MIN_RESPS_RCV_TIME]
if min_resps_received_time is None:
min_resps_received_time = time.time()
task.props[_KEY_MIN_RESPS_RCV_TIME] = min_resps_received_time
# see whether we have waited for long enough
if time.time() - min_resps_received_time >= task.props[_KEY_WAIT_TIME_AFTER_MIN_RESPS]:
# yes - exit the task
return True, TaskCompletionStatus.OK
else:
# no - continue to wait
return False, TaskCompletionStatus.IGNORED
class BcastForeverTaskManager(TaskManager):
def __init__(self):
"""Task manager for broadcast controller with forever waiting time."""
TaskManager.__init__(self)
def check_task_send(self, client_task: ClientTask, fl_ctx: FLContext) -> TaskCheckStatus:
"""Determine whether the task should be sent to the client.
Args:
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
Returns:
TaskCheckStatus: NO_BLOCK for not sending the task, SEND for OK to send
"""
# Note: even if the client may have done the task, we may still send it!
client_name = client_task.client.name
if client_task.task.targets is None or client_name in client_task.task.targets:
return TaskCheckStatus.SEND
else:
return TaskCheckStatus.NO_BLOCK
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine whether the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
# never exit
return False, TaskCompletionStatus.IGNORED
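# Worked example of BcastTaskManager's exit rule, with plain numbers: given 4 targets,
# min_responses=2 and wait_time_after_min_received=5, the task exits either when all 4
# clients have responded, or 5 seconds after the minimum number of responses arrived.
# This helper is only a sketch of the decision, not part of the manager.
def _example_bcast_should_exit(clients_responded: int, num_targets: int, min_responses: int,
                               secs_since_min_received: float, wait_after_min: float) -> bool:
    if clients_responded >= num_targets:
        return True  # everyone answered
    if min_responses == 0 or clients_responded < min_responses:
        return False  # still waiting for the minimum (0 means "all clients")
    return secs_since_min_received >= wait_after_min  # grace period for late clients
# _example_bcast_should_exit(2, 4, 2, 6.0, 5.0) -> True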
| NVFlare-main | nvflare/apis/impl/bcast_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Tuple
from nvflare.apis.controller_spec import ClientTask, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable
from .task_manager import TaskCheckStatus, TaskManager
_KEY_DYNAMIC_TARGETS = "__dynamic_targets"
_KEY_TASK_RESULT_TIMEOUT = "__task_result_timeout"
_KEY_SEND_TARGET_COUNTS = "__sent_target_count"
_KEY_PENDING_CLIENT = "__pending_client"
class AnyRelayTaskManager(TaskManager):
def __init__(self, task: Task, task_result_timeout, dynamic_targets):
"""Task manager for relay controller on SendOrder.ANY.
Args:
task (Task): an instance of Task
task_result_timeout (int): timeout value on reply of one client
dynamic_targets (bool): allow clients to join after this task starts
"""
TaskManager.__init__(self)
if task_result_timeout is None:
task_result_timeout = 0
task.props[_KEY_DYNAMIC_TARGETS] = dynamic_targets
task.props[_KEY_TASK_RESULT_TIMEOUT] = task_result_timeout
task.props[_KEY_SEND_TARGET_COUNTS] = {} # target name => times sent
task.props[_KEY_PENDING_CLIENT] = None
def check_task_send(self, client_task: ClientTask, fl_ctx: FLContext) -> TaskCheckStatus:
"""Determine whether the task should be sent to the client.
Args:
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
Raises:
RuntimeError: when a client asks for a task while the same client_task has already been dispatched to it
Returns:
TaskCheckStatus: NO_BLOCK for not sending the task, BLOCK for waiting, SEND for OK to send
"""
client_name = client_task.client.name
task = client_task.task
if task.props[_KEY_DYNAMIC_TARGETS]:
if task.targets is None:
task.targets = []
if client_name not in task.targets:
task.targets.append(client_name)
# is this client eligible?
if client_name not in task.targets:
# this client is not a target
return TaskCheckStatus.NO_BLOCK
client_occurrences = task.targets.count(client_name)
sent_target_count = task.props[_KEY_SEND_TARGET_COUNTS]
send_count = sent_target_count.get(client_name, 0)
if send_count >= client_occurrences:
# already sent enough times to this client
return TaskCheckStatus.NO_BLOCK
# only allow one pending task. Is there a client pending result?
pending_client_name = task.props[_KEY_PENDING_CLIENT]
task_result_timeout = task.props[_KEY_TASK_RESULT_TIMEOUT]
if pending_client_name is not None:
# see whether the result has been received
pending_task = task.last_client_task_map[pending_client_name]
if pending_task.result_received_time is None:
# result has not been received
# Note: in this case, the pending client and the asking client must not be the
# same, because this would be a resend case already taken care of by the controller.
if pending_client_name == client_name:
raise RuntimeError("Logic Error: must not be here for client {}".format(client_name))
# should this client timeout?
if task_result_timeout and time.time() - pending_task.task_sent_time > task_result_timeout:
# timeout!
# give up on the pending task and move to the next target
sent_target_count[pending_client_name] -= 1
pass
else:
# continue to wait
return TaskCheckStatus.BLOCK
# can send
task.props[_KEY_PENDING_CLIENT] = client_name
sent_target_count[client_name] = send_count + 1
return TaskCheckStatus.SEND
def check_task_exit(self, task: Task) -> Tuple[bool, TaskCompletionStatus]:
"""Determine whether the task should exit.
Args:
task (Task): an instance of Task
Returns:
Tuple[bool, TaskCompletionStatus]:
first entry in the tuple means whether to exit the task or not. If it's True, the task should exit.
second entry in the tuple indicates the TaskCompletionStatus.
"""
# are we waiting for any client?
num_targets = 0 if task.targets is None else len(task.targets)
if num_targets == 0:
# nothing has been sent
return False, TaskCompletionStatus.IGNORED
# see whether all targets are sent
sent_target_count = task.props[_KEY_SEND_TARGET_COUNTS]
total_sent = 0
for v in sent_target_count.values():
total_sent += v
if total_sent < num_targets:
return False, TaskCompletionStatus.IGNORED
# client_tasks might have not been added to task
if len(task.client_tasks) < num_targets:
return False, TaskCompletionStatus.IGNORED
for c_t in task.client_tasks:
if c_t.result_received_time is None:
return False, TaskCompletionStatus.IGNORED
return True, TaskCompletionStatus.OK
def check_task_result(self, result: Shareable, client_task: ClientTask, fl_ctx: FLContext):
"""Check the result received from the client.
See whether the client_task is the last one in the task's list
If not, then it is a late response and ReservedHeaderKey.REPLY_IS_LATE is
set to True in result's header.
Args:
result (Shareable): an instance of Shareable
client_task (ClientTask): the task processing state of the client
fl_ctx (FLContext): fl context that comes with the task request
"""
task = client_task.task
if client_task != task.client_tasks[-1]:
result.set_header(key=ReservedHeaderKey.REPLY_IS_LATE, value=True)
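# Sketch of the per-target send counting used by check_task_send() above: a client may
# appear in task.targets more than once and is only sent the task as many times as it
# appears. The helper uses plain lists/dicts and is not part of the manager.
def _example_can_send(client_name: str, targets: list, sent_target_count: dict) -> bool:
    if client_name not in targets:
        return False  # not a target at all
    return sent_target_count.get(client_name, 0) < targets.count(client_name)
# _example_can_send("site-1", ["site-1", "site-2", "site-1"], {"site-1": 1}) -> True
# _example_can_send("site-1", ["site-1", "site-2", "site-1"], {"site-1": 2}) -> False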
| NVFlare-main | nvflare/apis/impl/any_relay_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.apis.fl_constant import FLContextKey, NonSerializableKeys
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.utils import fobs
from nvflare.security.logging import secure_format_exception
logger = logging.getLogger("fl_context_utils")
def get_serializable_data(fl_ctx: FLContext):
new_fl_ctx = FLContext()
for k, v in fl_ctx.props.items():
if k not in NonSerializableKeys.KEYS:
try:
fobs.dumps(v)
new_fl_ctx.props[k] = v
except Exception as e:
msg = f"Object in FLContext with key {k} and type {type(v)} is not serializable (discarded): {secure_format_exception(e)}"
logger.warning(generate_log_message(fl_ctx, msg))
return new_fl_ctx
def generate_log_message(fl_ctx: FLContext, msg: str):
if not fl_ctx:
return msg
_identity_ = "identity"
_my_run = "run"
_peer_run = "peer_run"
_peer_name = "peer"
_task_name = "task_name"
_task_id = "task_id"
_rc = "peer_rc"
_wf = "wf"
all_kvs = {_identity_: fl_ctx.get_identity_name()}
my_run = fl_ctx.get_job_id()
if not my_run:
my_run = "?"
all_kvs[_my_run] = my_run
task_name = fl_ctx.get_prop(FLContextKey.TASK_NAME, None)
task_id = fl_ctx.get_prop(FLContextKey.TASK_ID, None)
if task_name:
all_kvs[_task_name] = task_name
if task_id:
all_kvs[_task_id] = task_id
wf_id = fl_ctx.get_prop(FLContextKey.WORKFLOW, None)
if wf_id is not None:
all_kvs[_wf] = wf_id
peer_ctx = fl_ctx.get_peer_context()
if peer_ctx:
if not isinstance(peer_ctx, FLContext):
raise TypeError("peer_ctx must be an instance of FLContext, but got {}".format(type(peer_ctx)))
peer_run = peer_ctx.get_job_id()
if not peer_run:
peer_run = "?"
all_kvs[_peer_run] = peer_run
peer_name = peer_ctx.get_identity_name()
if not peer_name:
peer_name = "?"
all_kvs[_peer_name] = peer_name
reply = fl_ctx.get_prop(FLContextKey.REPLY, None)
if isinstance(reply, Shareable):
rc = reply.get_return_code("OK")
all_kvs[_rc] = rc
item_order = [_identity_, _my_run, _wf, _peer_name, _peer_run, _rc, _task_name, _task_id]
ctx_items = []
for item in item_order:
if item in all_kvs:
ctx_items.append(item + "=" + str(all_kvs[item]))
return "[" + ", ".join(ctx_items) + "]: " + msg
def add_job_audit_event(fl_ctx: FLContext, ref: str = "", msg: str = "") -> str:
return AuditService.add_job_event(
job_id=fl_ctx.get_job_id(),
scope_name=fl_ctx.get_prop(FLContextKey.EFFECTIVE_JOB_SCOPE_NAME, "?"),
task_name=fl_ctx.get_prop(FLContextKey.TASK_NAME, "?"),
task_id=fl_ctx.get_prop(FLContextKey.TASK_ID, "?"),
ref=ref,
msg=msg,
)
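# Sketch of the log prefix format produced by generate_log_message(), using a plain dict
# in place of the FLContext lookups above; the values here are made up for illustration.
def _example_log_prefix(msg: str) -> str:
    all_kvs = {"identity": "server", "run": "simulate_job", "peer": "site-1", "task_name": "train"}
    item_order = ["identity", "run", "wf", "peer", "peer_run", "peer_rc", "task_name", "task_id"]
    ctx_items = [item + "=" + str(all_kvs[item]) for item in item_order if item in all_kvs]
    return "[" + ", ".join(ctx_items) + "]: " + msg
# _example_log_prefix("object not serializable (discarded)")
# -> '[identity=server, run=simulate_job, peer=site-1, task_name=train]: object not serializable (discarded)'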
| NVFlare-main | nvflare/apis/utils/fl_context_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
from typing import Optional
from zipfile import ZipFile
from nvflare.apis.fl_constant import JobConstants
from nvflare.apis.job_def import ALL_SITES, JobMetaKey
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.fuel.utils.zip_utils import normpath_for_zip, zip_directory_to_bytes
def _get_default_meta(job_folder_name: str) -> str:
# A format string for the dummy meta.json
meta = f"""{{
"{JobMetaKey.JOB_NAME.value}": "{job_folder_name}",
"{JobMetaKey.JOB_FOLDER_NAME.value}": "{job_folder_name}",
"{JobMetaKey.RESOURCE_SPEC.value}": {{ }},
"{JobMetaKey.DEPLOY_MAP.value}": {{ "{job_folder_name}": ["{ALL_SITES}"] }},
"{JobMetaKey.MIN_CLIENTS.value}": 1
}}
"""
return meta
def convert_legacy_zipped_app_to_job(zip_data: bytes) -> bytes:
"""Convert a legacy app in zip into job layout in memory.
Args:
zip_data: The input zip data
Returns:
The converted zip data
"""
meta: Optional[dict] = None
reader = io.BytesIO(zip_data)
with ZipFile(reader, "r") as in_zip:
info_list = in_zip.infolist()
folder_name = info_list[0].filename.split("/")[0]
meta_file = os.path.join(folder_name, JobConstants.META)
meta_json = normpath_for_zip(os.path.join(folder_name, JobConstants.META_FILE))
meta_path = None
for ext, fmt in ConfigFormat.config_ext_formats().items():
meta_file_path = normpath_for_zip(f"{meta_file}{ext}")
if next((info for info in info_list if info.filename == meta_file_path), None):
# Already in job layout
meta_path = meta_file_path
config_loader = ConfigFactory.get_config_loader(fmt)
meta_data = in_zip.read(meta_path)
meta = config_loader.load_config_from_str(meta_data.decode()).to_dict()
if JobMetaKey.JOB_FOLDER_NAME.value not in meta:
meta[JobMetaKey.JOB_FOLDER_NAME.value] = folder_name
else:
return zip_data
break
writer = io.BytesIO()
with ZipFile(writer, "w") as out_zip:
if meta:
out_zip.writestr(meta_json, json.dumps(meta))
out_zip.comment = in_zip.comment # preserve the comment
for info in info_list:
if info.filename != meta_path:
out_zip.writestr(info, in_zip.read(info.filename))
else:
out_zip.writestr(meta_json, _get_default_meta(folder_name))
# Push everything else to a sub folder with the same name:
# hello-pt/README.md -> hello-pt/hello-pt/README.md
for info in info_list:
name = info.filename
content = in_zip.read(name)
path = folder_name + "/" + name
info.filename = path
out_zip.writestr(info, content)
return writer.getvalue()
def load_job_def_bytes(from_path: str, def_name: str) -> bytes:
"""Load a job definition from specified path and return zipped bytes
Args:
from_path: path where the job definition is located
def_name: name of the job
Returns:
"""
# zip the job folder
data = zip_directory_to_bytes(from_path, def_name)
return convert_legacy_zipped_app_to_job(data)
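# Usage sketch for convert_legacy_zipped_app_to_job(), assuming an in-memory legacy app
# zip whose top-level folder is "hello-pt" and that the nvflare config utilities imported
# above are available. The converter writes a default meta file and nests the app content
# one level deeper (e.g. "hello-pt/hello-pt/...").
def _example_convert_legacy_app() -> list:
    buf = io.BytesIO()
    with ZipFile(buf, "w") as z:
        z.writestr("hello-pt/config/config_fed_server.json", "{}")
    converted = convert_legacy_zipped_app_to_job(buf.getvalue())
    with ZipFile(io.BytesIO(converted)) as z:
        return z.namelist()  # expected to include the generated meta file and the nested app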
| NVFlare-main | nvflare/apis/utils/job_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/apis/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_constant import FilterKey, FLContextKey
def apply_filters(filters_name, filter_data, fl_ctx, config_filters, task_name, direction):
filter_list = []
scope_object = fl_ctx.get_prop(FLContextKey.SCOPE_OBJECT)
if scope_object:
filters = getattr(scope_object, filters_name)
if filters:
filter_list.extend(filters.get(direction, []))
task_filter_list = config_filters.get(task_name + FilterKey.DELIMITER + direction)
if task_filter_list:
filter_list.extend(task_filter_list)
if filter_list:
for f in filter_list:
filter_data = f.process(filter_data, fl_ctx)
return filter_data
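# Minimal sketch of the filter chaining that apply_filters() performs: each filter's
# process() output feeds the next filter. The classes below are stand-ins with the same
# process(data, fl_ctx) shape, not the real NVFlare Filter base class.
class _StripFilter:
    def process(self, data, fl_ctx):
        return data.strip()

class _UpperFilter:
    def process(self, data, fl_ctx):
        return data.upper()

def _example_chain(filter_list, data, fl_ctx=None):
    for f in filter_list:
        data = f.process(data, fl_ctx)
    return data
# _example_chain([_StripFilter(), _UpperFilter()], "  hello ") -> "HELLO"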
| NVFlare-main | nvflare/apis/utils/task_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
from functools import wraps
type_pattern_mapping = {
"server": r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$",
"overseer": r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$",
"sp_end_point": r"^((([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]*:[0-9]*)$",
"client": r"^[A-Za-z0-9-_]+$",
"admin": r"^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}$",
"email": r"^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}$",
"org": r"^[A-Za-z0-9_]+$",
}
def name_check(name: str, entity_type: str):
regex_pattern = type_pattern_mapping.get(entity_type)
if regex_pattern is None:
return True, "entity_type={} not defined, unable to check name={}.".format(entity_type, name)
if re.match(regex_pattern, name):
return False, "name={} passed on regex_pattern={} check".format(name, regex_pattern)
else:
return True, "name={} is ill-formatted based on regex_pattern={}".format(name, regex_pattern)
def validate_class_methods_args(cls):
for name, method in inspect.getmembers(cls, inspect.isfunction):
if name != "__init_subclass__":
setattr(cls, name, validate_args(method))
return cls
def validate_args(method):
signature = inspect.signature(method)
@wraps(method)
def wrapper(*args, **kwargs):
bound_arguments = signature.bind(*args, **kwargs)
for name, value in bound_arguments.arguments.items():
annotation = signature.parameters[name].annotation
if not (annotation is inspect.Signature.empty or isinstance(value, annotation)):
raise TypeError(
"argument '{}' of {} must be {} but got {}".format(name, method, annotation, type(value))
)
return method(*args, **kwargs)
return wrapper
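# Usage sketch: name_check() returns (error_occurred, message), so a False first element
# means the name passed the pattern check; validate_class_methods_args() enforces the
# type annotations of a class's methods at call time. _Greeter is illustrative only.
@validate_class_methods_args
class _Greeter:
    def greet(self, name: str) -> str:
        return "hello " + name
# name_check("site-1", "client")    -> (False, "name=site-1 passed on regex_pattern=... check")
# name_check("bad name!", "client") -> (True, "name=bad name! is ill-formatted ...")
# _Greeter().greet("alice")         -> "hello alice"
# _Greeter().greet(123)             -> raises TypeError because 123 is not a str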
| NVFlare-main | nvflare/apis/utils/format_check.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decomposers for objects used by NVFlare itself
This module contains all the decomposers used to run NVFlare.
The decomposers are registered at server/client startup.
"""
import os
from argparse import Namespace
from typing import Any
from nvflare.apis.client import Client
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.apis.fl_snapshot import RunSnapshot
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.fobs.datum import Datum, DatumManager, DatumRef
from nvflare.fuel.utils.fobs.decomposer import Decomposer, DictDecomposer
# The __init__ initializes logger so generic decomposers can't be used
class ContextDecomposer(Decomposer):
def supported_type(self):
return FLContext
def decompose(self, target: FLContext, manager: DatumManager = None) -> Any:
return [target.model, target.props]
def recompose(self, data: Any, manager: DatumManager = None) -> FLContext:
obj = FLContext()
obj.model = data[0]
obj.props = data[1]
return obj
# Workspace does directory check so generic decomposer is not used
class WorkspaceDecomposer(Decomposer):
def supported_type(self):
return Workspace
def decompose(self, target: Workspace, manager: DatumManager = None) -> Any:
return [target.root_dir, target.site_name, target.config_folder]
def recompose(self, data: Any, manager: DatumManager = None) -> Workspace:
return Workspace(data[0], data[1], data[2])
def register():
if register.registered:
return
fobs.register(DictDecomposer(Shareable))
fobs.register_data_classes(DXO, Client, RunSnapshot, Signal, Namespace, Datum, DatumRef)
fobs.register_folder(os.path.dirname(__file__), __package__)
register.registered = True
register.registered = False
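# Sketch of a custom Decomposer following the same pattern as WorkspaceDecomposer above.
# _Point is a hypothetical class used only for illustration; a real decomposer would be
# registered with fobs.register() like the ones in register().
class _Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

class _PointDecomposer(Decomposer):
    def supported_type(self):
        return _Point

    def decompose(self, target: _Point, manager: DatumManager = None) -> Any:
        return [target.x, target.y]  # break the object into plain, serializable parts

    def recompose(self, data: Any, manager: DatumManager = None) -> _Point:
        return _Point(data[0], data[1])  # rebuild the object from those parts
# fobs.register(_PointDecomposer)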
| NVFlare-main | nvflare/apis/utils/decomposers/flare_decomposers.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/apis/utils/decomposers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import traceback
SECURE_LOGGING_VAR_NAME = "NVFLARE_SECURE_LOGGING"
def is_secure() -> bool:
"""Checks if logging is set to secure mode.
This is controlled by the system environment variable NVFLARE_SECURE_LOGGING.
To set secure mode, set this var to 'true' or '1'.
Returns:
A boolean indicating whether logging is set to secure mode.
"""
secure_logging = os.environ.get(SECURE_LOGGING_VAR_NAME, False)
if isinstance(secure_logging, str):
secure_logging = secure_logging.lower()
return secure_logging == "1" or secure_logging == "true"
else:
return False
class _Frame(object):
def __init__(self, line_text):
self.line_text = line_text
self.count = 1
def _format_exc_securely() -> str:
"""Mimics traceback.format_exc() but exclude detailed call info and exception detail since
they might contain sensitive info.
Returns:
A formatted string of current exception and call stack.
"""
exc_type, exc_obj, tb = sys.exc_info()
result = ["Traceback (most recent call last):"]
frames = []
last_frame = None
# traceback (tb) stack is a linked list of frames
while tb:
file_name = tb.tb_frame.f_code.co_filename
func_name = tb.tb_frame.f_code.co_name
line = tb.tb_lineno
line_text = f'File "{file_name}", line {line}, in {func_name}'
if not last_frame or last_frame.line_text != line_text:
last_frame = _Frame(line_text)
frames.append(last_frame)
else:
# same text as last frame
last_frame.count += 1
tb = tb.tb_next
for f in frames:
result.append(f.line_text)
if f.count > 1:
result.append(f"[Previous line repeated {f.count-1} more times]")
text = "\r\n ".join(result)
return "{}\r\n{}".format(text, f"Exception Type: {exc_type}")
def secure_format_traceback() -> str:
"""Formats the traceback of the current exception and returns a string without sensitive info.
If secure mode is set, only include file names, line numbers and func names.
Exception info only includes the type of the exception.
If secure mode is not set, return the result of traceback.format_exc().
Returns:
A formatted string
"""
if is_secure():
return _format_exc_securely()
else:
return traceback.format_exc()
def secure_log_traceback(logger: logging.Logger = None):
"""Logs the traceback.
If secure mode is set, the traceback only includes file names, line numbers and func names;
and only the type of the exception.
If secure mode is not set, the traceback will be logged normally as traceback.print_exc().
Args:
logger: if not None, this logger is used to log the traceback detail. If None, the root logger will be used.
"""
exc_detail = secure_format_traceback()
if not logger:
logger = logging.getLogger()
logger.error(exc_detail)
def secure_format_exception(e: Exception) -> str:
"""Formats the specified exception and return a string without sensitive info.
If secure mode is set, only return the type of the exception;
If secure mode is not set, return the result of str(e).
Args:
e: the exception to be formatted
Returns:
A formatted exception string.
"""
if is_secure():
return str(type(e))
else:
return f"{type(e).__name__}: {str(e)}"
| NVFlare-main | nvflare/security/logging.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from nvflare.apis.fl_constant import AdminCommandNames as AC
from nvflare.fuel.sec.authz import Authorizer, AuthzContext
class CommandCategory(object):
MANAGE_JOB = "manage_job"
OPERATE = "operate"
VIEW = "view"
SHELL_COMMANDS = "shell_commands"
DOWNLOAD_JOB = "download_job"
COMMAND_CATEGORIES = {
AC.ABORT: CommandCategory.MANAGE_JOB,
AC.ABORT_JOB: CommandCategory.MANAGE_JOB,
AC.START_APP: CommandCategory.MANAGE_JOB,
AC.DELETE_JOB: CommandCategory.MANAGE_JOB,
AC.DELETE_WORKSPACE: CommandCategory.MANAGE_JOB,
AC.CHECK_STATUS: CommandCategory.VIEW,
AC.SHOW_SCOPES: CommandCategory.VIEW,
AC.SHOW_STATS: CommandCategory.VIEW,
AC.RESET_ERRORS: CommandCategory.VIEW,
AC.SHOW_ERRORS: CommandCategory.VIEW,
AC.LIST_JOBS: CommandCategory.VIEW,
AC.GET_JOB_META: CommandCategory.VIEW,
AC.SYS_INFO: CommandCategory.OPERATE,
AC.REPORT_RESOURCES: CommandCategory.OPERATE,
AC.RESTART: CommandCategory.OPERATE,
AC.SHUTDOWN: CommandCategory.OPERATE,
AC.REMOVE_CLIENT: CommandCategory.OPERATE,
AC.SET_TIMEOUT: CommandCategory.OPERATE,
AC.CALL: CommandCategory.OPERATE,
AC.SHELL_CAT: CommandCategory.SHELL_COMMANDS,
AC.SHELL_GREP: CommandCategory.SHELL_COMMANDS,
AC.SHELL_HEAD: CommandCategory.SHELL_COMMANDS,
AC.SHELL_LS: CommandCategory.SHELL_COMMANDS,
AC.SHELL_PWD: CommandCategory.SHELL_COMMANDS,
AC.SHELL_TAIL: CommandCategory.SHELL_COMMANDS,
AC.DOWNLOAD_JOB: CommandCategory.DOWNLOAD_JOB,
AC.DOWNLOAD_JOB_FILE: CommandCategory.DOWNLOAD_JOB,
}
class FLAuthorizer(Authorizer):
def __init__(self, for_org: str, policy_config: dict):
"""System-wide authorization class.
Examine if a user has certain rights on a specific site
based on authorization.json file.
"""
assert isinstance(policy_config, dict), "policy_config must be a dict but got {}".format(type(policy_config))
Authorizer.__init__(self, for_org, COMMAND_CATEGORIES)
err = self.load_policy(policy_config)
if err:
raise SyntaxError("invalid policy config: {}".format(err))
class EmptyAuthorizer(Authorizer):
def __init__(self):
Authorizer.__init__(self, "dummy")
def authorize(self, ctx: AuthzContext) -> Tuple[bool, str]:
return True, ""
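# Illustrative lookup: COMMAND_CATEGORIES is the mapping an Authorizer uses to translate a
# concrete admin command into the category checked against the authorization policy. The
# helper below is only a sketch of that lookup.
def _example_command_category(command: str) -> str:
    return COMMAND_CATEGORIES.get(command, CommandCategory.OPERATE)  # default is for the sketch only
# _example_command_category(AC.LIST_JOBS) -> CommandCategory.VIEW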
| NVFlare-main | nvflare/security/security.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/security/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from pathlib import Path
from typing import List, Optional
from pyhocon import ConfigFactory as CF
from pyhocon import ConfigTree, HOCONConverter
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.fuel_opt.utils.pyhocon_loader import PyhoconConfig
from nvflare.tool.job.job_client_const import JOB_TEMPLATES
def get_home_dir() -> Path:
return Path.home()
def get_hidden_nvflare_config_path(hidden_nvflare_dir: str) -> str:
"""
Get the path for the hidden nvflare configuration file.
Args:
hidden_nvflare_dir: ~/.nvflare directory
Returns:
str: The path to the hidden nvflare configuration file.
"""
hidden_nvflare_config_file = os.path.join(hidden_nvflare_dir, "config.conf")
return str(hidden_nvflare_config_file)
def create_hidden_nvflare_dir():
hidden_nvflare_dir = get_hidden_nvflare_dir()
if not hidden_nvflare_dir.exists():
try:
hidden_nvflare_dir.mkdir(exist_ok=True)
except OSError as e:
raise RuntimeError(f"Error creating the hidden nvflare directory: {e}")
return hidden_nvflare_dir
def get_hidden_nvflare_dir() -> pathlib.Path:
home_dir = get_home_dir()
hidden_nvflare_dir = pathlib.Path(home_dir) / ".nvflare"
return hidden_nvflare_dir
def load_config(config_file_path) -> Optional[ConfigTree]:
if os.path.isfile(config_file_path):
return CF.parse_file(config_file_path)
else:
return None
def find_startup_kit_location() -> str:
nvflare_config = load_hidden_config()
return nvflare_config.get_string("startup_kit.path", None) if nvflare_config else None
def load_hidden_config() -> ConfigTree:
hidden_dir = create_hidden_nvflare_dir()
hidden_nvflare_config_file = get_hidden_nvflare_config_path(str(hidden_dir))
nvflare_config = load_config(hidden_nvflare_config_file)
return nvflare_config
def create_startup_kit_config(nvflare_config: ConfigTree, startup_kit_dir: Optional[str] = None) -> ConfigTree:
"""
Args:
startup_kit_dir: specified startup kit location
nvflare_config (ConfigTree): The existing nvflare configuration.
Returns:
ConfigTree: The merged configuration tree.
"""
old_startup_kit_dir = nvflare_config.get_string("startup_kit", None)
if old_startup_kit_dir is None and (startup_kit_dir is not None and not os.path.isdir(startup_kit_dir)):
raise ValueError(f"invalid startup kit location '{startup_kit_dir}'")
if startup_kit_dir:
startup_kit_dir = get_startup_kit_dir(startup_kit_dir)
conf_str = f"""
startup_kit {{
path = {startup_kit_dir}
}}
"""
conf: ConfigTree = CF.parse_string(conf_str)
return conf.with_fallback(nvflare_config)
else:
return nvflare_config
def create_poc_workspace_config(nvflare_config: ConfigTree, poc_workspace_dir: Optional[str] = None) -> ConfigTree:
"""
Args:
poc_workspace_dir: specified poc_workspace_dir
nvflare_config (ConfigTree): The existing nvflare configuration.
Returns:
ConfigTree: The merged configuration tree.
"""
if poc_workspace_dir is None:
return nvflare_config
poc_workspace_dir = os.path.abspath(poc_workspace_dir)
conf_str = f"""
poc_workspace {{
path = {poc_workspace_dir}
}}
"""
conf: ConfigTree = CF.parse_string(conf_str)
return conf.with_fallback(nvflare_config)
def create_job_template_config(nvflare_config: ConfigTree, job_templates_dir: Optional[str] = None) -> ConfigTree:
"""
Args:
job_templates_dir: specified job template directory
nvflare_config (ConfigTree): The existing nvflare configuration.
Returns:
ConfigTree: The merged configuration tree.
"""
if job_templates_dir is None:
return nvflare_config
job_templates_dir = os.path.abspath(job_templates_dir)
conf_str = f"""
job_template {{
path = {job_templates_dir}
}}
"""
conf: ConfigTree = CF.parse_string(conf_str)
return conf.with_fallback(nvflare_config)
def check_dir(dir_path: str):
if not dir_path or not os.path.isdir(dir_path):
raise ValueError(f"directory {dir_path} doesn't exists")
def get_startup_kit_dir(startup_kit_dir: Optional[str] = None) -> str:
if not startup_kit_dir:
# load from config file:
startup_kit_dir = find_startup_kit_location()
if startup_kit_dir is None:
startup_kit_dir = os.getenv("NVFLARE_STARTUP_KIT_DIR")
if startup_kit_dir is None or len(startup_kit_dir.strip()) == 0:
raise ValueError("startup kit directory is not specified")
check_startup_dir(startup_kit_dir)
startup_kit_dir = os.path.abspath(startup_kit_dir)
return startup_kit_dir
def check_startup_dir(startup_kit_dir):
if not startup_kit_dir or not os.path.isdir(startup_kit_dir):
raise ValueError(
f"startup_kit_dir '{startup_kit_dir}' must be a valid and non-empty path. "
f"use 'nvflare poc' command to 'prepare' if you are using POC mode. Or use"
f" 'nvflare config' to setup startup_kit_dir location if you are in production"
)
def find_job_templates_location(job_templates_dir: Optional[str] = None):
def check_job_templates_dir(job_temp_dir: str):
if job_temp_dir:
if not os.path.isdir(job_temp_dir):
raise ValueError(f"Invalid job template directory {job_temp_dir}")
if job_templates_dir is None:
nvflare_home = os.environ.get("NVFLARE_HOME", None)
if nvflare_home:
job_templates_dir = os.path.join(nvflare_home, JOB_TEMPLATES)
if job_templates_dir is None:
nvflare_config = load_hidden_config()
job_templates_dir = nvflare_config.get_string("job_template.path", None) if nvflare_config else None
if job_templates_dir:
check_job_templates_dir(job_templates_dir)
if not job_templates_dir:
raise ValueError(
"Required job_template directory is not specified. "
"Please check ~/.nvflare/config.conf or set env variable NVFLARE_HOME "
)
return job_templates_dir
def get_curr_dir():
return os.path.curdir
def is_dir_empty(path: str):
    target_dir = os.listdir(path)
    return len(target_dir) == 0
def hocon_to_string(target_fmt: ConfigFormat, dst_config: ConfigTree):
if target_fmt == ConfigFormat.JSON:
return HOCONConverter.to_json(dst_config)
elif target_fmt == ConfigFormat.PYHOCON:
return HOCONConverter.to_hocon(dst_config)
elif target_fmt == ConfigFormat.OMEGACONF:
from nvflare.fuel_opt.utils.omegaconf_loader import OmegaConfLoader
loader = OmegaConfLoader()
dst_dict_config = PyhoconConfig(dst_config).to_dict()
omega_conf = loader.load_config_from_dict(dst_dict_config)
return omega_conf.to_str()
def save_config(dst_config, dst_path, keep_origin_format: bool = True):
    if dst_path is None or dst_path.rfind(".") == -1:
raise ValueError(f"configuration file path '{dst_path}' can't be None or has no extension")
require_clean_up = False
if keep_origin_format:
original_ext = os.path.basename(dst_path).split(".")[1]
fmt = ConfigFormat.config_ext_formats().get(f".{original_ext}", None)
if fmt is None:
raise ValueError(f"invalid file extension {dst_path}, no corresponding configuration format")
dst_config_path = dst_path
else:
fmt = ConfigFormat.PYHOCON
ext = ConfigFormat.extensions(fmt)[0]
if dst_path.endswith(ext):
dst_config_path = dst_path
else:
filename = f"{os.path.basename(dst_path).split('.')[0]}{ext}"
dst_config_path = os.path.join(os.path.dirname(dst_path), filename)
require_clean_up = True
config_str = hocon_to_string(fmt, dst_config)
with open(dst_config_path, "w") as outfile:
outfile.write(f"{config_str}\n")
if require_clean_up:
if os.path.exists(dst_path):
os.remove(dst_path)
def get_hidden_config():
hidden_nvflare_config_file = get_hidden_nvflare_config_path(str(create_hidden_nvflare_dir()))
conf = load_hidden_config()
nvflare_config = CF.parse_string("{}") if not conf else conf
return hidden_nvflare_config_file, nvflare_config
def find_in_list(arr: List, item) -> bool:
if arr is None:
return False
found = False
for a in arr:
        if a == item:
return True
return found
def append_if_not_in_list(arr: List, item) -> List:
if item is None:
return arr
if arr is None:
arr = []
if not find_in_list(arr, item):
arr.append(item)
return arr
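# Usage sketch (illustrative): persist a startup-kit location into the hidden ~/.nvflare/config.conf.
# The startup kit path below is a placeholder and must point to an existing directory, otherwise
# create_startup_kit_config raises ValueError.
if __name__ == "__main__":
    config_file, nvflare_config = get_hidden_config()
    merged = create_startup_kit_config(nvflare_config, startup_kit_dir="/tmp/nvflare/poc/example_project/prod_00")
    save_config(merged, config_file, keep_origin_format=True)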
| NVFlare-main | nvflare/utils/cli_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import timeit
def collect_time(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if "reset" in kwargs and kwargs["reset"]:
wrapper.time_taken = 0
wrapper.count = 0
else:
start = timeit.default_timer()
result = func(*args, **kwargs)
wrapper.time_taken += (timeit.default_timer() - start) * 1000.0
wrapper.count += 1
return result
wrapper.time_taken = 0
wrapper.count = 0
return wrapper
def measure_time(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = timeit.default_timer()
result = func(*args, **kwargs)
duration = (timeit.default_timer() - start) * 1000.0
wrapper.time_taken = duration
return result
return wrapper
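# Usage sketch (illustrative): @measure_time records the duration of the last call in
# milliseconds, while @collect_time accumulates total time and call count across calls.
if __name__ == "__main__":
    @measure_time
    def _example_task():
        return sum(range(1000))
    @collect_time
    def _example_step():
        return None
    _example_task()
    for _ in range(3):
        _example_step()
    print(f"last call took {_example_task.time_taken:.3f} ms")
    print(f"{_example_step.count} calls took {_example_step.time_taken:.3f} ms in total")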
| NVFlare-main | nvflare/utils/decorators.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel_opt/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.fuel.utils.config import Config, ConfigFormat, ConfigLoader
class PyhoconConfig(Config):
def __init__(self, conf, file_path: Optional[str] = None):
super(PyhoconConfig, self).__init__(conf, ConfigFormat.PYHOCON, file_path)
def to_dict(self, resolve: Optional[bool] = True) -> Dict:
return self._convert_conf_item(self.conf)
def to_str(self, element: Optional[Dict] = None) -> str:
from pyhocon import ConfigFactory as CF
from pyhocon.converter import HOCONConverter
if element is None:
return HOCONConverter.to_hocon(self.conf)
else:
config = CF.from_dict(element)
return HOCONConverter.to_hocon(config)
def _convert_conf_item(self, conf_item):
from pyhocon import ConfigTree
result = {}
if isinstance(conf_item, ConfigTree):
if len(conf_item) > 0:
for key, item in conf_item.items():
new_key = key.strip('"') # for dotted keys enclosed with "" to not be interpreted as nested key
new_value = self._convert_conf_item(item)
result[new_key] = new_value
elif isinstance(conf_item, list):
if len(conf_item) > 0:
result = [self._convert_conf_item(item) for item in conf_item]
else:
result = []
elif conf_item is True:
return True
elif conf_item is False:
return False
else:
return conf_item
return result
class PyhoconLoader(ConfigLoader):
def __init__(self):
super(PyhoconLoader, self).__init__(ConfigFormat.PYHOCON)
def load_config(self, file_path: str) -> Config:
from pyhocon import ConfigTree
conf: ConfigTree = self._from_file(file_path)
return PyhoconConfig(conf, file_path)
def load_config_from_str(self, config_str: str) -> Config:
from pyhocon import ConfigFactory as CF
conf = CF.parse_string(config_str)
return PyhoconConfig(conf)
def load_config_from_dict(self, config_dict: dict) -> Config:
from pyhocon import ConfigFactory as CF
conf = CF.from_dict(config_dict)
return PyhoconConfig(conf)
def _from_file(self, file_path):
from pyhocon import ConfigFactory as CF
return CF.parse_file(file_path)
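# Usage sketch (illustrative, assumes pyhocon is installed): parse a HOCON string, view it as a
# plain dict, and render it back to HOCON text.
if __name__ == "__main__":
    loader = PyhoconLoader()
    config = loader.load_config_from_str('app { name = "example", retries = 3 }')
    print(config.to_dict())  # {'app': {'name': 'example', 'retries': 3}}
    print(config.to_str())   # HOCON rendering of the parsed tree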
| NVFlare-main | nvflare/fuel_opt/utils/pyhocon_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel_opt/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.fuel.utils.config import Config, ConfigFormat, ConfigLoader
class OmegaConfConfig(Config):
def __init__(self, conf, file_path: Optional[str] = None):
super(OmegaConfConfig, self).__init__(conf, ConfigFormat.OMEGACONF, file_path)
def to_dict(self, resolve: Optional[bool] = True) -> Dict:
from omegaconf import OmegaConf
return OmegaConf.to_container(self.conf, resolve=resolve)
def to_str(self, element: Optional[Dict] = None) -> str:
from omegaconf import OmegaConf
if element is None:
return OmegaConf.to_yaml(self.conf)
else:
config = OmegaConf.create(element)
return OmegaConf.to_yaml(config)
class OmegaConfLoader(ConfigLoader):
def __init__(self):
super(OmegaConfLoader, self).__init__(ConfigFormat.OMEGACONF)
def load_config(self, file_path: str) -> Config:
conf = self._from_file(file_path)
return OmegaConfConfig(conf, file_path)
def load_config_from_str(self, config_str: str) -> Config:
from omegaconf import OmegaConf
conf = OmegaConf.create(config_str)
return OmegaConfConfig(conf)
def load_config_from_dict(self, config_dict: dict) -> Config:
from omegaconf import OmegaConf
conf = OmegaConf.create(config_dict)
return OmegaConfConfig(conf)
def _from_file(self, file_path):
from omegaconf import OmegaConf
return OmegaConf.load(file_path)
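# Usage sketch (illustrative, assumes omegaconf is installed): build a config from a dict and
# render it as YAML.
if __name__ == "__main__":
    loader = OmegaConfLoader()
    config = loader.load_config_from_dict({"trainer": {"epochs": 5, "lr": 0.01}})
    print(config.to_dict())  # {'trainer': {'epochs': 5, 'lr': 0.01}}
    print(config.to_str())   # YAML rendering of the config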
| NVFlare-main | nvflare/fuel_opt/utils/omegaconf_loader.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flask configuration variables."""
import os
from datetime import timedelta
from nvflare.lighter.utils import generate_password
class Config:
# General Config
SECRET_KEY = os.environ.get("SECRET_KEY", generate_password(16))
JWT_ACCESS_TOKEN_EXPIRES = timedelta(minutes=30)
# Database
web_root = os.environ.get("NVFL_WEB_ROOT", "/var/tmp/nvflare/dashboard")
default_sqlite_file = os.path.join(web_root, "db.sqlite")
default_sqlite_url = f"sqlite:///{default_sqlite_file}"
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL", default_sqlite_url)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
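# Usage note (illustrative): Flask loads this class via
# app.config.from_object("nvflare.dashboard.config.Config") in init_app(). The storage root and
# database URL can be overridden before startup, e.g. (placeholder values):
#   NVFL_WEB_ROOT=/opt/nvflare/dashboard
#   DATABASE_URL=sqlite:////opt/nvflare/dashboard/db.sqlite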
| NVFlare-main | nvflare/dashboard/config.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/dashboard/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import signal
import subprocess
import sys
import docker
import nvflare
from nvflare.apis.utils.format_check import name_check
from nvflare.dashboard.application.blob import _write
from nvflare.lighter import tplt_utils, utils
supported_csp = ("azure", "aws")
def start(args):
cwd = os.getcwd()
if not args.folder:
folder = cwd
else:
folder = os.path.join(cwd, args.folder)
environment = dict()
env_vars = args.env
if env_vars:
for e in env_vars:
            key, value = e.split("=", 1)
            environment[key] = value
passphrase = args.passphrase
if passphrase:
environment["NVFL_DASHBOARD_PP"] = passphrase
if args.cred:
environment.update({"NVFL_CREDENTIAL": args.cred})
elif not os.path.exists(os.path.join(folder, ".db_init_done")):
need_email = True
while need_email:
answer = input(
"Please provide project admin email address. This person will be the super user of the dashboard and this project.\n"
)
error, reason = name_check(answer, "email")
if error:
print(f"Expecting an email address, but got one in an invalid format. Reason: {reason}")
else:
need_email = False
print("generating random password")
pwd = utils.generate_password(8)
print(f"Project admin credential is {answer} and the password is {pwd}")
environment.update({"NVFL_CREDENTIAL": f"{answer}:{pwd}"})
try:
client = docker.from_env()
except docker.errors.DockerException:
print("Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.")
exit(0)
version = nvflare.__version__
dashboard_image = f"nvflare/nvflare:{version}"
if args.image:
if dashboard_image != args.image:
print(
f"Current dashboard container image is nvflare/nvflare:{version}, but requesting to use {args.image}. Use it at your own risk."
)
dashboard_image = args.image
try:
print(f"Pulling {dashboard_image}, may take some time to finish.")
_ = client.images.pull(dashboard_image)
except docker.errors.APIError:
print(f"unable to pull {dashboard_image}")
exit(1)
print(f"Launching {dashboard_image}")
print(f"Dashboard will listen to port {args.port}")
print(f"{folder} on host mounted to /var/tmp/nvflare/dashboard in container")
if environment:
print(f"environment vars set to {environment}")
else:
print("No additional environment variables set to the launched container.")
try:
container_obj = client.containers.run(
dashboard_image,
entrypoint=["/usr/local/bin/python3", "nvflare/dashboard/wsgi.py"],
detach=True,
auto_remove=True,
name="nvflare-dashboard",
ports={8443: args.port},
            volumes={folder: {"bind": "/var/tmp/nvflare/dashboard", "mode": "rw"}},
environment=environment,
)
except docker.errors.APIError as e:
print(f"Either {dashboard_image} image does not exist or another nvflare-dashboard instance is still running.")
print("Please either provide an existing container image or stop the running container instance.")
print(e)
exit(1)
if container_obj:
print("Dashboard container started")
print("Container name nvflare-dashboard")
print(f"id is {container_obj.id}")
else:
print("Container failed to start")
def stop():
try:
client = docker.from_env()
except docker.errors.DockerException:
print("Unable to communicate to docker daemon/socket. Please make sure your docker is up and running.")
exit(0)
try:
container_obj = client.containers.get("nvflare-dashboard")
except docker.errors.NotFound:
print("No nvflare-dashboard container found")
exit(0)
container_obj.kill(signal=signal.SIGINT)
print("nvflare-dashboard exited")
def cloud(args):
lighter_folder = os.path.dirname(utils.__file__)
template = utils.load_yaml(os.path.join(lighter_folder, "impl", "master_template.yml"))
tplt = tplt_utils.Template(template)
cwd = os.getcwd()
csp = args.cloud
dest = os.path.join(cwd, f"{csp}_start_dsb.sh")
dsb_start = template[f"{csp}_start_dsb_sh"]
version = nvflare.__version__
replacement_dict = {"NVFLARE": f"nvflare=={version}", "START_OPT": f"-i {args.image}" if args.image else ""}
_write(
dest,
utils.sh_replace(tplt.get_cloud_script_header() + dsb_start, replacement_dict),
"t",
exe=True,
)
print(f"Dashboard launch script for cloud is written at {dest}. Now running the script.")
_ = subprocess.run(dest)
os.remove(dest)
def has_no_arguments() -> bool:
last_item = sys.argv[-1]
return (
last_item.endswith("dashboard.cli") or last_item.endswith("dashboard/cli.py") or last_item.endswith("dashboard")
)
def main():
parser = argparse.ArgumentParser()
define_dashboard_parser(parser)
args = parser.parse_args()
handle_dashboard(args)
def define_dashboard_parser(parser):
parser.add_argument(
"--cloud",
type=str,
default="",
help="launch dashboard on cloud service provider (ex: --cloud azure or --cloud aws)",
)
parser.add_argument("--start", action="store_true", help="start dashboard")
parser.add_argument("--stop", action="store_true", help="stop dashboard")
parser.add_argument("-p", "--port", type=str, default="443", help="port to listen")
parser.add_argument(
"-f", "--folder", type=str, help="folder containing necessary info (default: current working directory)"
)
parser.add_argument(
"--passphrase", help="Passphrase to encrypt/decrypt root CA private key. !!! Do not share it with others. !!!"
)
parser.add_argument("-e", "--env", action="append", help="additonal environment variables: var1=value1")
parser.add_argument("--cred", help="set credential directly in the form of USER_EMAIL:PASSWORD")
parser.add_argument("-i", "--image", help="set the container image name")
def handle_dashboard(args):
support_csp_string = ", ".join(supported_csp)
if args.stop:
stop()
elif args.start:
start(args)
elif args.cloud:
if args.cloud in supported_csp:
cloud(args)
else:
print(
f"Currently --cloud support the following options: {support_csp_string}. However, {args.cloud} is requested."
)
else:
print("Please use -h option to see usage")
if __name__ == "__main__":
main()
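# Usage note (illustrative): typical invocations via the nvflare CLI wrapper, using the flags
# defined in define_dashboard_parser (values are placeholders):
#   nvflare dashboard --start --port 443 -f /path/to/project/folder
#   nvflare dashboard --start --cred admin@example.com:Password123 --passphrase my_pp
#   nvflare dashboard --cloud azure
#   nvflare dashboard --stop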
| NVFlare-main | nvflare/dashboard/cli.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ssl
from application import init_app
app = init_app()
if __name__ == "__main__":
web_root = os.environ.get("NVFL_WEB_ROOT", "/var/tmp/nvflare/dashboard")
web_crt = os.path.join(web_root, "cert", "web.crt")
web_key = os.path.join(web_root, "cert", "web.key")
port = os.environ.get("NVFL_WEB_PORT", "8443")
if os.path.exists(web_crt) and os.path.exists(web_key):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(web_crt, web_key)
else:
ssl_context = None
app.run(host="0.0.0.0", port=port, ssl_context=ssl_context)
| NVFlare-main | nvflare/dashboard/wsgi.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from werkzeug.security import check_password_hash, generate_password_hash
from .blob import gen_client, gen_overseer, gen_server, gen_user
from .cert import Entity, make_root_cert
from .models import Capacity, Client, Organization, Project, Role, User, db
log = logging.getLogger(__name__)
def check_role(id, claims, requester):
is_creator = requester == Store._get_email_by_id(id)
is_project_admin = claims.get("role") == "project_admin"
return is_creator, is_project_admin
def _dict_or_empty(item):
return item.asdict() if item else {}
def get_or_create(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance
else:
instance = model(**kwargs)
session.add(instance)
session.commit()
return instance
def add_ok(obj):
obj.update({"status": "ok"})
return obj
def inc_dl(model, id):
instance = model.query.get(id)
instance.download_count = instance.download_count + 1
db.session.add(instance)
db.session.commit()
class Store(object):
@classmethod
def ready(cls):
user = User.query.get(1)
return user.approval_state >= 100 if user else False
@classmethod
def seed_user(cls, email, pwd):
seed_user = {
"name": "super_name",
"email": email,
"password": pwd,
"organization": "",
"role": "project_admin",
"approval_state": 200,
}
cls.create_user(seed_user)
cls.create_project()
return email, pwd
@classmethod
def init_db(cls):
db.drop_all()
db.create_all()
return add_ok({})
@classmethod
def create_project(cls):
project = Project()
db.session.add(project)
db.session.commit()
return add_ok({"project": _dict_or_empty(project)})
@classmethod
def build_project(cls, project):
entity = Entity(project.short_name)
cert_pair = make_root_cert(entity)
project.root_cert = cert_pair.ser_cert
project.root_key = cert_pair.ser_pri_key
db.session.add(project)
db.session.commit()
return add_ok({"project": _dict_or_empty(project)})
@classmethod
def _add_registered_info(cls, project_dict):
project_dict["num_clients"] = Client.query.count()
project_dict["num_orgs"] = Organization.query.count()
project_dict["num_users"] = User.query.count()
return project_dict
@classmethod
def set_project(cls, req):
project = Project.query.first()
if project.frozen:
return {"status": "Project is frozen"}
req.pop("id", None)
short_name = req.pop("short_name", "")
if short_name:
if len(short_name) > 16:
short_name = short_name[:16]
project.short_name = short_name
for k, v in req.items():
setattr(project, k, v)
db.session.add(project)
db.session.commit()
if project.frozen:
cls.build_project(project)
project_dict = _dict_or_empty(project)
project_dict = cls._add_registered_info(project_dict)
return add_ok({"project": project_dict})
@classmethod
def get_project(cls):
project_dict = _dict_or_empty(Project.query.first())
project_dict = cls._add_registered_info(project_dict)
return add_ok({"project": project_dict})
@classmethod
def get_overseer_blob(cls, key):
fileobj, filename = gen_overseer(key)
return fileobj, filename
@classmethod
def get_server_blob(cls, key, first_server=True):
fileobj, filename = gen_server(key, first_server)
return fileobj, filename
@classmethod
def get_orgs(cls):
all_orgs = Organization.query.all()
return add_ok({"client_list": [_dict_or_empty(org) for org in all_orgs]})
@classmethod
def _is_approved_by_client_id(cls, id):
client = Client.query.get(id)
return client.approval_state >= 100
@classmethod
def _is_approved_by_user_id(cls, id):
user = User.query.get(id)
return user.approval_state >= 100
@classmethod
def create_client(cls, req, creator):
creator_id = User.query.filter_by(email=creator).first().id
name = req.get("name")
organization = req.get("organization", "")
capacity = req.get("capacity")
description = req.get("description", "")
org = get_or_create(db.session, Organization, name=organization)
if capacity is not None:
cap = get_or_create(db.session, Capacity, capacity=json.dumps(capacity))
client = Client(name=name, description=description, creator_id=creator_id)
client.organization_id = org.id
client.capacity_id = cap.id
try:
db.session.add(client)
db.session.commit()
except Exception as e:
log.error(f"Error while creating client: {e}")
return None
return add_ok({"client": _dict_or_empty(client)})
@classmethod
def get_clients(cls, org=None):
if org is None:
all_clients = Client.query.all()
else:
all_clients = Organization.query.filter_by(name=org).first().clients
return add_ok({"client_list": [_dict_or_empty(client) for client in all_clients]})
@classmethod
def get_creator_id_by_client_id(cls, id):
client = Client.query.get(id)
if client:
creator_id = client.creator_id
return creator_id
else:
return None
@classmethod
def get_client(cls, id):
client = Client.query.get(id)
return add_ok({"client": _dict_or_empty(client)})
@classmethod
def patch_client_by_project_admin(cls, id, req):
client = Client.query.get(id)
organization = req.pop("organization", None)
if organization is not None:
org = get_or_create(db.session, Organization, name=organization)
client.organization_id = org.id
capacity = req.pop("capacity", None)
if capacity is not None:
capacity = json.dumps(capacity)
cap = get_or_create(db.session, Capacity, capacity=capacity)
client.capacity_id = cap.id
for k, v in req.items():
setattr(client, k, v)
try:
db.session.add(client)
db.session.commit()
except Exception as e:
log.error(f"Error while patching client: {e}")
return None
return add_ok({"client": _dict_or_empty(client)})
@classmethod
def patch_client_by_creator(cls, id, req):
client = Client.query.get(id)
_ = req.pop("approval_state", None)
organization = req.pop("organization", None)
if organization is not None:
org = get_or_create(db.session, Organization, name=organization)
client.organization_id = org.id
capacity = req.pop("capacity", None)
if capacity is not None:
capacity = json.dumps(capacity)
cap = get_or_create(db.session, Capacity, capacity=capacity)
client.capacity_id = cap.id
for k, v in req.items():
setattr(client, k, v)
try:
db.session.add(client)
db.session.commit()
except Exception as e:
log.error(f"Error while patching client: {e}")
return None
return add_ok({"client": _dict_or_empty(client)})
@classmethod
def delete_client(cls, id):
client = Client.query.get(id)
db.session.delete(client)
db.session.commit()
return add_ok({})
@classmethod
def get_client_blob(cls, key, id):
fileobj, filename = gen_client(key, id)
inc_dl(Client, id)
return fileobj, filename
@classmethod
def create_user(cls, req):
name = req.get("name", "")
email = req.get("email")
password = req.get("password", "")
password_hash = generate_password_hash(password)
organization = req.get("organization", "")
role_name = req.get("role", "")
description = req.get("description", "")
approval_state = req.get("approval_state", 0)
org = get_or_create(db.session, Organization, name=organization)
role = get_or_create(db.session, Role, name=role_name)
try:
user = User(
email=email,
name=name,
password_hash=password_hash,
description=description,
approval_state=approval_state,
)
user.organization_id = org.id
user.role_id = role.id
db.session.add(user)
db.session.commit()
except Exception as e:
log.error(f"Error while creating user: {e}")
return None
return add_ok({"user": _dict_or_empty(user)})
@classmethod
def verify_user(cls, email, password):
user = User.query.filter_by(email=email).first()
if user is not None and check_password_hash(user.password_hash, password):
return user
else:
return None
@classmethod
def get_users(cls, org_name=None):
if org_name is None:
all_users = User.query.all()
else:
org = Organization.query.filter_by(name=org_name).first()
if org:
all_users = org.users
else:
all_users = {}
return add_ok({"user_list": [_dict_or_empty(user) for user in all_users]})
@classmethod
def _get_email_by_id(cls, id):
user = User.query.get(id)
return user.email if user else None
@classmethod
def get_user(cls, id):
user = User.query.get(id)
return add_ok({"user": _dict_or_empty(user)})
@classmethod
def patch_user_by_project_admin(cls, id, req):
user = User.query.get(id)
org_name = req.pop("organization", None)
if org_name is not None:
org = get_or_create(db.session, Organization, name=org_name)
user.organization_id = org.id
role_name = req.pop("role", None)
if role_name is not None:
role = get_or_create(db.session, Role, name=role_name)
user.role_id = role.id
password = req.pop("password", None)
if password is not None:
password_hash = generate_password_hash(password)
user.password_hash = password_hash
for k, v in req.items():
setattr(user, k, v)
db.session.add(user)
db.session.commit()
return add_ok({"user": _dict_or_empty(user)})
@classmethod
def patch_user_by_creator(cls, id, req):
user = User.query.get(id)
_ = req.pop("approval_state", None)
role = req.pop("role", None)
if role is not None and user.role.name == "":
role = get_or_create(db.session, Role, name=role)
user.role_id = role.id
organization = req.pop("organization", None)
if organization is not None and user.organization.name == "":
org = get_or_create(db.session, Organization, name=organization)
user.organization_id = org.id
password = req.pop("password", None)
if password is not None:
password_hash = generate_password_hash(password)
user.password_hash = password_hash
for k, v in req.items():
setattr(user, k, v)
db.session.add(user)
db.session.commit()
return add_ok({"user": _dict_or_empty(user)})
@classmethod
def delete_user(cls, id):
clients = Client.query.filter_by(creator_id=id).all()
for client in clients:
db.session.delete(client)
user = User.query.get(id)
db.session.delete(user)
db.session.commit()
return add_ok({})
@classmethod
def get_user_blob(cls, key, id):
fileobj, filename = gen_user(key, id)
inc_dl(User, id)
return fileobj, filename
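# Usage sketch (illustrative): Store methods use the Flask-SQLAlchemy session, so they are meant
# to run inside an application context; init_app() also expects the NVFL_CREDENTIAL environment
# variable (EMAIL:PASSWORD) on first run.
if __name__ == "__main__":
    from nvflare.dashboard.application import init_app
    app = init_app()
    with app.app_context():
        print(Store.get_project())
        print(Store.get_users())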
| NVFlare-main | nvflare/dashboard/application/store.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from datetime import datetime
from . import db
class CommonMixin(object):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(512), default="")
description = db.Column(db.String(512), default="")
created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
def asdict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Organization(CommonMixin, db.Model):
def asdict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns if c.name in ("name",)}
class Role(CommonMixin, db.Model):
pass
class Capacity(db.Model):
id = db.Column(db.Integer, primary_key=True)
capacity = db.Column(db.String(1024), default="")
created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
def asdict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Project(db.Model):
id = db.Column(db.Integer, primary_key=True)
frozen = db.Column(db.Boolean, default=False)
public = db.Column(db.Boolean, default=False)
short_name = db.Column(db.String(128), default="")
title = db.Column(db.String(512), default="")
description = db.Column(db.String(2048), default="")
# scheme = db.Column(db.String(16), default="grpc")
app_location = db.Column(db.String(2048), default="")
ha_mode = db.Column(db.Boolean, default=False)
starting_date = db.Column(db.String(128), default="")
end_date = db.Column(db.String(128), default="")
overseer = db.Column(db.String(128), default="")
server1 = db.Column(db.String(128), default="")
server2 = db.Column(db.String(128), default="")
root_cert = db.Column(db.String(4096), default="")
root_key = db.Column(db.String(4096), default="")
def asdict(self):
table_dict = {
c.name: getattr(self, c.name)
for c in self.__table__.columns
if c.name not in ["id", "root_cert", "root_key"]
}
return table_dict
class Client(CommonMixin, db.Model):
capacity_id = db.Column(db.Integer, db.ForeignKey("capacity.id"), nullable=False)
name = db.Column(db.String(512), unique=True)
organization_id = db.Column(db.Integer, db.ForeignKey("organization.id"), nullable=False)
capacity = db.relationship("Capacity", backref="clients")
organization = db.relationship("Organization", backref="clients")
creator_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
approval_state = db.Column(db.Integer, default=0)
download_count = db.Column(db.Integer, default=0)
def asdict(self):
table_dict = {c.name: getattr(self, c.name) for c in self.__table__.columns if "_id" not in c.name}
table_dict.update({"organization": self.organization.name, "capacity": json.loads(self.capacity.capacity)})
return table_dict
class User(CommonMixin, db.Model):
email = db.Column(db.String(128), unique=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey("role.id"), nullable=False)
role = db.relationship("Role", backref="users")
organization_id = db.Column(db.Integer, db.ForeignKey("organization.id"), nullable=False)
organization = db.relationship("Organization", backref="users")
approval_state = db.Column(db.Integer, default=0)
download_count = db.Column(db.Integer, default=0)
def asdict(self):
table_dict = {c.name: getattr(self, c.name) for c in self.__table__.columns if "_id" not in c.name}
table_dict.update({"organization": self.organization.name, "role": self.role.name})
table_dict.pop("password_hash")
return table_dict
| NVFlare-main | nvflare/dashboard/application/models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app as app
from flask import jsonify, make_response, request
from flask_jwt_extended import get_jwt, get_jwt_identity, jwt_required
from .store import Store, check_role
@app.route("/api/v1/users", methods=["POST"])
def create_one_user():
req = request.json
result = Store.create_user(req)
if result is not None:
return jsonify(result), 201
else:
return jsonify({"status": "conflicting"}), 409
@app.route("/api/v1/users", methods=["GET"])
@jwt_required()
def get_all_users():
claims = get_jwt()
if claims.get("role") == "project_admin":
result = Store.get_users()
else:
org_name = claims.get("organization", "")
result = Store.get_users(org_name=org_name)
return jsonify(result)
@app.route("/api/v1/users/<id>", methods=["GET"])
@jwt_required()
def get_one_user(id):
c, p = check_role(id, get_jwt(), get_jwt_identity())
if not c and not p:
return jsonify({"status": "unauthorized"}), 403
return jsonify(Store.get_user(id))
@app.route("/api/v1/users/<id>", methods=["PATCH", "DELETE"])
@jwt_required()
def update_user(id):
c, p = check_role(id, get_jwt(), get_jwt_identity())
if not c and not p:
return jsonify({"status": "unauthorized"}), 403
if request.method == "PATCH":
req = request.json
req.pop("email", None)
if p:
result = Store.patch_user_by_project_admin(id, req)
elif c:
result = Store.patch_user_by_creator(id, req)
elif request.method == "DELETE":
result = Store.delete_user(id)
else:
result = {"status": "error"}
return jsonify(result)
@app.route("/api/v1/users/<int:id>/blob", methods=["POST"])
@jwt_required()
def user_blob(id):
if not Store._is_approved_by_user_id(id):
return jsonify({"status": "not approved yet"}), 200
c, p = check_role(id, get_jwt(), get_jwt_identity())
if not c and not p:
return jsonify({"status": "unauthorized"}), 403
pin = request.json.get("pin")
fileobj, filename = Store.get_user_blob(pin, id)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
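# Usage note (illustrative): apart from user registration (POST /api/v1/users), the routes above
# require a valid JWT passed as a Bearer token. Example requests (host, token, and field values
# are placeholders):
#   curl -k -X POST https://localhost:8443/api/v1/users -H "Content-Type: application/json" \
#        -d '{"name": "Jane", "email": "jane@example.com", "password": "secret", "organization": "org1", "role": "member"}'
#   curl -k https://localhost:8443/api/v1/users -H "Authorization: Bearer <JWT>"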
| NVFlare-main | nvflare/dashboard/application/users.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
jwt = JWTManager()
def init_app():
os.makedirs("/var/tmp/nvflare/dashboard", exist_ok=True)
static_folder = os.environ.get("NVFL_DASHBOARD_STATIC_FOLDER", "static")
app = Flask(__name__, static_url_path="", static_folder=static_folder)
app.config.from_object("nvflare.dashboard.config.Config")
db.init_app(app)
jwt.init_app(app)
with app.app_context():
from . import clients, project, users
from .store import Store
db.create_all()
if not Store.ready():
credential = os.environ.get("NVFL_CREDENTIAL")
if credential is None:
print("Please set env var NVFL_CREDENTIAL")
exit(1)
email = credential.split(":")[0]
pwd = credential.split(":")[1]
Store.seed_user(email, pwd)
with open(os.path.join("/var/tmp/nvflare/dashboard", ".db_init_done"), "ab") as f:
f.write(bytes())
return app
| NVFlare-main | nvflare/dashboard/application/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
from dataclasses import dataclass
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
dashboard_pp = os.environ.get("NVFL_DASHBOARD_PP")
if dashboard_pp is not None:
dashboard_pp = dashboard_pp.encode("utf-8")
@dataclass
class Entity:
"""Class for keeping track of each certificate owner."""
name: str
org: str = None
role: str = None
@dataclass
class CertPair:
"""Class for serialized private key and certificate."""
owner: Entity = None
ser_pri_key: str = None
ser_cert: str = None
def serialize_pri_key(pri_key, passphrase=None):
if passphrase is None or not isinstance(passphrase, bytes):
return pri_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
else:
return pri_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(password=passphrase),
)
def serialize_cert(cert):
return cert.public_bytes(serialization.Encoding.PEM)
def generate_keys():
pri_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pub_key = pri_key.public_key()
return pri_key, pub_key
def x509_name(cn_name, org_name=None, role=None):
name = [x509.NameAttribute(NameOID.COMMON_NAME, cn_name)]
if org_name is not None:
name.append(x509.NameAttribute(NameOID.ORGANIZATION_NAME, org_name))
if role:
name.append(x509.NameAttribute(NameOID.UNSTRUCTURED_NAME, role))
return x509.Name(name)
def generate_cert(subject, issuer, signing_pri_key, subject_pub_key, valid_days=360, ca=False):
x509_subject = x509_name(subject.name, subject.org, subject.role)
x509_issuer = x509_name(issuer.name, issuer.org, issuer.role)
builder = (
x509.CertificateBuilder()
.subject_name(x509_subject)
.issuer_name(x509_issuer)
.public_key(subject_pub_key)
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(
# Our certificate will be valid for 360 days
datetime.datetime.utcnow()
+ datetime.timedelta(days=valid_days)
# Sign our certificate with our private key
)
.add_extension(x509.SubjectAlternativeName([x509.DNSName(subject.name)]), critical=False)
)
if ca:
builder = (
builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(subject_pub_key),
critical=False,
)
.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(subject_pub_key),
critical=False,
)
.add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=False)
)
return builder.sign(signing_pri_key, hashes.SHA256(), default_backend())
def _pack(entity, pri_key, cert, passphrase=None):
ser_pri_key = serialize_pri_key(pri_key, passphrase)
ser_cert = serialize_cert(cert)
cert_pair = CertPair(entity, ser_pri_key, ser_cert)
return cert_pair
def make_root_cert(subject: Entity):
pri_key, pub_key = generate_keys()
cert = generate_cert(subject=subject, issuer=subject, signing_pri_key=pri_key, subject_pub_key=pub_key, ca=True)
return _pack(subject, pri_key, cert, passphrase=dashboard_pp)
def make_cert(subject: Entity, issuer_cert_pair: CertPair):
pri_key, pub_key = generate_keys()
issuer_pri_key = deserialize_ca_key(issuer_cert_pair.ser_pri_key)
cert = generate_cert(subject, issuer_cert_pair.owner, issuer_pri_key, pub_key, valid_days=360, ca=False)
return _pack(subject, pri_key, cert, passphrase=None)
def deserialize_ca_key(ser_pri_key):
pri_key = serialization.load_pem_private_key(ser_pri_key, password=dashboard_pp, backend=default_backend())
return pri_key
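# Usage sketch (illustrative): issue a leaf certificate signed by a freshly generated root.
# Entity names are placeholders; in the dashboard the root pair is stored on the Project row and
# reused to sign every server, client, and user certificate.
if __name__ == "__main__":
    root = make_root_cert(Entity("example_project"))
    site = make_cert(Entity("site-1", org="example_org"), root)
    print(site.ser_cert.decode())  # PEM-encoded leaf certificate
    print(len(site.ser_pri_key), "bytes of PEM-encoded private key")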
| NVFlare-main | nvflare/dashboard/application/cert.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app as app
from flask import jsonify, make_response, request
from flask_jwt_extended import get_jwt, get_jwt_identity, jwt_required
from .store import Store, check_role
@app.route("/api/v1/clients", methods=["POST"])
@jwt_required()
def create_one_client():
creator = get_jwt_identity()
req = request.json
result = Store.create_client(req, creator)
if result is not None:
return jsonify(result), 201
else:
return jsonify({"status": "conflicting"}), 409
@app.route("/api/v1/clients", methods=["GET"])
@jwt_required()
def get_all_clients():
result = Store.get_clients()
return jsonify(result)
@app.route("/api/v1/clients/<id>", methods=["GET"])
@jwt_required()
def get_one_client(id):
result = Store.get_client(id)
return jsonify(result)
@app.route("/api/v1/clients/<id>", methods=["PATCH", "DELETE"])
@jwt_required()
def update_client(id):
creator_id = Store.get_creator_id_by_client_id(id)
if creator_id is None:
return jsonify({"status": "error"}), 404
c, p = check_role(creator_id, get_jwt(), get_jwt_identity())
if not c and not p:
return jsonify({"status": "unauthorized"}), 403
if request.method == "PATCH":
req = request.json
if p:
result = Store.patch_client_by_project_admin(id, req)
elif c:
result = Store.patch_client_by_creator(id, req)
elif request.method == "DELETE":
result = Store.delete_client(id)
else:
result = {"status": "error"}
if result is not None:
return jsonify(result)
else:
return jsonify({"status": "conflicting"}), 409
@app.route("/api/v1/clients/<int:id>/blob", methods=["POST"])
@jwt_required()
def client_blob(id):
if not Store._is_approved_by_client_id(id):
return jsonify({"status": "not approved yet"}), 200
creator_id = Store.get_creator_id_by_client_id(id)
c, p = check_role(creator_id, get_jwt(), get_jwt_identity())
if p or c:
pin = request.json.get("pin")
fileobj, filename = Store.get_client_blob(pin, id)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
else:
return jsonify({"status": "unauthorized"}), 403
| NVFlare-main | nvflare/dashboard/application/clients.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
import subprocess
import tempfile
from nvflare.lighter import tplt_utils, utils
from .cert import CertPair, Entity, deserialize_ca_key, make_cert
from .models import Client, Project, User
lighter_folder = os.path.dirname(utils.__file__)
template = utils.load_yaml(os.path.join(lighter_folder, "impl", "master_template.yml"))
def get_csp_template(csp, participant, template):
return template[f"{csp}_start_{participant}_sh"]
def get_csp_start_script_name(csp):
return f"{csp}_start.sh"
def _write(file_full_path, content, mode, exe=False):
mode = mode + "w"
with open(file_full_path, mode) as f:
f.write(content)
if exe:
os.chmod(file_full_path, 0o755)
def gen_overseer(key):
project = Project.query.first()
entity = Entity(project.overseer)
issuer = Entity(project.short_name)
signing_cert_pair = CertPair(issuer, project.root_key, project.root_cert)
cert_pair = make_cert(entity, signing_cert_pair)
with tempfile.TemporaryDirectory() as tmp_dir:
overseer_dir = os.path.join(tmp_dir, entity.name)
dest_dir = os.path.join(overseer_dir, "startup")
os.mkdir(overseer_dir)
os.mkdir(dest_dir)
_write(
os.path.join(dest_dir, "start.sh"),
template["start_ovsr_sh"],
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "gunicorn.conf.py"),
utils.sh_replace(template["gunicorn_conf_py"], {"port": "8443"}),
"t",
exe=False,
)
_write(os.path.join(dest_dir, "overseer.crt"), cert_pair.ser_cert, "b", exe=False)
_write(os.path.join(dest_dir, "overseer.key"), cert_pair.ser_pri_key, "b", exe=False)
_write(os.path.join(dest_dir, "rootCA.pem"), project.root_cert, "b", exe=False)
run_args = ["zip", "-rq", "-P", key, "tmp.zip", "."]
subprocess.run(run_args, cwd=tmp_dir)
fileobj = io.BytesIO()
with open(os.path.join(tmp_dir, "tmp.zip"), "rb") as fo:
fileobj.write(fo.read())
fileobj.seek(0)
return fileobj, f"{entity.name}.zip"
def gen_server(key, first_server=True):
project = Project.query.first()
if first_server:
entity = Entity(project.server1)
fl_port = 8002
admin_port = 8003
else:
entity = Entity(project.server2)
fl_port = 8102
admin_port = 8103
issuer = Entity(project.short_name)
signing_cert_pair = CertPair(issuer, project.root_key, project.root_cert)
cert_pair = make_cert(entity, signing_cert_pair)
config = json.loads(template["fed_server"])
server_0 = config["servers"][0]
server_0["name"] = project.short_name
server_0["service"]["target"] = f"{entity.name}:{fl_port}"
server_0["service"]["scheme"] = project.scheme if hasattr(project, "scheme") else "grpc"
server_0["admin_host"] = entity.name
server_0["admin_port"] = admin_port
if project.ha_mode:
overseer_agent = {"path": "nvflare.ha.overseer_agent.HttpOverseerAgent"}
overseer_agent["args"] = {
"role": "server",
"overseer_end_point": f"https://{project.overseer}:8443/api/v1",
"project": project.short_name,
"name": entity.name,
"fl_port": str(fl_port),
"admin_port": str(admin_port),
}
else:
overseer_agent = {"path": "nvflare.ha.dummy_overseer_agent.DummyOverseerAgent"}
overseer_agent["args"] = {"sp_end_point": f"{project.server1}:8002:8003"}
config["overseer_agent"] = overseer_agent
replacement_dict = {
"admin_port": admin_port,
"fed_learn_port": fl_port,
"config_folder": "config",
"ha_mode": "true" if project.ha_mode else "false",
"docker_image": project.app_location.split(" ")[-1] if project.app_location else "nvflare/nvflare",
"org_name": "",
}
tplt = tplt_utils.Template(template)
with tempfile.TemporaryDirectory() as tmp_dir:
server_dir = os.path.join(tmp_dir, entity.name)
dest_dir = os.path.join(server_dir, "startup")
os.mkdir(server_dir)
os.mkdir(dest_dir)
_write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config, indent=2), "t")
_write(
os.path.join(dest_dir, "docker.sh"),
utils.sh_replace(template["docker_svr_sh"], replacement_dict),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "start.sh"),
utils.sh_replace(template["start_svr_sh"], replacement_dict),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "sub_start.sh"),
utils.sh_replace(template["sub_start_svr_sh"], replacement_dict),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "stop_fl.sh"),
template["stop_fl_sh"],
"t",
exe=True,
)
_write(os.path.join(dest_dir, "server.crt"), cert_pair.ser_cert, "b", exe=False)
_write(os.path.join(dest_dir, "server.key"), cert_pair.ser_pri_key, "b", exe=False)
_write(os.path.join(dest_dir, "rootCA.pem"), project.root_cert, "b", exe=False)
if not project.ha_mode:
_write(
os.path.join(dest_dir, get_csp_start_script_name("azure")),
utils.sh_replace(
tplt.get_cloud_script_header() + get_csp_template("azure", "svr", template),
{"server_name": entity.name, "ORG": ""},
),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, get_csp_start_script_name("aws")),
utils.sh_replace(
tplt.get_cloud_script_header() + get_csp_template("aws", "svr", template),
{"server_name": entity.name, "ORG": ""},
),
"t",
exe=True,
)
signatures = utils.sign_all(dest_dir, deserialize_ca_key(project.root_key))
        with open(os.path.join(dest_dir, "signature.json"), "wt") as f:
            json.dump(signatures, f)
# local folder creation
dest_dir = os.path.join(server_dir, "local")
os.mkdir(dest_dir)
_write(
os.path.join(dest_dir, "log.config.default"),
template["log_config"],
"t",
)
_write(
os.path.join(dest_dir, "resources.json.default"),
template["local_server_resources"],
"t",
)
_write(
os.path.join(dest_dir, "privacy.json.sample"),
template["sample_privacy"],
"t",
)
_write(
os.path.join(dest_dir, "authorization.json.default"),
template["default_authz"],
"t",
)
# workspace folder file
_write(
os.path.join(server_dir, "readme.txt"),
template["readme_fs"],
"t",
)
run_args = ["zip", "-rq", "-P", key, "tmp.zip", "."]
subprocess.run(run_args, cwd=tmp_dir)
fileobj = io.BytesIO()
with open(os.path.join(tmp_dir, "tmp.zip"), "rb") as fo:
fileobj.write(fo.read())
fileobj.seek(0)
return fileobj, f"{entity.name}.zip"
def gen_client(key, id):
project = Project.query.first()
client = Client.query.get(id)
entity = Entity(client.name, client.organization.name)
issuer = Entity(project.short_name)
signing_cert_pair = CertPair(issuer, project.root_key, project.root_cert)
cert_pair = make_cert(entity, signing_cert_pair)
config = json.loads(template["fed_client"])
config["servers"][0]["name"] = project.short_name
config["servers"][0]["service"]["scheme"] = project.scheme if hasattr(project, "scheme") else "grpc"
replacement_dict = {
"client_name": entity.name,
"config_folder": "config",
"docker_image": project.app_location.split(" ")[-1] if project.app_location else "nvflare/nvflare",
"org_name": entity.org,
}
if project.ha_mode:
overseer_agent = {"path": "nvflare.ha.overseer_agent.HttpOverseerAgent"}
overseer_agent["args"] = {
"role": "client",
"overseer_end_point": f"https://{project.overseer}:8443/api/v1",
"project": project.short_name,
"name": entity.name,
}
else:
overseer_agent = {"path": "nvflare.ha.dummy_overseer_agent.DummyOverseerAgent"}
overseer_agent["args"] = {"sp_end_point": f"{project.server1}:8002:8003"}
config["overseer_agent"] = overseer_agent
tplt = tplt_utils.Template(template)
with tempfile.TemporaryDirectory() as tmp_dir:
client_dir = os.path.join(tmp_dir, entity.name)
dest_dir = os.path.join(client_dir, "startup")
os.mkdir(client_dir)
os.mkdir(dest_dir)
_write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config, indent=2), "t")
_write(
os.path.join(dest_dir, "docker.sh"),
utils.sh_replace(template["docker_cln_sh"], replacement_dict),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "start.sh"),
template["start_cln_sh"],
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "sub_start.sh"),
utils.sh_replace(template["sub_start_cln_sh"], replacement_dict),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, "stop_fl.sh"),
template["stop_fl_sh"],
"t",
exe=True,
)
_write(os.path.join(dest_dir, "client.crt"), cert_pair.ser_cert, "b", exe=False)
_write(os.path.join(dest_dir, "client.key"), cert_pair.ser_pri_key, "b", exe=False)
_write(os.path.join(dest_dir, "rootCA.pem"), project.root_cert, "b", exe=False)
_write(
os.path.join(dest_dir, get_csp_start_script_name("azure")),
utils.sh_replace(
tplt.get_cloud_script_header() + get_csp_template("azure", "cln", template),
{"SITE": entity.name, "ORG": entity.org},
),
"t",
exe=True,
)
_write(
os.path.join(dest_dir, get_csp_start_script_name("aws")),
utils.sh_replace(
tplt.get_cloud_script_header() + get_csp_template("aws", "cln", template),
{"SITE": entity.name, "ORG": entity.org},
),
"t",
exe=True,
)
signatures = utils.sign_all(dest_dir, deserialize_ca_key(project.root_key))
        with open(os.path.join(dest_dir, "signature.json"), "wt") as f:
            json.dump(signatures, f)
# local folder creation
dest_dir = os.path.join(client_dir, "local")
os.mkdir(dest_dir)
_write(
os.path.join(dest_dir, "log.config.default"),
template["log_config"],
"t",
)
resources = json.loads(template["local_client_resources"])
for component in resources["components"]:
if "nvflare.app_common.resource_managers.gpu_resource_manager.GPUResourceManager" == component["path"]:
component["args"] = json.loads(client.capacity.capacity)
break
_write(
os.path.join(dest_dir, "resources.json.default"),
json.dumps(resources, indent=2),
"t",
)
_write(
os.path.join(dest_dir, "privacy.json.sample"),
template["sample_privacy"],
"t",
)
_write(
os.path.join(dest_dir, "authorization.json.default"),
template["default_authz"],
"t",
)
# workspace folder file
_write(
os.path.join(client_dir, "readme.txt"),
template["readme_fc"],
"t",
)
run_args = ["zip", "-rq", "-P", key, "tmp.zip", "."]
subprocess.run(run_args, cwd=tmp_dir)
fileobj = io.BytesIO()
with open(os.path.join(tmp_dir, "tmp.zip"), "rb") as fo:
fileobj.write(fo.read())
fileobj.seek(0)
return fileobj, f"{entity.name}.zip"
def gen_user(key, id):
project = Project.query.first()
server_name = project.server1
user = User.query.get(id)
entity = Entity(user.email, user.organization.name, user.role.name)
issuer = Entity(project.short_name)
signing_cert_pair = CertPair(issuer, project.root_key, project.root_cert)
cert_pair = make_cert(entity, signing_cert_pair)
config = json.loads(template["fed_admin"])
replacement_dict = {"admin_name": entity.name, "cn": server_name, "admin_port": "8003", "docker_image": ""}
if project.ha_mode:
overseer_agent = {"path": "nvflare.ha.overseer_agent.HttpOverseerAgent"}
overseer_agent["args"] = {
"role": "admin",
"overseer_end_point": f"https://{project.overseer}:8443/api/v1",
"project": project.short_name,
"name": entity.name,
}
else:
overseer_agent = {"path": "nvflare.ha.dummy_overseer_agent.DummyOverseerAgent"}
overseer_agent["args"] = {"sp_end_point": f"{project.server1}:8002:8003"}
config["admin"].update({"overseer_agent": overseer_agent})
with tempfile.TemporaryDirectory() as tmp_dir:
user_dir = os.path.join(tmp_dir, entity.name)
dest_dir = os.path.join(user_dir, "startup")
os.mkdir(user_dir)
os.mkdir(dest_dir)
_write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config, indent=2), "t")
_write(
os.path.join(dest_dir, "fl_admin.sh"),
utils.sh_replace(template["fl_admin_sh"], replacement_dict),
"t",
exe=True,
)
_write(os.path.join(dest_dir, "client.crt"), cert_pair.ser_cert, "b", exe=False)
_write(os.path.join(dest_dir, "client.key"), cert_pair.ser_pri_key, "b", exe=False)
_write(os.path.join(dest_dir, "rootCA.pem"), project.root_cert, "b", exe=False)
signatures = utils.sign_all(dest_dir, deserialize_ca_key(project.root_key))
        with open(os.path.join(dest_dir, "signature.json"), "wt") as f:
            json.dump(signatures, f)
# local folder creation
dest_dir = os.path.join(user_dir, "local")
os.mkdir(dest_dir)
# workspace folder file
_write(
os.path.join(user_dir, "readme.txt"),
template["readme_am"],
"t",
)
_write(
os.path.join(user_dir, "system_info.ipynb"),
utils.sh_replace(template["adm_notebook"], replacement_dict),
"t",
)
run_args = ["zip", "-rq", "-P", key, "tmp.zip", "."]
subprocess.run(run_args, cwd=tmp_dir)
fileobj = io.BytesIO()
with open(os.path.join(tmp_dir, "tmp.zip"), "rb") as fo:
fileobj.write(fo.read())
fileobj.seek(0)
return fileobj, f"{entity.name}.zip"
| NVFlare-main | nvflare/dashboard/application/blob.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app as app
from flask import jsonify, make_response, request
from flask_jwt_extended import create_access_token, get_jwt, jwt_required
from . import jwt
from .store import Store
@jwt.expired_token_loader
def my_expired_token_callback(jwt_header, jwt_payload):
return jsonify({"status": "unauthenticated"}), 401
@app.route("/application-config")
def application_config_html():
return app.send_static_file("application-config.html")
@app.route("/downloads")
def downloads_html():
return app.send_static_file("downloads.html")
@app.route("/")
def index_html():
return app.send_static_file("index.html")
@app.route("/logout")
def logout_html():
return app.send_static_file("logout.html")
@app.route("/project-admin-dashboard")
def project_admin_dashboard_html():
return app.send_static_file("project-admin-dashboard.html")
@app.route("/project-configuration")
def project_configuration_html():
return app.send_static_file("project-configuration.html")
@app.route("/registration-form")
def registration_form_html():
return app.send_static_file("registration-form.html")
@app.route("/server-config")
def server_config_html():
return app.send_static_file("server-config.html")
@app.route("/site-dashboard")
def site_dashboard_html():
return app.send_static_file("site-dashboard.html")
@app.route("/user-dashboard")
def user_dashboard_html():
return app.send_static_file("user-dashboard.html")
@app.route("/api/v1/login", methods=["POST"])
def login():
req = request.json
email = req.get("email", None)
password = req.get("password", None)
user = Store.verify_user(email, password)
if user:
additional_claims = {"role": user.role.name, "organization": user.organization.name}
access_token = create_access_token(identity=user.email, additional_claims=additional_claims)
return jsonify(
{
"status": "ok",
"user": {"id": user.id, "email": user.email, "role": user.role.name},
"access_token": access_token,
}
)
else:
return jsonify({"status": "unauthenticated"}), 401
@app.route("/api/v1/overseer/blob", methods=["POST"])
@jwt_required()
def overseer_blob():
claims = get_jwt()
if claims.get("role") == "project_admin":
pin = request.json.get("pin")
fileobj, filename = Store.get_overseer_blob(pin)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/servers/<int:id>/blob", methods=["POST"])
@jwt_required()
def server_blob(id):
claims = get_jwt()
if claims.get("role") == "project_admin":
pin = request.json.get("pin")
fileobj, filename = Store.get_server_blob(pin, id == 1)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/project", methods=["PATCH"])
@jwt_required()
def set_project():
claims = get_jwt()
if claims.get("role") == "project_admin":
req = request.json
return jsonify(Store.set_project(req))
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/project", methods=["GET"])
def get_project():
return jsonify(Store.get_project())
@app.route("/api/v1/organizations", methods=["GET"])
def get_orgs():
return jsonify(Store.get_orgs())
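# Hedged usage sketch (not part of the dashboard API above): how an external
# script might call the login and blob endpoints. The base URL, credentials,
# pin, and the use of the third-party `requests` package are assumptions for
# illustration only.
def _example_download_server_blob(base_url: str, email: str, password: str, pin: str, out_path: str):
    import requests  # assumed to be installed in the caller's environment
    # authenticate and obtain a JWT access token
    resp = requests.post(f"{base_url}/api/v1/login", json={"email": email, "password": password})
    token = resp.json()["access_token"]
    # download the startup kit for server 1 using the bearer token
    blob = requests.post(
        f"{base_url}/api/v1/servers/1/blob",
        json={"pin": pin},
        headers={"Authorization": f"Bearer {token}"},
    )
    with open(out_path, "wb") as f:
        f.write(blob.content)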
| NVFlare-main | nvflare/dashboard/application/project.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from threading import Lock
from typing import List
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.fuel.f3.cellnet.core_cell import Message, MessageHeaderKey
from nvflare.fuel.f3.cellnet.core_cell import ReturnCode as CellReturnCode
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.private.defs import CellChannel
from nvflare.private.fed.utils.fed_utils import get_target_names
from nvflare.security.logging import secure_format_traceback
class AuxRunner(FLComponent):
def __init__(self, engine):
"""To init the AuxRunner."""
FLComponent.__init__(self)
self.engine = engine
self.topic_table = {} # topic => handler
self.reg_lock = Lock()
def register_aux_message_handler(self, topic: str, message_handle_func):
"""Register aux message handling function with specified topics.
This method should be called by ServerEngine's register_aux_message_handler method.
Args:
topic: the topic to be handled by the func
message_handle_func: the func to handle the message. Must follow aux_message_handle_func_signature.
Returns: N/A
Exception is raised when:
a handler is already registered for the topic;
bad topic - must be a non-empty string
bad message_handle_func - must be callable
"""
if not isinstance(topic, str):
raise TypeError(f"topic must be str, but got {type(topic)}")
if len(topic) <= 0:
raise ValueError("topic must not be empty")
if message_handle_func is None:
raise ValueError("message handler function is not specified")
if not callable(message_handle_func):
raise TypeError("specified message_handle_func {} is not callable".format(message_handle_func))
with self.reg_lock:
if topic in self.topic_table:
raise ValueError(f"handler already registered for topic {topic}")
self.topic_table[topic] = message_handle_func
def _process_request(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
"""Call to process the request.
.. note::
peer_ctx props must have been set into the PEER_PROPS header of the request by Engine.
Args:
topic: topic of the message
request: message to be handled
fl_ctx: fl context
Returns: reply message
"""
handler_f = self.topic_table.get(topic, None)
if handler_f is None:
self.log_error(fl_ctx, "received unknown aux message topic {}".format(topic))
return make_reply(ReturnCode.TOPIC_UNKNOWN)
if not isinstance(request, Shareable):
self.log_error(fl_ctx, f"received invalid aux request: expects a Shareable but got {type(request)}")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
peer_props = request.get_peer_props()
if not peer_props:
self.log_error(fl_ctx, "missing peer_ctx from client")
return make_reply(ReturnCode.MISSING_PEER_CONTEXT)
if not isinstance(peer_props, dict):
self.log_error(
fl_ctx,
f"bad peer_props from client: expects dict but got {type(peer_props)}",
)
return make_reply(ReturnCode.BAD_PEER_CONTEXT)
try:
reply = handler_f(topic=topic, request=request, fl_ctx=fl_ctx)
except Exception:
self.log_exception(fl_ctx, "processing error in message handling")
return make_reply(ReturnCode.HANDLER_EXCEPTION)
return reply
def dispatch(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
"""This method is to be called by the Engine when an aux message is received from peer.
.. note::
peer_ctx props must have been set into the PEER_PROPS header of the request by Engine.
Args:
topic: message topic
request: request message
fl_ctx: FLContext
Returns: reply message
"""
peer_props = request.get_peer_props()
if peer_props:
peer_ctx = FLContext()
peer_ctx.set_public_props(peer_props)
fl_ctx.set_peer_context(peer_ctx)
valid_reply = self._process_request(topic, request, fl_ctx)
if isinstance(request, Shareable):
cookie_jar = request.get_cookie_jar()
if cookie_jar:
valid_reply.set_cookie_jar(cookie_jar)
return valid_reply
def send_aux_request(
self,
targets: list,
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
bulk_send: bool = False,
optional: bool = False,
secure: bool = False,
) -> dict:
target_names = get_target_names(targets)
if not target_names:
return {}
_, invalid_names = self.engine.validate_targets(target_names)
if invalid_names:
raise ValueError(f"invalid target(s): {invalid_names}")
try:
return self._send_to_cell(
targets=targets,
channel=CellChannel.AUX_COMMUNICATION,
topic=topic,
request=request,
timeout=timeout,
fl_ctx=fl_ctx,
bulk_send=bulk_send,
optional=optional,
secure=secure,
)
except Exception:
if optional:
self.logger.debug(f"Failed to send aux message {topic} to targets: {targets}")
self.logger.debug(secure_format_traceback())
else:
self.logger.error(f"Failed to send aux message {topic} to targets: {targets}")
self.logger.error(secure_format_traceback())
return {}
def _send_to_cell(
self,
targets: List[str],
channel: str,
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
bulk_send=False,
optional=False,
secure=False,
) -> dict:
"""Send request to the job cells of other target sites.
Args:
targets (list): list of client names that the request will be sent to
channel (str): channel of the request
topic (str): topic of the request
request (Shareable): request
timeout (float): how long to wait for result. 0 means fire-and-forget
fl_ctx (FLContext): the FL context
bulk_send: whether to bulk send this request (only applies in the fire-and-forget situation)
optional: whether the request is optional
Returns:
A dict of Shareables
"""
request.set_header(ReservedHeaderKey.TOPIC, topic)
request.set_peer_props(fl_ctx.get_all_public_props())
job_id = fl_ctx.get_job_id()
start = time.time()
self.logger.debug("waiting for cell...")
max_wait = 5.0
while True:
cell = self.engine.get_cell()
if cell:
break
if time.time() - start > max_wait:
self.logger.error(f"Cannot get cell after {max_wait} seconds!")
return {}
time.sleep(0.01)
self.logger.debug(f"Got cell in {time.time() - start} secs")
target_names = []
for t in targets:
if not isinstance(t, str):
raise ValueError(f"invalid target name {t}: expect str but got {type(t)}")
if t not in target_names:
target_names.append(t)
target_fqcns = []
for name in target_names:
target_fqcns.append(FQCN.join([name, job_id]))
cell_msg = Message(payload=request)
if timeout > 0:
cell_replies = cell.broadcast_request(
channel=channel,
topic=topic,
request=cell_msg,
targets=target_fqcns,
timeout=timeout,
optional=optional,
secure=secure,
)
replies = {}
if cell_replies:
for k, v in cell_replies.items():
assert isinstance(v, Message)
rc = v.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
client_name = FQCN.get_root(k)
if rc == CellReturnCode.OK:
result = v.payload
if not isinstance(result, Shareable):
self.logger.error(f"reply of {channel}:{topic} must be dict but got {type(result)}")
result = make_reply(ReturnCode.ERROR)
replies[client_name] = result
else:
src = self._convert_return_code(rc)
replies[client_name] = make_reply(src)
return replies
else:
if bulk_send:
cell.queue_message(channel=channel, topic=topic, message=cell_msg, targets=target_fqcns)
else:
cell.fire_and_forget(
channel=channel, topic=topic, message=cell_msg, targets=target_fqcns, optional=optional
)
return {}
def _convert_return_code(self, rc):
rc_table = {
CellReturnCode.TIMEOUT: ReturnCode.COMMUNICATION_ERROR,
CellReturnCode.COMM_ERROR: ReturnCode.COMMUNICATION_ERROR,
CellReturnCode.PROCESS_EXCEPTION: ReturnCode.EXECUTION_EXCEPTION,
CellReturnCode.ABORT_RUN: CellReturnCode.ABORT_RUN,
CellReturnCode.INVALID_REQUEST: CellReturnCode.INVALID_REQUEST,
CellReturnCode.INVALID_SESSION: CellReturnCode.INVALID_SESSION,
CellReturnCode.AUTHENTICATION_ERROR: CellReturnCode.UNAUTHENTICATED,
CellReturnCode.SERVICE_UNAVAILABLE: CellReturnCode.SERVICE_UNAVAILABLE,
}
return rc_table.get(rc, ReturnCode.ERROR)
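# Hedged usage sketch: registering an aux message handler with the AuxRunner
# above. The handler signature mirrors how _process_request invokes it
# (topic=..., request=..., fl_ctx=...); the topic name is a placeholder.
def _example_register_handler(aux_runner: AuxRunner):
    def my_handler(topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
        # a real handler would inspect `request` and build a meaningful reply
        return make_reply(ReturnCode.OK)
    aux_runner.register_aux_message_handler(topic="example.topic", message_handle_func=my_handler)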
| NVFlare-main | nvflare/private/aux_runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.f3.message import Message
from nvflare.fuel.hci.server.constants import ConnProps
class SpecialTaskName(object):
TRY_AGAIN = "__try_again__"
END_RUN = "__end_run__"
class TaskConstant(object):
WAIT_TIME = "__wait_time__"
class EngineConstant(object):
FEDERATE_CLIENT = "federate_client"
FL_TOKEN = "fl_token"
CLIENT_TOKEN_FILE = "client_token.txt"
ENGINE_TASK_NAME = "engine_task_name"
class InfoCollectorTopic(object):
SHOW_STATS = "info.show_stats"
SHOW_ERRORS = "info.show_errors"
RESET_ERRORS = "info.reset_errors"
class ComponentCallerTopic(object):
CALL_COMPONENT = "comp_caller.call"
class TrainingTopic(object):
START = "train.start"
ABORT = "train.abort"
ABORT_TASK = "train.abort_task"
DELETE_RUN = "train.delete_run"
DEPLOY = "train.deploy"
SHUTDOWN = "train.shutdown"
RESTART = "train.restart"
CHECK_STATUS = "train.check_status"
SET_JOB_ID = "train.set_job_id"
CHECK_RESOURCE = "scheduler.check_resource"
ALLOCATE_RESOURCE = "scheduler.allocate_resource"
CANCEL_RESOURCE = "scheduler.cancel_resource"
START_JOB = "train.start_job"
GET_SCOPES = "train.get_scopes"
class RequestHeader(object):
JOB_ID = "job_id"
TOPIC = "topic"
JOB_META = "job_meta"
APP_NAME = "app_name"
CONTROL_COMMAND = "control_command"
CALL_NAME = "call_name"
COMPONENT_TARGET = "component_target"
ADMIN_COMMAND = "admin_command"
USER_NAME = ConnProps.USER_NAME
USER_ORG = ConnProps.USER_ORG
USER_ROLE = ConnProps.USER_ROLE
SUBMITTER_NAME = ConnProps.SUBMITTER_NAME
SUBMITTER_ORG = ConnProps.SUBMITTER_ORG
SUBMITTER_ROLE = ConnProps.SUBMITTER_ROLE
REQUIRE_AUTHZ = "require_authz"
class SysCommandTopic(object):
SYS_INFO = "sys.info"
SHELL = "sys.shell"
REPORT_RESOURCES = "resource_manager.report_resources"
class ControlCommandTopic(object):
DO_COMMAND = "control.do_command"
class ControlCommandName(object):
ABORT_TASK = "abort_task"
END_RUN = "end_run"
class ClientStatusKey(object):
JOB_ID = "job_id"
CURRENT_TASK = "current_task"
STATUS = "status"
APP_NAME = "app_name"
CLIENT_NAME = "client_name"
RUNNING_JOBS = "running_jobs"
class ScopeInfoKey(object):
SCOPE_NAMES = "scope_names"
DEFAULT_SCOPE = "default_scope"
# TODO: Remove some of these constants
class AppFolderConstants:
"""hard coded file names inside the app folder."""
CONFIG_TRAIN = "config_train.json"
CONFIG_ENV = "environment.json"
class SSLConstants:
"""hard coded names related to SSL."""
CERT = "ssl_cert"
PRIVATE_KEY = "ssl_private_key"
ROOT_CERT = "ssl_root_cert"
class CellChannel:
CLIENT_MAIN = "admin"
AUX_COMMUNICATION = "aux_communication"
SERVER_MAIN = "task"
SERVER_COMMAND = "server_command"
SERVER_PARENT_LISTENER = "server_parent_listener"
CLIENT_COMMAND = "client_command"
CLIENT_SUB_WORKER_COMMAND = "client_sub_worker_command"
MULTI_PROCESS_EXECUTOR = "multi_process_executor"
SIMULATOR_RUNNER = "simulator_runner"
RETURN_ONLY = "return_only"
class CellChannelTopic:
Register = "register"
Quit = "quit"
GET_TASK = "get_task"
SUBMIT_RESULT = "submit_result"
HEART_BEAT = "heart_beat"
EXECUTE_RESULT = "execute_result"
FIRE_EVENT = "fire_event"
REPORT_JOB_FAILURE = "report_job_failure"
SIMULATOR_WORKER_INIT = "simulator_worker_init"
ERROR_MSG_PREFIX = "NVFLARE_ERROR"
class CellMessageHeaderKeys:
CLIENT_NAME = "client_name"
CLIENT_IP = "client_ip"
PROJECT_NAME = "project_name"
TOKEN = "token"
SSID = "ssid"
UNAUTHENTICATED = "unauthenticated"
JOB_ID = "job_id"
JOB_IDS = "job_ids"
MESSAGE = "message"
ABORT_JOBS = "abort_jobs"
class JobFailureMsgKey:
JOB_ID = "job_id"
CODE = "code"
REASON = "reason"
def new_cell_message(headers: dict, payload=None):
msg_headers = {}
if headers:
msg_headers.update(headers)
return Message(msg_headers, payload)
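# Hedged usage sketch: building a heartbeat-style cell message from the
# constants above. The token/ssid/job id values are placeholders.
def _example_heart_beat_message(token: str, ssid: str, job_ids: list) -> Message:
    headers = {
        CellMessageHeaderKeys.TOKEN: token,
        CellMessageHeaderKeys.SSID: ssid,
        CellMessageHeaderKeys.JOB_IDS: job_ids,
    }
    return new_cell_message(headers)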
| NVFlare-main | nvflare/private/defs.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import EventScope, FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.security.logging import secure_format_exception
# do not use underscore as key name; otherwise it cannot be removed from ctx
_KEY_EVENT_DEPTH = "###event_depth"
_MAX_EVENT_DEPTH = 20
def fire_event(event: str, handlers: list, ctx: FLContext):
"""Fires the specified event and invokes the list of handlers.
Args:
event: the event to be fired
handlers: handlers to be invoked
ctx: context for cross-component data sharing
Returns: N/A
"""
event_id = str(uuid.uuid4())
event_data = ctx.get_prop(FLContextKey.EVENT_DATA, None)
event_origin = ctx.get_prop(FLContextKey.EVENT_ORIGIN, None)
event_scope = ctx.get_prop(FLContextKey.EVENT_SCOPE, EventScope.LOCAL)
depth = ctx.get_prop(_KEY_EVENT_DEPTH, 0)
if depth > _MAX_EVENT_DEPTH:
# too many recursive event calls
raise RuntimeError("Recursive event calls too deep (>{})".format(_MAX_EVENT_DEPTH))
ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth + 1, private=True, sticky=False)
if handlers:
for h in handlers:
if not isinstance(h, FLComponent):
raise TypeError("handler must be FLComponent but got {}".format(type(h)))
try:
# since events could be recursive (a handler fires another event) on the same fl_ctx,
# we need to reset these key values into the fl_ctx
ctx.set_prop(key=FLContextKey.EVENT_ID, value=event_id, private=True, sticky=False)
ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_data, private=True, sticky=False)
ctx.set_prop(key=FLContextKey.EVENT_ORIGIN, value=event_origin, private=True, sticky=False)
ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=event_scope, private=True, sticky=False)
h.handle_event(event, ctx)
except Exception as e:
h.log_exception(
ctx, f'Exception when handling event "{event}": {secure_format_exception(e)}', fire_event=False
)
exceptions = ctx.get_prop(FLContextKey.EXCEPTIONS)
if not exceptions:
exceptions = {}
ctx.set_prop(FLContextKey.EXCEPTIONS, exceptions, sticky=False, private=True)
exceptions[h.name] = e
ctx.set_prop(key=_KEY_EVENT_DEPTH, value=depth, private=True, sticky=False)
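# Hedged usage sketch: firing an event against a minimal FLComponent handler.
# The component and event name below are illustrative only.
def _example_fire_event():
    class _PrintingComponent(FLComponent):
        def handle_event(self, event_type: str, fl_ctx: FLContext):
            self.log_info(fl_ctx, f"handled event {event_type}")
    ctx = FLContext()
    fire_event(event="example.event", handlers=[_PrintingComponent()], ctx=ctx)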
| NVFlare-main | nvflare/private/event.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
class MsgHeader(object):
REF_MSG_ID = "_refMsgId"
RETURN_CODE = "_rtnCode"
META = "_meta"
class ReturnCode(object):
OK = "_ok"
ERROR = "_error"
class Message(object):
def __init__(self, topic: str, body):
"""To init a Message.
Args:
topic: message topic
body: message body.
"""
self.id = str(uuid.uuid4())
self.topic = topic
self.body = body
self.headers = {}
def set_header(self, key, value):
self.headers[key] = value
def set_meta(self, meta: dict):
meta_str = json.dumps(meta)
self.set_header(MsgHeader.META, meta_str)
def get_meta(self):
meta_str = self.get_header(MsgHeader.META, None)
if meta_str:
return json.loads(meta_str)
else:
return None
def set_headers(self, headers: dict):
if not headers:
return
if not isinstance(headers, dict):
raise TypeError("headers must be dict but got {}".format(type(headers)))
if len(headers) > 0:
self.headers.update(headers)
def get_header(self, key, default=None):
return self.headers.get(key, default)
def get_ref_id(self, default=None):
return self.get_header(MsgHeader.REF_MSG_ID, default)
def set_ref_id(self, msg_id):
self.set_header(MsgHeader.REF_MSG_ID, msg_id)
def error_reply(err: str, meta: dict = None) -> Message:
msg = Message(topic="reply", body=err)
msg.set_header(MsgHeader.RETURN_CODE, ReturnCode.ERROR)
if meta:
msg.set_meta(meta)
return msg
def ok_reply(topic=None, body=None, meta: dict = None) -> Message:
if body is None:
body = "ok"
if topic is None:
topic = "reply"
msg = Message(topic=topic, body=body)
msg.set_header(MsgHeader.RETURN_CODE, ReturnCode.OK)
if meta:
msg.set_meta(meta)
return msg
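# Hedged usage sketch of the admin Message helpers above; the topic, body and
# meta values are placeholders.
def _example_request_and_reply():
    request = Message(topic="sys.info", body="")
    request.set_meta({"requested_by": "example"})
    reply = ok_reply(topic="reply", body="system info here", meta={"lines": 1})
    reply.set_ref_id(request.id)
    return request, reply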
| NVFlare-main | nvflare/private/admin_defs.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.fuel.common.excepts import ComponentNotAuthorized, ConfigError
from nvflare.fuel.utils.class_utils import ModuleScanner, get_class
from nvflare.fuel.utils.component_builder import ComponentBuilder
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.fuel.utils.config_service import ConfigService
from nvflare.fuel.utils.dict_utils import augment, extract_first_level_primitive
from nvflare.fuel.utils.json_scanner import JsonObjectProcessor, JsonScanner, Node
from nvflare.fuel.utils.wfconf import _EnvUpdater
from nvflare.security.logging import secure_format_exception
class ConfigContext(object):
def __init__(self):
"""To init thee ConfigContext."""
self.config_json = None
self.pass_num = 0
class JsonConfigurator(JsonObjectProcessor, ComponentBuilder):
def __init__(
self,
config_file_name: Union[str, List[str]],
base_pkgs: List[str],
module_names: List[str],
exclude_libs=True,
num_passes=1,
):
"""To init the JsonConfigurator.
Args:
config_file_name: config filename or list of JSON config file names
            base_pkgs: base packages that need to be scanned
            module_names: module names that need to be scanned
            exclude_libs: True/False to exclude the libs folder
            num_passes: number of passes for parsing the config
"""
JsonObjectProcessor.__init__(self)
if not isinstance(num_passes, int):
raise TypeError(f"num_passes must be int but got {num_passes}")
if not num_passes > 0:
raise ValueError(f"num_passes must > 0 but got {num_passes}")
if isinstance(config_file_name, str):
config_files = [config_file_name]
elif isinstance(config_file_name, list):
config_files = config_file_name
else:
raise TypeError(f"config_file_names must be str or list of strs but got {type(config_file_name)}")
for f in config_files:
if not ConfigFactory.has_config(f):
raise FileNotFoundError(f"config_file_names {f} does not exist or not a file")
self.config_file_names = config_files
self.num_passes = num_passes
self.module_scanner = ModuleScanner(base_pkgs, module_names, exclude_libs)
self.config_ctx = None
config_data = {}
for f in config_files:
data = ConfigService.load_config_dict(f)
try:
augment(to_dict=config_data, from_dict=data, from_override_to=False)
except Exception as e:
raise RuntimeError("Error processing config file {}: {}".format(f, secure_format_exception(e)))
self.config_data = config_data
self.json_scanner = JsonScanner(config_data, config_files)
self.build_auth_func = None
self.build_auth_kwargs = None
def set_component_build_authorizer(self, func, **kwargs):
if not callable(func):
raise ValueError("authorizer func is not callable")
self.build_auth_func = func
self.build_auth_kwargs = kwargs
def authorize_and_build_component(self, config_dict, config_ctx: ConfigContext, node: Node):
if self.build_auth_func is not None:
err = self.build_auth_func(config_dict, config_ctx, node, **self.build_auth_kwargs)
if err:
raise ComponentNotAuthorized(f"component not authorized: {err}")
return self.build_component(config_dict)
def get_module_scanner(self):
return self.module_scanner
def _do_configure(self):
config_ctx = ConfigContext()
config_ctx.config_json = self.config_data
self.config_ctx = config_ctx
all_vars = extract_first_level_primitive(self.config_data)
self.json_scanner.scan(_EnvUpdater(all_vars))
self.start_config(self.config_ctx)
# scan the config to create components
for i in range(self.num_passes):
self.config_ctx.pass_num = i + 1
self.json_scanner.scan(self)
# finalize configuration
self.finalize_config(self.config_ctx)
def configure(self):
try:
self._do_configure()
except Exception as e:
print("Error processing config {}: {}".format(self.config_file_names, secure_format_exception(e)))
raise e
def process_element(self, node: Node):
self.process_config_element(self.config_ctx, node)
def is_configured_subclass(self, config_dict, base_class):
return issubclass(get_class(self.get_class_path(config_dict)), base_class)
def start_config(self, config_ctx: ConfigContext):
pass
def process_config_element(self, config_ctx: ConfigContext, node: Node):
pass
def finalize_config(self, config_ctx: ConfigContext):
pass
def get_component_refs(component):
if "name" in component:
name = component["name"]
key = "name"
elif "path" in component:
name = component["path"]
key = "path"
else:
        raise ConfigError('component has no "name" or "path"')
parts = name.split("#")
component[key] = parts[0]
return parts
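# Hedged illustration of get_component_refs: a "path" (or "name") value may
# carry "#"-separated reference suffixes; the first part is kept on the
# component and all parts are returned. The component dict is a made-up example.
def _example_component_refs():
    component = {"id": "comp1", "path": "my.package.MyComponent#ref_a#ref_b"}
    parts = get_component_refs(component)
    # parts == ["my.package.MyComponent", "ref_a", "ref_b"]
    # component["path"] == "my.package.MyComponent"
    return parts, component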
| NVFlare-main | nvflare/private/json_configer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/private/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ShareableHeader:
IS_RESOURCE_ENOUGH = "_is_resource_enough"
RESOURCE_RESERVE_TOKEN = "_resource_reserve_token"
| NVFlare-main | nvflare/private/scheduler_constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.apis.filter import Filter, FilterChainType, FilterContextKey, FilterSource
from nvflare.apis.fl_constant import FilterKey
from nvflare.private.fed_json_config import FilterChain
class Scope(object):
TASK_DATA_FILTERS_NAME = "task_data_filters"
TASK_RESULT_FILTERS_NAME = "task_result_filters"
def __init__(self):
self.name = ""
self.props = {}
self.task_data_filters = {FilterKey.IN: [], FilterKey.OUT: []}
self.task_result_filters = {FilterKey.IN: [], FilterKey.OUT: []}
def set_name(self, name: str):
if not isinstance(name, str):
raise TypeError(f"scope name must be str but got {type(name)}")
self.name = name
def set_props(self, props: dict):
if not isinstance(props, dict):
raise TypeError(f"scope properties must be dict but got {type(props)}")
self.props = props
def add_task_data_filter(self, f: Filter, direction):
if not FilterChain.validate_direction(direction):
raise TypeError("Filter chain direction {} is not supported.".format(direction))
if not isinstance(f, Filter):
raise TypeError(f"task data filter must be Filter but got {type(f)}")
f.set_prop(FilterContextKey.CHAIN_TYPE, FilterChainType.TASK_DATA_CHAIN)
f.set_prop(FilterContextKey.SOURCE, FilterSource.SITE)
if direction == FilterKey.INOUT:
self.task_data_filters[FilterKey.IN].append(f)
self.task_data_filters[FilterKey.OUT].append(f)
else:
self.task_data_filters.get(direction).append(f)
def add_task_result_filter(self, f: Filter, direction):
if not FilterChain.validate_direction(direction):
raise TypeError("Filter chain direction {} is not supported.".format(direction))
if not isinstance(f, Filter):
raise TypeError(f"task result filter must be Filter but got {type(f)}")
f.set_prop(FilterContextKey.CHAIN_TYPE, FilterChainType.TASK_RESULT_CHAIN)
f.set_prop(FilterContextKey.SOURCE, FilterSource.SITE)
if direction == FilterKey.INOUT:
self.task_result_filters[FilterKey.IN].append(f)
self.task_result_filters[FilterKey.OUT].append(f)
else:
self.task_result_filters.get(direction).append(f)
class PrivacyManager(object):
def __init__(
self, scopes: Union[None, List[Scope]], default_scope_name: Union[None, str], components: Union[None, dict]
):
self.name_to_scopes = {}
self.default_scope = None
self.components = components
if scopes:
for s in scopes:
if s.name in self.name_to_scopes:
raise ValueError(f"duplicate scopes defined for name '{s.name}'")
self.name_to_scopes[s.name] = s
if default_scope_name:
self.default_scope = self.name_to_scopes.get(default_scope_name)
if not self.default_scope:
raise ValueError(f"specified default scope '{default_scope_name}' does not exist")
self.policy_defined = True
else:
self.policy_defined = False
def get_scope(self, name: Union[None, str]):
if not name:
return self.default_scope
return self.name_to_scopes.get(name)
def is_policy_defined(self):
return self.policy_defined
class PrivacyService(object):
manager = None
@staticmethod
def initialize(manager: PrivacyManager):
if manager and not isinstance(manager, PrivacyManager):
raise TypeError(f"manager must be an instance of PrivacyManager, but get {type(manager)}.")
PrivacyService.manager = manager
@staticmethod
def get_scope(name: Union[None, str]):
if not PrivacyService.manager:
return None
else:
return PrivacyService.manager.get_scope(name)
@staticmethod
def is_policy_defined():
if not PrivacyService.manager:
return False
else:
return PrivacyService.manager.is_policy_defined()
@staticmethod
def is_scope_allowed(scope_name: str):
"""Check whether the specified scope is allowed
Args:
scope_name: scope to be checked
Returns:
"""
if not PrivacyService.is_policy_defined():
return True
scope = PrivacyService.get_scope(scope_name)
return scope is not None
@staticmethod
def get_manager():
return PrivacyService.manager
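# Hedged usage sketch: building a privacy Scope and initializing the
# PrivacyService with it. The scope name and properties are placeholders.
def _example_init_privacy_service():
    public_scope = Scope()
    public_scope.set_name("public")
    public_scope.set_props({"allow_byoc": True})
    manager = PrivacyManager(scopes=[public_scope], default_scope_name="public", components={})
    PrivacyService.initialize(manager)
    # with a policy defined, only known scopes are allowed
    return PrivacyService.is_scope_allowed("public")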
| NVFlare-main | nvflare/private/privacy_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from nvflare.apis.filter import Filter, FilterChainType, FilterContextKey, FilterSource
from nvflare.apis.fl_constant import FilterKey
from nvflare.fuel.utils.json_scanner import Node
from nvflare.private.json_configer import ConfigContext, ConfigError, JsonConfigurator
class FilterChain(object):
def __init__(self, chain_type, direction):
"""To init the FilterChain."""
self.chain_type = chain_type
self.tasks = []
self.filters = []
self.direction = direction
@classmethod
def validate_direction(cls, direction):
return direction in [FilterKey.IN, FilterKey.OUT, FilterKey.INOUT]
class FedJsonConfigurator(JsonConfigurator):
def __init__(self, config_file_name: str, base_pkgs: [str], module_names: [str], exclude_libs=True, is_server=True):
"""To init the FedJsonConfigurator.
Args:
config_file_name: config filename
            base_pkgs: base packages that need to be scanned
            module_names: module names that need to be scanned
exclude_libs: True/False to exclude the libs folder
"""
JsonConfigurator.__init__(
self,
config_file_name=config_file_name,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=exclude_libs,
)
self.format_version = None
self.handlers = []
self.components = {} # id => component
self.task_data_filter_chains = []
self.task_result_filter_chains = []
self.current_filter_chain = None
self.data_filter_table = None
self.result_filter_table = None
self.is_server = is_server
def process_config_element(self, config_ctx: ConfigContext, node: Node):
element = node.element
path = node.path()
if path == "format_version":
self.format_version = element
return
# if re.search(r"^handlers\.#[0-9]+$", path):
# h = self.build_component(element)
# if not isinstance(h, FLComponent):
# raise ConfigError("handler must be a FLComponent object, but got {}".format(type(h)))
# # Ensure only add one instance of the handlers for the same component
# if type(h).__name__ not in [type(t).__name__ for t in self.handlers]:
# self.handlers.append(h)
# return
if re.search(r"^components\.#[0-9]+$", path):
c = self.authorize_and_build_component(element, config_ctx, node)
cid = element.get("id", None)
if not cid:
raise ConfigError("missing component id")
if not isinstance(cid, str):
raise ConfigError('"id" must be str but got {}'.format(type(cid)))
if cid in self.components:
raise ConfigError('duplicate component id "{}"'.format(cid))
self.components[cid] = c
return
# result filters
if re.search(r"^task_result_filters\.#[0-9]+$", path):
default_direction = FilterKey.IN if self.is_server else FilterKey.OUT
self.current_filter_chain = FilterChain(FilterChainType.TASK_RESULT_CHAIN, default_direction)
node.props["data"] = self.current_filter_chain
node.exit_cb = self._process_result_filter_chain
return
if re.search(r"^task_result_filters\.#[0-9]+\.tasks$", path):
self.current_filter_chain.tasks = element
return
if re.search(r"^task_result_filters\.#[0-9]+\.direction$", path):
self.current_filter_chain.direction = element
return
if re.search(r"^task_result_filters.#[0-9]+\.filters\.#[0-9]+$", path):
f = self.authorize_and_build_component(element, config_ctx, node)
self.current_filter_chain.filters.append(f)
return
# data filters
if re.search(r"^task_data_filters\.#[0-9]+$", path):
default_direction = FilterKey.OUT if self.is_server else FilterKey.IN
self.current_filter_chain = FilterChain(FilterChainType.TASK_DATA_CHAIN, default_direction)
node.props["data"] = self.current_filter_chain
node.exit_cb = self._process_data_filter_chain
return
if re.search(r"^task_data_filters\.#[0-9]+\.tasks$", path):
self.current_filter_chain.tasks = element
return
if re.search(r"^task_data_filters\.#[0-9]+\.direction$", path):
self.current_filter_chain.direction = element
return
if re.search(r"^task_data_filters.#[0-9]+\.filters\.#[0-9]+$", path):
f = self.authorize_and_build_component(element, config_ctx, node)
self.current_filter_chain.filters.append(f)
return
def validate_tasks(self, tasks):
if not isinstance(tasks, list):
raise ConfigError('"tasks" must be specified as list of task names but got {}'.format(type(tasks)))
if len(tasks) <= 0:
raise ConfigError('"tasks" must not be empty')
for n in tasks:
if not isinstance(n, str):
raise ConfigError("task names must be string but got {}".format(type(n)))
def validate_filter_chain(self, chain: FilterChain):
self.validate_tasks(chain.tasks)
if not isinstance(chain.filters, list):
raise ConfigError('"filters" must be specified as list of filters but got {}'.format(type(chain.filters)))
if len(chain.filters) <= 0:
raise ConfigError('"filters" must not be empty')
for f in chain.filters:
if not isinstance(f, Filter):
raise ConfigError('"filters" must contain Filter object but got {}'.format(type(f)))
f.set_prop(FilterContextKey.CHAIN_TYPE, chain.chain_type)
f.set_prop(FilterContextKey.SOURCE, FilterSource.JOB)
def _process_result_filter_chain(self, node: Node):
filter_chain = node.props["data"]
self.validate_filter_chain(filter_chain)
self.task_result_filter_chains.append(filter_chain)
def _process_data_filter_chain(self, node: Node):
filter_chain = node.props["data"]
self.validate_filter_chain(filter_chain)
self.task_data_filter_chains.append(filter_chain)
def finalize_config(self, config_ctx: ConfigContext):
if self.format_version is None:
raise ConfigError("missing format_version")
if not isinstance(self.format_version, int):
raise ConfigError('"format_version" must be int, but got {}'.format(type(self.format_version)))
if self.format_version != 2:
raise ConfigError('wrong "format_version" {}: must be 2'.format(self.format_version))
data_filter_table = {}
for c in self.task_data_filter_chains:
self._build_filter_table(c, data_filter_table)
self.data_filter_table = data_filter_table
result_filter_table = {}
for c in self.task_result_filter_chains:
self._build_filter_table(c, result_filter_table)
self.result_filter_table = result_filter_table
def _build_filter_table(self, c, data_filter_table):
        if not isinstance(c, FilterChain):
            raise TypeError("chain must be FilterChain but got {}".format(type(c)))
        direction = c.direction.lower()
        if not FilterChain.validate_direction(direction):
            raise TypeError("Filter chain direction {} is not supported.".format(direction))
for t in c.tasks:
if direction == FilterKey.INOUT:
directions = [FilterKey.IN, FilterKey.OUT]
else:
directions = [direction]
for item in directions:
task_filter_key = t + FilterKey.DELIMITER + item
if task_filter_key in data_filter_table:
raise ConfigError("multiple data filter chains defined for task {}".format(task_filter_key))
data_filter_table[task_filter_key] = c.filters
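# Hedged illustration of the filter-chain config shape parsed above. The class
# path and task name are placeholders; the structure (tasks / direction /
# filters) follows the element paths matched in process_config_element.
_EXAMPLE_TASK_RESULT_FILTERS = [
    {
        "tasks": ["train"],
        "direction": FilterKey.IN,
        "filters": [
            {"path": "my.package.MyResultFilter", "args": {}},
        ],
    }
]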
| NVFlare-main | nvflare/private/fed_json_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from nvflare.apis.fl_component import FLComponent
class Runner(FLComponent):
def start(self):
"""Method call at the start of the Runner process."""
pass
@abstractmethod
def stop(self):
"""Method call at the end of the Runner process."""
| NVFlare-main | nvflare/private/fed/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/private/fed/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.fuel.f3.cellnet.core_cell import FQCN, CoreCell, Message, MessageHeaderKey
from nvflare.fuel.f3.cellnet.core_cell import ReturnCode as CellReturnCode
from nvflare.private.defs import CellMessageHeaderKeys
class CellMessageInterface(FLComponent, ABC):
HEADER_KEY_PEER_PROPS = "cmi.peer_props"
HEADER_JOB_ID = "cmi.job_id"
HEADER_PROJECT_NAME = "cmi.project"
HEADER_SSID = "cmi.ssid"
HEADER_CLIENT_TOKEN = "cmi.client_token"
HEADER_CLIENT_NAME = "cmi.client_name"
PROP_KEY_CLIENT = "cmi.client"
PROP_KEY_FL_CTX = "cmi.fl_ctx"
PROP_KEY_PEER_CTX = "cmi.peer_ctx"
RC_TABLE = {
CellReturnCode.TIMEOUT: ReturnCode.COMMUNICATION_ERROR,
CellReturnCode.COMM_ERROR: ReturnCode.COMMUNICATION_ERROR,
CellReturnCode.PROCESS_EXCEPTION: ReturnCode.EXECUTION_EXCEPTION,
CellReturnCode.ABORT_RUN: CellReturnCode.ABORT_RUN,
CellReturnCode.INVALID_REQUEST: CellReturnCode.INVALID_REQUEST,
CellReturnCode.INVALID_SESSION: CellReturnCode.INVALID_SESSION,
CellReturnCode.AUTHENTICATION_ERROR: CellReturnCode.UNAUTHENTICATED,
CellReturnCode.SERVICE_UNAVAILABLE: CellReturnCode.SERVICE_UNAVAILABLE,
}
def __init__(
self,
engine,
):
FLComponent.__init__(self)
self.engine = engine
self.cell = engine.get_cell()
self.ready = False
self.cell.add_incoming_request_filter(channel="*", topic="*", cb=self._filter_incoming_request)
self.cell.add_outgoing_reply_filter(channel="*", topic="*", cb=self._filter_outgoing_message)
self.cell.add_outgoing_request_filter(channel="*", topic="*", cb=self._filter_outgoing_message)
self.cell.add_incoming_reply_filter(channel="*", topic="*", cb=self._filter_incoming_message)
def new_cmi_message(self, fl_ctx: FLContext, headers=None, payload=None):
msg = Message(headers, payload)
msg.set_prop(self.PROP_KEY_FL_CTX, fl_ctx)
return msg
def _filter_incoming_message(self, message: Message):
public_props = message.get_header(self.HEADER_KEY_PEER_PROPS)
if public_props:
peer_ctx = self._make_peer_ctx(public_props)
message.set_prop(self.PROP_KEY_PEER_CTX, peer_ctx)
shareable = message.payload
if isinstance(shareable, Shareable):
if public_props:
shareable.set_peer_props(public_props)
def _filter_incoming_request(self, message: Message):
self._filter_incoming_message(message)
fl_ctx = self.engine.new_context()
peer_ctx = message.get_prop(self.PROP_KEY_PEER_CTX)
assert isinstance(fl_ctx, FLContext)
if peer_ctx:
fl_ctx.set_peer_context(peer_ctx)
message.set_prop(self.PROP_KEY_FL_CTX, fl_ctx)
def _filter_outgoing_message(self, message: Message):
fl_ctx = message.get_prop(self.PROP_KEY_FL_CTX)
if fl_ctx:
assert isinstance(fl_ctx, FLContext)
public_props = fl_ctx.get_all_public_props()
message.set_header(self.HEADER_KEY_PEER_PROPS, public_props)
ssid = fl_ctx.get_prop(CellMessageHeaderKeys.SSID)
if ssid:
message.set_header(self.HEADER_SSID, ssid)
project_name = fl_ctx.get_prop(CellMessageHeaderKeys.PROJECT_NAME)
if project_name:
message.set_header(self.HEADER_PROJECT_NAME, project_name)
client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME)
if client_name:
message.set_header(self.HEADER_CLIENT_NAME, client_name)
client_token = fl_ctx.get_prop(CellMessageHeaderKeys.TOKEN)
if client_token:
message.set_header(self.HEADER_CLIENT_TOKEN, client_token)
@staticmethod
def _make_peer_ctx(props: dict) -> FLContext:
ctx = FLContext()
ctx.set_public_props(props)
return ctx
@staticmethod
def _convert_return_code(rc: CellReturnCode):
return CellMessageInterface.RC_TABLE.get(rc, ReturnCode.ERROR)
@abstractmethod
def send_to_cell(
self,
targets: [],
channel: str,
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
bulk_send=False,
) -> dict:
pass
class JobCellMessenger(CellMessageInterface):
def __init__(self, engine, job_id: str):
super().__init__(engine)
self.job_id = job_id
self.cell.add_incoming_request_filter(channel="*", topic="*", cb=self._filter_incoming)
self.cell.add_incoming_reply_filter(channel="*", topic="*", cb=self._filter_incoming)
self.cell.add_outgoing_request_filter(channel="*", topic="*", cb=self._filter_outgoing)
self.cell.add_outgoing_reply_filter(channel="*", topic="*", cb=self._filter_outgoing)
def _filter_incoming(self, message: Message):
job_id = message.get_header(self.HEADER_JOB_ID)
if job_id and job_id != self.job_id:
self.logger.error(f"received job id {job_id} != my job id {self.job_id}")
def _filter_outgoing(self, message: Message):
message.set_header(self.HEADER_JOB_ID, self.job_id)
def send_to_cell(
self,
targets: [],
channel: str,
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
bulk_send=False,
optional=False,
) -> dict:
"""Send request to the job cells of other target sites.
Args:
targets (list): list of client names that the request will be sent to
channel (str): channel of the request
topic (str): topic of the request
request (Shareable): request
timeout (float): how long to wait for result. 0 means fire-and-forget
fl_ctx (FLContext): the FL context
bulk_send: whether to bulk send this request (only applies in the fire-and-forget situation)
optional: whether the request is optional
Returns:
A dict of Shareables
"""
if not isinstance(request, Shareable):
raise ValueError(f"invalid request type: expect Shareable but got {type(request)}")
if not targets:
raise ValueError("targets must be specified")
if targets is not None and not isinstance(targets, list):
raise TypeError(f"targets must be a list of str, but got {type(targets)}")
if not isinstance(topic, str):
raise TypeError(f"invalid topic '{topic}': expects str but got {type(topic)}")
if not topic:
raise ValueError("invalid topic: must not be empty")
if not isinstance(timeout, float):
raise TypeError(f"invalid timeout: expects float but got {type(timeout)}")
if timeout < 0:
raise ValueError(f"invalid timeout value {timeout}: must >= 0.0")
if not isinstance(fl_ctx, FLContext):
raise TypeError(f"invalid fl_ctx: expects FLContext but got {type(fl_ctx)}")
request.set_header(ReservedHeaderKey.TOPIC, topic)
job_id = fl_ctx.get_job_id()
cell = self.engine.get_cell()
assert isinstance(cell, CoreCell)
target_names = []
for t in targets:
if not isinstance(t, str):
raise ValueError(f"invalid target name {t}: expect str but got {type(t)}")
if t not in target_names:
target_names.append(t)
target_fqcns = []
for name in target_names:
target_fqcns.append(FQCN.join([name, job_id]))
cell_msg = self.new_cmi_message(fl_ctx, payload=request)
if timeout > 0:
cell_replies = cell.broadcast_request(
channel=channel, topic=topic, request=cell_msg, targets=target_fqcns, timeout=timeout, optional=optional
)
replies = {}
if cell_replies:
for k, v in cell_replies.items():
assert isinstance(v, Message)
rc = v.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
client_name = FQCN.get_root(k)
if rc == CellReturnCode.OK:
result = v.payload
if not isinstance(result, Shareable):
self.logger.error(f"reply of {channel}:{topic} must be dict but got {type(result)}")
result = make_reply(ReturnCode.ERROR)
replies[client_name] = result
else:
src = self._convert_return_code(rc)
replies[client_name] = make_reply(src)
return replies
else:
if bulk_send:
cell.queue_message(channel=channel, topic=topic, message=cell_msg, targets=target_fqcns)
else:
cell.fire_and_forget(
channel=channel, topic=topic, message=cell_msg, targets=target_fqcns, optional=optional
)
return {}
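    # A minimal, commented usage sketch for send_to_cell (hypothetical caller; the channel and
    # topic names below are assumptions, not defined in this module):
    #
    #   replies = cmi.send_to_cell(
    #       targets=["site-1", "site-2"],
    #       channel="aux_communication",
    #       topic="get_weights",
    #       request=my_shareable,   # any Shareable payload
    #       timeout=5.0,            # > 0 waits for replies; 0 means fire-and-forget
    #       fl_ctx=fl_ctx,
    #   )
    #   # replies maps each client name to a Shareable (or an error reply built with make_reply)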
| NVFlare-main | nvflare/private/fed/cmi.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Tuple
from nvflare.apis.app_validation import AppValidationKey, AppValidator
from nvflare.apis.fl_constant import JobConstants, SiteType, WorkspaceConstants
from nvflare.fuel.utils.config_factory import ConfigFactory
def _config_exists(app_root: str, config_folder: str, site_type: str) -> str:
if site_type == SiteType.SERVER:
config_to_check = JobConstants.SERVER_JOB_CONFIG
elif site_type == SiteType.CLIENT:
config_to_check = JobConstants.CLIENT_JOB_CONFIG
else:
config_to_check = None
if config_to_check and ConfigFactory.load_config(config_to_check, [os.path.join(app_root, config_folder)]) is None:
return f"Missing required config {config_to_check} inside {os.path.join(app_root, config_folder)} folder."
return ""
class DefaultAppValidator(AppValidator):
def __init__(self, site_type: str, config_folder: str = "config"):
self._site_type = site_type
self._config_folder = config_folder
def validate(self, app_folder: str) -> Tuple[str, Dict]:
result = dict()
app_root = os.path.abspath(app_folder)
if not os.path.exists(os.path.join(app_root, self._config_folder)):
return "Missing config folder inside app folder.", {}
err = _config_exists(app_root=app_root, config_folder=self._config_folder, site_type=self._site_type)
if err:
return err, {}
if os.path.exists(os.path.join(app_root, WorkspaceConstants.CUSTOM_FOLDER_NAME)):
result[AppValidationKey.BYOC] = True
return "", result
| NVFlare-main | nvflare/private/fed/app/default_app_validator.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Server / Client startup configer."""
import os
import re
import sys
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FilterKey, SiteType, SystemConfigs
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.config_service import ConfigService
from nvflare.fuel.utils.json_scanner import Node
from nvflare.fuel.utils.wfconf import ConfigContext, ConfigError
from nvflare.private.defs import SSLConstants
from nvflare.private.fed.utils.fed_utils import configure_logging
from nvflare.private.json_configer import JsonConfigurator
from nvflare.private.privacy_manager import PrivacyManager, Scope
from .deployer.base_client_deployer import BaseClientDeployer
from .deployer.server_deployer import ServerDeployer
from .fl_app_validator import FLAppValidator
FL_PACKAGES = ["nvflare"]
FL_MODULES = ["server", "client", "app_common", "private", "app_opt"]
class FLServerStarterConfiger(JsonConfigurator):
"""FL Server startup configer."""
def __init__(self, workspace: Workspace, args, kv_list=None):
"""Init the FLServerStarterConfiger.
Args:
            workspace: the workspace object
            args: command args
            kv_list: key value pair list
"""
site_custom_folder = workspace.get_site_custom_dir()
if os.path.isdir(site_custom_folder):
sys.path.append(site_custom_folder)
self.args = args
base_pkgs = FL_PACKAGES
module_names = FL_MODULES
if kv_list:
assert isinstance(kv_list, list), "cmd_vars must be list, but got {}".format(type(kv_list))
self.cmd_vars = parse_vars(kv_list)
else:
self.cmd_vars = {}
configure_logging(workspace)
server_startup_file_path = workspace.get_server_startup_file_path()
resource_config_path = workspace.get_resources_file_path()
config_files = [server_startup_file_path, resource_config_path]
if args.job_id:
# this is for job process
job_resources_file_path = workspace.get_job_resources_file_path()
if os.path.exists(job_resources_file_path):
config_files.append(job_resources_file_path)
JsonConfigurator.__init__(
self,
config_file_name=config_files,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=True,
)
self.components = {} # id => component
self.handlers = []
self.workspace = workspace
self.server_config_file_names = config_files
self.deployer = None
self.app_validator = None
self.snapshot_persistor = None
self.overseer_agent = None
self.site_org = ""
def start_config(self, config_ctx: ConfigContext):
"""Start the config process.
Args:
config_ctx: config context
"""
super().start_config(config_ctx)
# loading server specifications
try:
for server in self.config_data["servers"]:
if server.get(SSLConstants.PRIVATE_KEY):
server[SSLConstants.PRIVATE_KEY] = self.workspace.get_file_path_in_startup(
server[SSLConstants.PRIVATE_KEY]
)
if server.get(SSLConstants.CERT):
server[SSLConstants.CERT] = self.workspace.get_file_path_in_startup(server[SSLConstants.CERT])
if server.get(SSLConstants.ROOT_CERT):
server[SSLConstants.ROOT_CERT] = self.workspace.get_file_path_in_startup(
server[SSLConstants.ROOT_CERT]
)
except Exception:
raise ValueError(f"Server config error: '{self.server_config_file_names}'")
def build_component(self, config_dict):
t = super().build_component(config_dict)
if isinstance(t, FLComponent):
if type(t).__name__ not in [type(h).__name__ for h in self.handlers]:
self.handlers.append(t)
return t
def process_config_element(self, config_ctx: ConfigContext, node: Node):
"""Process the config element.
Args:
config_ctx: config context
node: element node
"""
# JsonConfigurator.process_config_element(self, config_ctx, node)
element = node.element
path = node.path()
if path == "app_validator" and isinstance(element, dict):
self.app_validator = self.build_component(element)
return
if path == "snapshot_persistor":
self.snapshot_persistor = self.build_component(element)
return
if path == "overseer_agent":
self.overseer_agent = self.build_component(element)
return
if re.search(r"^components\.#[0-9]+$", path):
c = self.build_component(element)
cid = element.get("id", None)
if not cid:
raise ConfigError("missing component id")
if not isinstance(cid, str):
raise ConfigError('"id" must be str but got {}'.format(type(cid)))
if cid in self.components:
raise ConfigError('duplicate component id "{}"'.format(cid))
self.components[cid] = c
return
def finalize_config(self, config_ctx: ConfigContext):
"""Finalize the config process.
Args:
config_ctx: config context
"""
secure_train = False
if self.cmd_vars.get("secure_train"):
secure_train = self.cmd_vars["secure_train"]
custom_validators = [self.app_validator] if self.app_validator else []
self.app_validator = FLAppValidator(site_type=SiteType.SERVER, custom_validators=custom_validators)
build_ctx = {
"secure_train": secure_train,
"app_validator": self.app_validator,
"server_config": self.config_data["servers"],
"server_host": self.cmd_vars.get("host", None),
"site_org": self.cmd_vars.get("org", ""),
"snapshot_persistor": self.snapshot_persistor,
"overseer_agent": self.overseer_agent,
"server_components": self.components,
"server_handlers": self.handlers,
}
deployer = ServerDeployer()
deployer.build(build_ctx)
self.deployer = deployer
self.site_org = build_ctx["site_org"]
ConfigService.initialize(
section_files={
SystemConfigs.STARTUP_CONF: os.path.basename(self.server_config_file_names[0]),
SystemConfigs.RESOURCES_CONF: os.path.basename(self.server_config_file_names[1]),
},
config_path=[self.args.workspace],
parsed_args=self.args,
var_dict=self.cmd_vars,
)
class FLClientStarterConfiger(JsonConfigurator):
"""FL Client startup configer."""
def __init__(self, workspace: Workspace, args, kv_list=None):
"""Init the FLClientStarterConfiger.
Args:
            workspace: the workspace object
            args: command args
            kv_list: key value pair list
"""
site_custom_folder = workspace.get_site_custom_dir()
if os.path.isdir(site_custom_folder):
sys.path.append(site_custom_folder)
self.args = args
base_pkgs = FL_PACKAGES
module_names = FL_MODULES
if kv_list:
assert isinstance(kv_list, list), "cmd_vars must be list, but got {}".format(type(kv_list))
self.cmd_vars = parse_vars(kv_list)
else:
self.cmd_vars = {}
configure_logging(workspace)
client_startup_file_path = workspace.get_client_startup_file_path()
resources_file_path = workspace.get_resources_file_path()
config_files = [client_startup_file_path, resources_file_path]
if args.job_id:
# this is for job process
job_resources_file_path = workspace.get_job_resources_file_path()
if os.path.exists(job_resources_file_path):
config_files.append(job_resources_file_path)
JsonConfigurator.__init__(
self,
config_file_name=config_files,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=True,
)
self.components = {} # id => component
self.handlers = []
self.workspace = workspace
self.client_config_file_names = config_files
self.base_deployer = None
self.overseer_agent = None
self.site_org = ""
self.app_validator = None
def process_config_element(self, config_ctx: ConfigContext, node: Node):
"""Process config element.
Args:
config_ctx: config context
node: element node
"""
element = node.element
path = node.path()
if path == "app_validator" and isinstance(element, dict):
self.app_validator = self.build_component(element)
return
if path == "overseer_agent":
self.overseer_agent = self.build_component(element)
return
if re.search(r"^components\.#[0-9]+$", path):
c = self.build_component(element)
cid = element.get("id", None)
if not cid:
raise ConfigError("missing component id")
if not isinstance(cid, str):
raise ConfigError('"id" must be str but got {}'.format(type(cid)))
if cid in self.components:
raise ConfigError('duplicate component id "{}"'.format(cid))
self.components[cid] = c
return
def build_component(self, config_dict):
t = super().build_component(config_dict)
if isinstance(t, FLComponent):
if type(t).__name__ not in [type(h).__name__ for h in self.handlers]:
self.handlers.append(t)
return t
def start_config(self, config_ctx: ConfigContext):
"""Start the config process.
Args:
config_ctx: config context
"""
super().start_config(config_ctx)
try:
client = self.config_data["client"]
if client.get(SSLConstants.PRIVATE_KEY):
client[SSLConstants.PRIVATE_KEY] = self.workspace.get_file_path_in_startup(
client[SSLConstants.PRIVATE_KEY]
)
if client.get(SSLConstants.CERT):
client[SSLConstants.CERT] = self.workspace.get_file_path_in_startup(client[SSLConstants.CERT])
if client.get(SSLConstants.ROOT_CERT):
client[SSLConstants.ROOT_CERT] = self.workspace.get_file_path_in_startup(client[SSLConstants.ROOT_CERT])
except Exception:
raise ValueError(f"Client config error: '{self.client_config_file_names}'")
def finalize_config(self, config_ctx: ConfigContext):
"""Finalize the config process.
Args:
config_ctx: config context
"""
secure_train = False
if self.cmd_vars.get("secure_train"):
secure_train = self.cmd_vars["secure_train"]
build_ctx = {
"client_name": self.cmd_vars.get("uid", ""),
"site_org": self.cmd_vars.get("org", ""),
"server_config": self.config_data.get("servers", []),
"client_config": self.config_data["client"],
"secure_train": secure_train,
"server_host": self.cmd_vars.get("host", None),
"overseer_agent": self.overseer_agent,
"client_components": self.components,
"client_handlers": self.handlers,
}
custom_validators = [self.app_validator] if self.app_validator else []
self.app_validator = FLAppValidator(site_type=SiteType.CLIENT, custom_validators=custom_validators)
self.site_org = build_ctx["site_org"]
self.base_deployer = BaseClientDeployer()
self.base_deployer.build(build_ctx)
ConfigService.initialize(
section_files={
SystemConfigs.STARTUP_CONF: os.path.basename(self.client_config_file_names[0]),
SystemConfigs.RESOURCES_CONF: os.path.basename(self.client_config_file_names[1]),
},
config_path=[self.args.workspace],
parsed_args=self.args,
var_dict=self.cmd_vars,
)
class FLAdminClientStarterConfigurator(JsonConfigurator):
"""FL Admin Client startup configurator."""
def __init__(self, workspace: Workspace):
"""Uses the json configuration to start the FL admin client.
Args:
workspace: the workspace object
"""
base_pkgs = FL_PACKAGES
module_names = FL_MODULES
admin_config_file_path = workspace.get_admin_startup_file_path()
JsonConfigurator.__init__(
self,
config_file_name=admin_config_file_path,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=True,
)
self.workspace = workspace
self.admin_config_file_path = admin_config_file_path
self.base_deployer = None
self.overseer_agent = None
def process_config_element(self, config_ctx: ConfigContext, node: Node):
"""Process config element.
Args:
config_ctx: config context
node: element node
"""
element = node.element
path = node.path()
if path == "admin.overseer_agent":
self.overseer_agent = self.build_component(element)
return
def start_config(self, config_ctx: ConfigContext):
"""Start the config process.
Args:
config_ctx: config context
"""
super().start_config(config_ctx)
try:
admin = self.config_data["admin"]
if admin.get("client_key"):
admin["client_key"] = self.workspace.get_file_path_in_startup(admin["client_key"])
if admin.get("client_cert"):
admin["client_cert"] = self.workspace.get_file_path_in_startup(admin["client_cert"])
if admin.get("ca_cert"):
admin["ca_cert"] = self.workspace.get_file_path_in_startup(admin["ca_cert"])
if admin.get("upload_dir"):
admin["upload_dir"] = self.workspace.get_file_path_in_root(admin["upload_dir"])
if admin.get("download_dir"):
admin["download_dir"] = self.workspace.get_file_path_in_root(admin["download_dir"])
except Exception:
raise ValueError(f"Client config error: '{self.admin_config_file_path}'")
class PrivacyConfiger(JsonConfigurator):
def __init__(self, workspace: Workspace, names_only: bool, is_server=False):
"""Uses the json configuration to start the FL admin client.
Args:
workspace: the workspace object
"""
self.privacy_manager = None
self.scopes = []
self.default_scope_name = None
self.components = {}
self.current_scope = None
self.names_only = names_only
self.is_server = is_server
privacy_file_path = workspace.get_site_privacy_file_path()
JsonConfigurator.__init__(
self,
config_file_name=privacy_file_path,
base_pkgs=FL_PACKAGES,
module_names=FL_MODULES,
exclude_libs=True,
)
def process_config_element(self, config_ctx: ConfigContext, node: Node):
"""Process config element.
Args:
config_ctx: config context
node: element node
"""
element = node.element
path = node.path()
if re.search(r"^scopes\.#[0-9]+$", path):
scope = Scope()
self.current_scope = scope
self.scopes.append(scope)
return
if re.search(r"^scopes\.#[0-9]+\.name$", path):
self.current_scope.set_name(element)
return
if path == "default_scope":
self.default_scope_name = element
return
if not self.names_only:
if re.search(r"^scopes\.#[0-9]+\.properties$", path):
self.current_scope.set_props(element)
return
if re.search(r"^scopes.#[0-9]+\.task_data_filters\.#[0-9]+$", path):
f = self.build_component(element)
direction = element.get("direction")
if direction:
direction = direction.lower()
else:
direction = FilterKey.OUT if self.is_server else FilterKey.IN
if f:
self.current_scope.add_task_data_filter(f, direction)
return
if re.search(r"^scopes.#[0-9]+\.task_result_filters\.#[0-9]+$", path):
f = self.build_component(element)
direction = element.get("direction")
if direction:
direction = direction.lower()
else:
direction = FilterKey.IN if self.is_server else FilterKey.OUT
if f:
self.current_scope.add_task_result_filter(f, direction)
return
if re.search(r"^components\.#[0-9]+$", path):
c = self.build_component(element)
cid = element.get("id", None)
if not cid:
raise ConfigError("missing component id")
if not isinstance(cid, str):
raise ConfigError('"id" must be str but got {}'.format(type(cid)))
if cid in self.components:
raise ConfigError('duplicate component id "{}"'.format(cid))
self.components[cid] = c
return
def finalize_config(self, config_ctx: ConfigContext):
self.privacy_manager = PrivacyManager(
scopes=self.scopes, default_scope_name=self.default_scope_name, components=self.components
)
def create_privacy_manager(workspace: Workspace, names_only: bool, is_server=False):
privacy_file_path = workspace.get_site_privacy_file_path()
if not os.path.isfile(privacy_file_path):
# privacy policy not defined
mgr = PrivacyManager(scopes=None, default_scope_name=None, components=None)
else:
configer = PrivacyConfiger(workspace, names_only, is_server=is_server)
configer.configure()
mgr = configer.privacy_manager
return mgr
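# A commented sketch of how create_privacy_manager is typically used at site startup (mirrors
# the pattern in server_train.py; the workspace path is a placeholder and PrivacyService comes
# from nvflare.private.privacy_manager):
#
#   workspace = Workspace(root_dir="/path/to/site_workspace", site_name="server")
#   privacy_manager = create_privacy_manager(workspace, names_only=True, is_server=True)
#   PrivacyService.initialize(privacy_manager)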
| NVFlare-main | nvflare/private/fed/app/fl_conf.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Application package."""
| NVFlare-main | nvflare/private/fed/app/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
from nvflare.apis.app_validation import AppValidator
from .default_app_validator import DefaultAppValidator
class FLAppValidator(AppValidator):
def __init__(self, site_type: str, custom_validators: Optional[List[AppValidator]] = None):
super().__init__()
self.validators = [DefaultAppValidator(site_type=site_type)]
if custom_validators:
if not isinstance(custom_validators, list):
raise TypeError("custom_validators must be list, but got {}".format(type(custom_validators)))
for validator in custom_validators:
if not isinstance(validator, AppValidator):
raise TypeError("validator must be AppValidator, but got {}".format(type(validator)))
self.validators.append(validator)
def validate(self, app_folder: str) -> Tuple[str, Dict]:
final_result = {}
for v in self.validators:
err, result = v.validate(app_folder)
if err:
return err, result
if result:
final_result.update(result)
return "", final_result
| NVFlare-main | nvflare/private/fed/app/fl_app_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import threading
import time
import psutil
from nvflare.fuel.hci.security import hash_password
from nvflare.private.defs import SSLConstants
from nvflare.private.fed.runner import Runner
from nvflare.private.fed.server.admin import FedAdminServer
from nvflare.private.fed.server.fed_server import FederatedServer
def monitor_parent_process(runner: Runner, parent_pid, stop_event: threading.Event):
while True:
if stop_event.is_set() or not psutil.pid_exists(parent_pid):
runner.stop()
break
time.sleep(1)
def check_parent_alive(parent_pid, stop_event: threading.Event):
while True:
if stop_event.is_set() or not psutil.pid_exists(parent_pid):
pid = os.getpid()
kill_child_processes(pid)
os.killpg(os.getpgid(pid), 9)
break
time.sleep(1)
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
process.send_signal(sig)
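# A commented sketch of how the watcher helpers above are typically wired up (mirrors the
# pattern in runner_process.py later in this package; `runner` is whatever Runner the process
# drives):
#
#   stop_event = threading.Event()
#   threading.Thread(target=monitor_parent_process, args=(runner, os.getppid(), stop_event)).start()
#   ...
#   stop_event.set()  # set on normal shutdown so the watcher thread can exit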
def create_admin_server(fl_server: FederatedServer, server_conf=None, args=None, secure_train=False):
"""To create the admin server.
Args:
fl_server: fl_server
server_conf: server config
args: command args
secure_train: True/False
Returns:
A FedAdminServer.
"""
users = {}
# Create a default user admin:admin for the POC insecure use case.
if not secure_train:
users = {"admin": hash_password("admin")}
root_cert = server_conf[SSLConstants.ROOT_CERT] if secure_train else None
server_cert = server_conf[SSLConstants.CERT] if secure_train else None
server_key = server_conf[SSLConstants.PRIVATE_KEY] if secure_train else None
admin_server = FedAdminServer(
cell=fl_server.cell,
fed_admin_interface=fl_server.engine,
users=users,
cmd_modules=fl_server.cmd_modules,
file_upload_dir=os.path.join(args.workspace, server_conf.get("admin_storage", "tmp")),
file_download_dir=os.path.join(args.workspace, server_conf.get("admin_storage", "tmp")),
host=server_conf.get("admin_host", "localhost"),
port=server_conf.get("admin_port", 5005),
ca_cert_file_name=root_cert,
server_cert_file_name=server_cert,
server_key_file_name=server_key,
accepted_client_cns=None,
download_job_url=server_conf.get("download_job_url", "http://"),
)
return admin_server
| NVFlare-main | nvflare/private/fed/app/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Server application packagee."""
| NVFlare-main | nvflare/private/fed/app/deployer/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.dict_utils import augment
from nvflare.fuel.utils.network_utils import get_open_ports
from nvflare.private.fed.app.utils import create_admin_server
from nvflare.private.fed.simulator.simulator_client_engine import SimulatorParentClientEngine
from nvflare.private.fed.simulator.simulator_server import SimulatorServer
from nvflare.security.logging import secure_format_exception
from .base_client_deployer import BaseClientDeployer
from .server_deployer import ServerDeployer
class SimulatorDeployer(ServerDeployer):
def __init__(self):
super().__init__()
self.open_ports = get_open_ports(2)
self.admin_storage = tempfile.mkdtemp()
def create_fl_server(self, args, secure_train=False):
simulator_server = self._create_simulator_server_config(self.admin_storage, args.max_clients)
heart_beat_timeout = simulator_server.get("heart_beat_timeout", 600)
services = SimulatorServer(
project_name=simulator_server.get("name", ""),
max_num_clients=simulator_server.get("max_num_clients", 100),
cmd_modules=self.cmd_modules,
args=args,
secure_train=secure_train,
snapshot_persistor=self.snapshot_persistor,
overseer_agent=self.overseer_agent,
heart_beat_timeout=heart_beat_timeout,
)
services.deploy(args, grpc_args=simulator_server)
admin_server = create_admin_server(
services,
server_conf=simulator_server,
args=args,
secure_train=False,
)
admin_server.start()
services.set_admin_server(admin_server)
# mpm.add_cleanup_cb(admin_server.stop)
return simulator_server, services
def create_fl_client(self, client_name, args):
client_config, build_ctx = self._create_simulator_client_config(client_name, args)
deployer = BaseClientDeployer()
deployer.build(build_ctx)
federated_client = deployer.create_fed_client(args)
self._create_client_cell(client_config, client_name, federated_client)
federated_client.register()
client_engine = SimulatorParentClientEngine(federated_client, federated_client.token, args)
federated_client.set_client_engine(client_engine)
# federated_client.start_heartbeat()
federated_client.run_manager = None
return federated_client, client_config, args, build_ctx
def _create_client_cell(self, client_config, client_name, federated_client):
target = client_config["servers"][0].get("service").get("target")
scheme = client_config["servers"][0].get("service").get("scheme", "grpc")
credentials = {}
parent_url = None
cell = Cell(
fqcn=client_name,
root_url=scheme + "://" + target,
secure=self.secure_train,
credentials=credentials,
create_internal_listener=False,
parent_url=parent_url,
)
cell.start()
federated_client.cell = cell
federated_client.communicator.cell = cell
# if self.engine:
# self.engine.admin_agent.register_cell_cb()
mpm.add_cleanup_cb(cell.stop)
def _create_simulator_server_config(self, admin_storage, max_clients):
simulator_server = {
"name": "simulator_server",
"service": {
"target": "localhost:" + str(self.open_ports[0]),
"scheme": "tcp",
},
"admin_host": "localhost",
"admin_port": self.open_ports[1],
"max_num_clients": max_clients,
"heart_beat_timeout": 600,
"num_server_workers": 4,
"compression": "Gzip",
"admin_storage": admin_storage,
"download_job_url": "http://download.server.com/",
}
return simulator_server
def _create_simulator_client_config(self, client_name, args):
client_config = {
"servers": [
{
"name": "simulator_server",
"service": {
"target": "localhost:" + str(self.open_ports[0]),
"scheme": "tcp",
},
}
],
"client": {"retry_timeout": 30, "compression": "Gzip"},
}
resources = os.path.join(args.workspace, "local/resources.json")
if os.path.exists(resources):
with open(resources) as file:
try:
data = json.load(file)
augment(to_dict=client_config, from_dict=data, from_override_to=False)
except Exception as e:
raise RuntimeError(f"Error processing config file {resources}: {secure_format_exception(e)}")
build_ctx = {
"client_name": client_name,
"server_config": client_config.get("servers", []),
"client_config": client_config["client"],
"server_host": None,
"secure_train": False,
"enable_byoc": True,
"overseer_agent": None,
"client_components": {},
"client_handlers": None,
}
return client_config, build_ctx
def close(self):
shutil.rmtree(self.admin_storage)
super().close()
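# A commented sketch of the simulator deployer life cycle (as driven by the SimulatorRunner;
# args is the simulator's parsed argument namespace and "site-1" is a placeholder client name):
#
#   deployer = SimulatorDeployer()
#   server_conf, server = deployer.create_fl_server(args)
#   federated_client, client_config, args, build_ctx = deployer.create_fl_client("site-1", args)
#   ...
#   deployer.close()  # removes the temporary admin storage folder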
| NVFlare-main | nvflare/private/fed/app/deployer/simulator_deployer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
from nvflare.apis.fl_context import FLContext
from nvflare.private.fed.client.admin import RequestProcessor
from nvflare.private.fed.client.client_req_processors import ClientRequestProcessors
from nvflare.private.fed.client.fed_client import FederatedClient
class BaseClientDeployer:
def __init__(self):
"""To init the BaseClientDeployer."""
self.multi_gpu = False
self.outbound_filters = None
self.inbound_filters = None
self.federated_client = None
self.model_validator = None
self.cross_val_participating = False
self.model_registry_path = None
self.cross_val_timeout = None
self.executors = None
self.req_processors = ClientRequestProcessors.request_processors
def build(self, build_ctx):
self.server_config = build_ctx["server_config"]
self.client_config = build_ctx["client_config"]
self.secure_train = build_ctx["secure_train"]
self.client_name = build_ctx["client_name"]
self.host = build_ctx["server_host"]
self.overseer_agent = build_ctx["overseer_agent"]
self.components = build_ctx["client_components"]
self.handlers = build_ctx["client_handlers"]
def set_model_manager(self, model_manager):
self.model_manager = model_manager
def create_fed_client(self, args, sp_target=None):
if sp_target:
for item in self.server_config:
service = item["service"]
service["target"] = sp_target
servers = [{t["name"]: t["service"]} for t in self.server_config]
retry_timeout = 30
if "retry_timeout" in self.client_config:
retry_timeout = self.client_config["retry_timeout"]
compression = grpc.Compression.NoCompression
if "Deflate" == self.client_config.get("compression"):
compression = grpc.Compression.Deflate
elif "Gzip" == self.client_config.get("compression"):
compression = grpc.Compression.Gzip
for _, processor in self.components.items():
if isinstance(processor, RequestProcessor):
self.req_processors.append(processor)
self.federated_client = FederatedClient(
client_name=str(self.client_name),
# We only deploy the first server right now .....
server_args=sorted(servers)[0],
client_args=self.client_config,
secure_train=self.secure_train,
retry_timeout=retry_timeout,
executors=self.executors,
compression=compression,
overseer_agent=self.overseer_agent,
args=args,
components=self.components,
handlers=self.handlers,
)
return self.federated_client
def finalize(self, fl_ctx: FLContext):
self.close()
def close(self):
# if self.federated_client:
# self.federated_client.model_manager.close()
pass
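# A commented sketch of how this deployer is driven (see SimulatorDeployer.create_fl_client and
# ClientTaskWorker._create_client elsewhere in this package; build_ctx carries the keys read in
# build() above):
#
#   deployer = BaseClientDeployer()
#   deployer.build(build_ctx)
#   federated_client = deployer.create_fed_client(args)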
| NVFlare-main | nvflare/private/fed/app/deployer/base_client_deployer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Server deployer."""
import threading
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import SystemComponents
from nvflare.apis.workspace import Workspace
from nvflare.private.fed.server.fed_server import FederatedServer
from nvflare.private.fed.server.job_runner import JobRunner
from nvflare.private.fed.server.run_manager import RunManager
from nvflare.private.fed.server.server_cmd_modules import ServerCommandModules
class ServerDeployer:
"""FL Server deployer."""
def __init__(self):
"""Init the ServerDeployer."""
self.cmd_modules = ServerCommandModules.cmd_modules
self.server_config = None
self.secure_train = None
self.app_validator = None
self.host = None
self.snapshot_persistor = None
self.overseer_agent = None
self.components = None
self.handlers = None
def build(self, build_ctx):
"""To build the ServerDeployer.
Args:
build_ctx: build context
"""
self.server_config = build_ctx["server_config"]
self.secure_train = build_ctx["secure_train"]
self.app_validator = build_ctx["app_validator"]
self.host = build_ctx["server_host"]
self.snapshot_persistor = build_ctx["snapshot_persistor"]
self.overseer_agent = build_ctx["overseer_agent"]
self.components = build_ctx["server_components"]
self.handlers = build_ctx["server_handlers"]
def create_fl_server(self, args, secure_train=False):
"""To create the FL Server.
Args:
args: command args
secure_train: True/False
Returns: FL Server
"""
# We only deploy the first server right now .....
first_server = sorted(self.server_config)[0]
heart_beat_timeout = first_server.get("heart_beat_timeout", 600)
if self.host:
target = first_server["service"].get("target", None)
first_server["service"]["target"] = self.host + ":" + target.split(":")[1]
services = FederatedServer(
project_name=first_server.get("name", ""),
min_num_clients=first_server.get("min_num_clients", 1),
max_num_clients=first_server.get("max_num_clients", 100),
cmd_modules=self.cmd_modules,
heart_beat_timeout=heart_beat_timeout,
args=args,
secure_train=secure_train,
snapshot_persistor=self.snapshot_persistor,
overseer_agent=self.overseer_agent,
shutdown_period=first_server.get("shutdown_period", 30.0),
check_engine_frequency=first_server.get("check_engine_frequency", 3.0),
)
return first_server, services
def deploy(self, args):
"""To deploy the FL server services.
Args:
args: command args.
Returns: FL Server
"""
first_server, services = self.create_fl_server(args, secure_train=self.secure_train)
services.deploy(args, grpc_args=first_server, secure_train=self.secure_train)
job_runner = JobRunner(workspace_root=args.workspace)
workspace = Workspace(args.workspace, "server", args.config_folder)
run_manager = RunManager(
server_name=services.project_name,
engine=services.engine,
job_id="",
workspace=workspace,
components=self.components,
handlers=self.handlers,
)
job_manager = self.components.get(SystemComponents.JOB_MANAGER)
services.engine.set_run_manager(run_manager)
services.engine.set_job_runner(job_runner, job_manager)
run_manager.add_handler(job_runner)
run_manager.add_component(SystemComponents.JOB_RUNNER, job_runner)
fl_ctx = services.engine.new_context()
threading.Thread(target=self._start_job_runner, args=[job_runner, fl_ctx]).start()
services.engine.fire_event(EventType.SYSTEM_START, services.engine.new_context())
print("deployed FL server trainer.")
return services
def _start_job_runner(self, job_runner, fl_ctx):
job_runner.run(fl_ctx)
def close(self):
"""To close the services."""
pass
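# A commented sketch of the server deployer life cycle (build_ctx is assembled by
# FLServerStarterConfiger.finalize_config and deploy() is called from server_train.py):
#
#   deployer = ServerDeployer()
#   deployer.build(build_ctx)          # server_config, secure_train, app_validator, ...
#   services = deployer.deploy(args)   # starts the FederatedServer and the JobRunner thread
#   ...
#   deployer.close()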
| NVFlare-main | nvflare/private/fed/app/deployer/server_deployer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a command line interface for federated server."""
import argparse
import logging
import os
import sys
import threading
from nvflare.apis.fl_constant import JobConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.sec.security_content_service import SecurityContentService
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.fl_conf import FLServerStarterConfiger
from nvflare.private.fed.app.utils import monitor_parent_process
from nvflare.private.fed.server.server_app_runner import ServerAppRunner
from nvflare.private.fed.server.server_state import HotState
from nvflare.private.fed.utils.fed_utils import (
add_logfile_handler,
create_stats_pool_files_for_job,
fobs_initialize,
set_stats_pool_config_for_job,
)
from nvflare.security.logging import secure_format_exception, secure_log_traceback
def main():
"""FL Server program starting point."""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument(
"--fed_server", "-s", type=str, help="an aggregation server specification json file", required=True
)
parser.add_argument("--app_root", "-r", type=str, help="App Root", required=True)
parser.add_argument("--job_id", "-n", type=str, help="job id", required=True)
parser.add_argument("--root_url", "-u", type=str, help="root_url", required=True)
parser.add_argument("--host", "-host", type=str, help="server host", required=True)
parser.add_argument("--port", "-port", type=str, help="service port", required=True)
parser.add_argument("--ssid", "-id", type=str, help="SSID", required=True)
parser.add_argument("--parent_url", "-p", type=str, help="parent_url", required=True)
parser.add_argument("--ha_mode", "-ha_mode", type=str, help="HA mode", required=True)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
args = parser.parse_args()
kv_list = parse_vars(args.set)
config_folder = kv_list.get("config_folder", "")
if config_folder == "":
args.server_config = JobConstants.SERVER_JOB_CONFIG
else:
args.server_config = os.path.join(config_folder, JobConstants.SERVER_JOB_CONFIG)
# TODO:: remove env and train config since they are not core
args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
args.config_folder = config_folder
args.log_config = None
args.snapshot = kv_list.get("restore_snapshot")
# get parent process id
parent_pid = os.getppid()
stop_event = threading.Event()
workspace = Workspace(root_dir=args.workspace, site_name="server")
set_stats_pool_config_for_job(workspace, args.job_id)
try:
os.chdir(args.workspace)
fobs_initialize()
SecurityContentService.initialize(content_folder=workspace.get_startup_kit_dir())
# Initialize audit service since the job execution will need it!
audit_file_name = workspace.get_audit_file_path()
AuditService.initialize(audit_file_name)
conf = FLServerStarterConfiger(
workspace=workspace,
args=args,
kv_list=args.set,
)
log_file = workspace.get_app_log_file_path(args.job_id)
add_logfile_handler(log_file)
logger = logging.getLogger("runner_process")
logger.info("Runner_process started.")
log_level = os.environ.get("FL_LOG_LEVEL", "")
numeric_level = getattr(logging, log_level.upper(), None)
if isinstance(numeric_level, int):
logging.getLogger().setLevel(numeric_level)
logger.debug("loglevel debug enabled")
logger.info("loglevel info enabled")
logger.warning("loglevel warn enabled")
logger.error("loglevel error enabled")
logger.critical("loglevel critical enabled")
conf.configure()
event_handlers = conf.handlers
deployer = conf.deployer
secure_train = conf.cmd_vars.get("secure_train", False)
try:
# create the FL server
server_config, server = deployer.create_fl_server(args, secure_train=secure_train)
server.ha_mode = eval(args.ha_mode)
server.cell = server.create_job_cell(
args.job_id, args.root_url, args.parent_url, secure_train, server_config
)
server.server_state = HotState(host=args.host, port=args.port, ssid=args.ssid)
snapshot = None
if args.snapshot:
snapshot = server.snapshot_persistor.retrieve_run(args.job_id)
server_app_runner = ServerAppRunner(server)
# start parent process checking thread
thread = threading.Thread(target=monitor_parent_process, args=(server_app_runner, parent_pid, stop_event))
thread.start()
server_app_runner.start_server_app(
workspace, args, args.app_root, args.job_id, snapshot, logger, args.set, event_handlers=event_handlers
)
finally:
if deployer:
deployer.close()
stop_event.set()
AuditService.close()
err = create_stats_pool_files_for_job(workspace, args.job_id)
if err:
logger.warning(err)
except ConfigError as e:
logger = logging.getLogger("runner_process")
logger.exception(f"ConfigError: {secure_format_exception(e)}")
secure_log_traceback(logger)
raise e
if __name__ == "__main__":
"""
This is the program when starting the child process for running the NVIDIA FLARE server runner.
"""
# main()
rc = mpm.run(main_func=main)
sys.exit(rc)
| NVFlare-main | nvflare/private/fed/app/server/runner_process.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Server application packagee."""
| NVFlare-main | nvflare/private/fed/app/server/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated server launching script."""
import argparse
import logging
import os
import sys
import time
from nvflare.apis.fl_constant import JobConstants, SiteType, WorkspaceConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.fl_conf import FLServerStarterConfiger, create_privacy_manager
from nvflare.private.fed.app.utils import create_admin_server
from nvflare.private.fed.server.server_status import ServerStatus
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize, security_init
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
def main():
if sys.version_info >= (3, 11):
raise RuntimeError("Python versions 3.11 and above are not yet supported. Please use Python 3.8, 3.9 or 3.10.")
if sys.version_info < (3, 8):
raise RuntimeError("Python versions 3.7 and below are not supported. Please use Python 3.8, 3.9 or 3.10")
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument(
"--fed_server", "-s", type=str, help="an aggregation server specification json file", required=True
)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
args = parser.parse_args()
kv_list = parse_vars(args.set)
config_folder = kv_list.get("config_folder", "")
if config_folder == "":
args.server_config = JobConstants.SERVER_JOB_CONFIG
else:
args.server_config = os.path.join(config_folder, JobConstants.SERVER_JOB_CONFIG)
# TODO:: remove env and train config since they are not core
args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
args.train_config = os.path.join("config", AppFolderConstants.CONFIG_TRAIN)
args.config_folder = config_folder
logger = logging.getLogger()
args.log_config = None
args.job_id = None
workspace = Workspace(root_dir=args.workspace, site_name="server")
for name in [WorkspaceConstants.RESTART_FILE, WorkspaceConstants.SHUTDOWN_FILE]:
try:
f = workspace.get_file_path_in_root(name)
if os.path.exists(f):
os.remove(f)
except Exception:
print(f"Could not remove file '{name}'. Please check your system before starting FL.")
sys.exit(-1)
try:
os.chdir(args.workspace)
fobs_initialize()
conf = FLServerStarterConfiger(
workspace=workspace,
args=args,
kv_list=args.set,
)
log_level = os.environ.get("FL_LOG_LEVEL", "")
numeric_level = getattr(logging, log_level.upper(), None)
if isinstance(numeric_level, int):
logging.getLogger().setLevel(numeric_level)
logger.debug("loglevel debug enabled")
logger.info("loglevel info enabled")
logger.warning("loglevel warn enabled")
logger.error("loglevel error enabled")
logger.critical("loglevel critical enabled")
conf.configure()
log_file = workspace.get_log_file_path()
add_logfile_handler(log_file)
deployer = conf.deployer
secure_train = conf.cmd_vars.get("secure_train", False)
security_init(
secure_train=secure_train,
site_org=conf.site_org,
workspace=workspace,
app_validator=conf.app_validator,
site_type=SiteType.SERVER,
)
# initialize Privacy Service
privacy_manager = create_privacy_manager(workspace, names_only=True, is_server=True)
PrivacyService.initialize(privacy_manager)
admin_server = None
try:
# Deploy the FL server
services = deployer.deploy(args)
first_server = sorted(conf.config_data["servers"])[0]
# allow command to overwrite the admin_host
if conf.cmd_vars.get("host", None):
first_server["admin_host"] = conf.cmd_vars["host"]
admin_server = create_admin_server(
services,
server_conf=first_server,
args=args,
secure_train=secure_train,
)
admin_server.start()
services.set_admin_server(admin_server)
# mpm.add_cleanup_cb(admin_server.stop)
finally:
deployer.close()
logger.info("Server started")
# From Python 3.9 and above, the ThreadPoolExecutor does not allow submit() to create a new thread while the
# main thread has exited. Use the ServerStatus.SHUTDOWN to keep the main thread waiting for the gRPC
# server to be shutdown.
while services.status != ServerStatus.SHUTDOWN:
time.sleep(1.0)
if admin_server:
admin_server.stop()
services.engine.close()
except ConfigError as e:
logger.exception(f"ConfigError: {secure_format_exception(e)}")
raise e
if __name__ == "__main__":
"""
This is the main program when starting the NVIDIA FLARE server process.
"""
rc = mpm.run(main_func=main)
sys.exit(rc)
| NVFlare-main | nvflare/private/fed/app/server/server_train.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging.config
import os
import sys
import threading
import time
from multiprocessing.connection import Listener
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, WorkspaceConstants
from nvflare.fuel.common.multi_process_executor_constants import CommunicationMetaData
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.hci.server.authz import AuthorizationService
from nvflare.fuel.sec.audit import AuditService
from nvflare.private.fed.app.deployer.base_client_deployer import BaseClientDeployer
from nvflare.private.fed.app.utils import check_parent_alive
from nvflare.private.fed.client.client_engine import ClientEngine
from nvflare.private.fed.client.client_status import ClientStatus
from nvflare.private.fed.client.fed_client import FederatedClient
from nvflare.private.fed.simulator.simulator_app_runner import SimulatorClientAppRunner
from nvflare.private.fed.simulator.simulator_audit import SimulatorAuditor
from nvflare.private.fed.simulator.simulator_const import SimulatorConstants
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize
from nvflare.security.logging import secure_format_exception
from nvflare.security.security import EmptyAuthorizer
CELL_CONNECT_CHECK_TIMEOUT = 10.0
FETCH_TASK_RUN_RETRY = 3
class ClientTaskWorker(FLComponent):
def create_client_engine(self, federated_client: FederatedClient, args, rank=0):
client_engine = ClientEngine(federated_client, federated_client.token, args, rank)
federated_client.set_client_engine(client_engine)
federated_client.run_manager = None
client_engine.fire_event(EventType.SYSTEM_START, client_engine.new_context())
def create_client_runner(self, client):
"""Create the ClientRunner for the client to run the ClientApp.
Args:
client: the client to run
"""
app_client_root = client.app_client_root
args = client.args
args.client_name = client.client_name
args.token = client.token
client_app_runner = SimulatorClientAppRunner()
client_app_runner.client_runner = client_app_runner.create_client_runner(
app_client_root, args, args.config_folder, client, False
)
client_runner = client_app_runner.client_runner
with client_runner.engine.new_context() as fl_ctx:
client_app_runner.start_command_agent(args, client, fl_ctx)
client_app_runner.sync_up_parents_process(client)
client_runner.engine.cell = client.cell
client_runner.init_run(app_client_root, args)
def do_one_task(self, client):
stop_run = False
# Create the ClientRunManager and ClientRunner for the new client to run
try:
if client.run_manager is None:
self.create_client_runner(client)
self.logger.info(f"Initialize ClientRunner for client: {client.client_name}")
with client.run_manager.new_context() as fl_ctx:
client_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
self.fire_event(EventType.SWAP_IN, fl_ctx)
run_task_tries = 0
while True:
interval, task_processed = client_runner.fetch_and_run_one_task(fl_ctx)
if task_processed:
self.logger.info(
f"Finished one task run for client: {client.client_name} "
f"interval: {interval} task_processed: {task_processed}"
)
# if any client got the END_RUN event, stop the simulator run.
if client_runner.run_abort_signal.triggered:
stop_run = True
self.logger.info("End the Simulator run.")
break
else:
if task_processed:
break
else:
run_task_tries += 1
if run_task_tries >= FETCH_TASK_RUN_RETRY:
break
time.sleep(0.5)
except Exception as e:
self.logger.error(f"do_one_task execute exception: {secure_format_exception(e)}")
interval = 1.0
stop_run = True
return interval, stop_run
def release_resources(self, client):
if client.run_manager:
with client.run_manager.new_context() as fl_ctx:
self.fire_event(EventType.SWAP_OUT, fl_ctx)
fl_ctx.set_prop(FLContextKey.RUNNER, None, private=True)
self.logger.info(f"Clean up ClientRunner for : {client.client_name} ")
def run(self, args, conn):
self.logger.info("ClientTaskWorker started to run")
admin_agent = None
client = None
try:
data = conn.recv()
client_config = data[SimulatorConstants.CLIENT_CONFIG]
deploy_args = data[SimulatorConstants.DEPLOY_ARGS]
build_ctx = data[SimulatorConstants.BUILD_CTX]
client = self._create_client(args, build_ctx, deploy_args)
app_root = os.path.join(args.workspace, SimulatorConstants.JOB_NAME, "app_" + client.client_name)
app_custom_folder = os.path.join(app_root, "custom")
sys.path.append(app_custom_folder)
self.create_client_engine(client, deploy_args)
while True:
interval, stop_run = self.do_one_task(client)
conn.send(stop_run)
continue_run = conn.recv()
if not continue_run:
self.release_resources(client)
break
time.sleep(interval)
except Exception as e:
self.logger.error(f"ClientTaskWorker run error: {secure_format_exception(e)}")
finally:
if client:
client.cell.stop()
if admin_agent:
admin_agent.shutdown()
def _create_client(self, args, build_ctx, deploy_args):
deployer = BaseClientDeployer()
deployer.build(build_ctx)
client = deployer.create_fed_client(deploy_args)
client.token = args.token
self._set_client_status(client, deploy_args, args.simulator_root)
start = time.time()
self._create_client_cell(client, args.root_url, args.parent_url)
self.logger.debug(f"Complete _create_client_cell. Time to create client job cell: {time.time() - start}")
return client
def _set_client_status(self, client, deploy_args, simulator_root):
app_client_root = os.path.join(simulator_root, "app_" + client.client_name)
client.app_client_root = app_client_root
client.args = deploy_args
# self.create_client_runner(client)
client.simulate_running = False
client.status = ClientStatus.STARTED
def _create_client_cell(self, federated_client, root_url, parent_url):
fqcn = FQCN.join([federated_client.client_name, SimulatorConstants.JOB_NAME])
credentials = {}
parent_url = None
cell = Cell(
fqcn=fqcn,
root_url=root_url,
secure=False,
credentials=credentials,
create_internal_listener=False,
parent_url=parent_url,
)
cell.start()
mpm.add_cleanup_cb(cell.stop)
federated_client.cell = cell
federated_client.communicator.cell = cell
start = time.time()
while not cell.is_cell_connected(FQCN.ROOT_SERVER):
time.sleep(0.1)
if time.time() - start > CELL_CONNECT_CHECK_TIMEOUT:
raise RuntimeError("Could not connect to the server cell.")
def _create_connection(listen_port):
address = ("localhost", int(listen_port))
listener = Listener(address, authkey=CommunicationMetaData.CHILD_PASSWORD.encode())
conn = listener.accept()
return conn
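# A commented sketch of the parent-side counterpart to _create_connection (the simulator runner
# is expected to connect with the same port and authkey; the payload keys match what
# ClientTaskWorker.run() receives above):
#
#   from multiprocessing.connection import Client
#   conn = Client(("localhost", int(port)), authkey=CommunicationMetaData.CHILD_PASSWORD.encode())
#   conn.send({
#       SimulatorConstants.CLIENT_CONFIG: client_config,
#       SimulatorConstants.DEPLOY_ARGS: deploy_args,
#       SimulatorConstants.BUILD_CTX: build_ctx,
#   })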
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-o", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--client", type=str, help="Client name", required=True)
parser.add_argument("--token", type=str, help="Client token", required=True)
parser.add_argument("--port", type=str, help="Listen port", required=True)
parser.add_argument("--gpu", "-g", type=str, help="gpu index number")
parser.add_argument("--parent_pid", type=int, help="parent process pid", required=True)
parser.add_argument("--simulator_root", "-root", type=str, help="Simulator root folder")
parser.add_argument("--root_url", "-r", type=str, help="cellnet root_url")
parser.add_argument("--parent_url", "-p", type=str, help="cellnet parent_url")
args = parser.parse_args()
# start parent process checking thread
parent_pid = args.parent_pid
stop_event = threading.Event()
thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))
thread.start()
log_config_file_path = os.path.join(args.workspace, "startup", WorkspaceConstants.LOGGING_CONFIG)
if not os.path.isfile(log_config_file_path):
log_config_file_path = os.path.join(os.path.dirname(__file__), WorkspaceConstants.LOGGING_CONFIG)
logging.config.fileConfig(fname=log_config_file_path, disable_existing_loggers=False)
workspace = os.path.join(args.workspace, SimulatorConstants.JOB_NAME, "app_" + args.client)
log_file = os.path.join(workspace, WorkspaceConstants.LOG_FILE_NAME)
add_logfile_handler(log_file)
os.chdir(workspace)
fobs_initialize()
AuthorizationService.initialize(EmptyAuthorizer())
# AuditService.initialize(audit_file_name=WorkspaceConstants.AUDIT_LOG)
AuditService.the_auditor = SimulatorAuditor()
if args.gpu:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
conn = _create_connection(args.port)
try:
task_worker = ClientTaskWorker()
task_worker.run(args, conn)
finally:
stop_event.set()
conn.close()
AuditService.close()
if __name__ == "__main__":
"""
    This is the main program of the simulator worker process when running the NVFlare Simulator.
"""
# main()
mpm.run(main_func=main)
time.sleep(2)
# os._exit(0)
| NVFlare-main | nvflare/private/fed/app/simulator/simulator_worker.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
from argparse import Namespace
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Manager, Process
from multiprocessing.connection import Client
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import JobConstants, MachineStatus, RunProcessKey, WorkspaceConstants
from nvflare.apis.job_def import ALL_SITES, JobMetaKey
from nvflare.apis.utils.job_utils import convert_legacy_zipped_app_to_job
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.multi_process_executor_constants import CommunicationMetaData
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.fuel.hci.server.authz import AuthorizationService
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.gpu_utils import get_host_gpu_ids
from nvflare.fuel.utils.network_utils import get_open_ports
from nvflare.fuel.utils.zip_utils import split_path, unzip_all_from_bytes, zip_directory_to_bytes
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.deployer.simulator_deployer import SimulatorDeployer
from nvflare.private.fed.app.utils import kill_child_processes
from nvflare.private.fed.client.client_status import ClientStatus
from nvflare.private.fed.server.job_meta_validator import JobMetaValidator
from nvflare.private.fed.simulator.simulator_app_runner import SimulatorServerAppRunner
from nvflare.private.fed.simulator.simulator_audit import SimulatorAuditor
from nvflare.private.fed.simulator.simulator_const import SimulatorConstants
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize, split_gpus
from nvflare.security.logging import secure_format_exception
from nvflare.security.security import EmptyAuthorizer
CLIENT_CREATE_POOL_SIZE = 200
POOL_STATS_DIR = "pool_stats"
SIMULATOR_POOL_STATS = "simulator_cell_stats.json"
class SimulatorRunner(FLComponent):
def __init__(
self, job_folder: str, workspace: str, clients=None, n_clients=None, threads=None, gpu=None, max_clients=100
):
super().__init__()
self.job_folder = job_folder
self.workspace = workspace
self.clients = clients
self.n_clients = n_clients
self.threads = threads
self.gpu = gpu
self.max_clients = max_clients
self.ask_to_stop = False
self.simulator_root = None
self.server = None
self.deployer = SimulatorDeployer()
self.client_names = []
self.federated_clients = []
self.client_config = None
self.deploy_args = None
self.build_ctx = None
self.clients_created = 0
def _generate_args(
self, job_folder: str, workspace: str, clients=None, n_clients=None, threads=None, gpu=None, max_clients=100
):
args = Namespace(
job_folder=job_folder,
workspace=workspace,
clients=clients,
n_clients=n_clients,
threads=threads,
gpu=gpu,
max_clients=max_clients,
)
args.set = []
return args
def setup(self):
running_dir = os.getcwd()
if self.workspace is None:
self.workspace = "simulator_workspace"
self.logger.warn(
f"Simulator workspace is not provided. Set it to the default location:"
f" {os.path.join(running_dir, self.workspace)}"
)
self.workspace = os.path.join(running_dir, self.workspace)
self.args = self._generate_args(
self.job_folder, self.workspace, self.clients, self.n_clients, self.threads, self.gpu, self.max_clients
)
if self.args.clients:
self.client_names = self.args.clients.strip().split(",")
else:
if self.args.n_clients:
for i in range(self.args.n_clients):
self.client_names.append("site-" + str(i + 1))
log_config_file_path = os.path.join(self.args.workspace, "startup", WorkspaceConstants.LOGGING_CONFIG)
if not os.path.isfile(log_config_file_path):
log_config_file_path = os.path.join(os.path.dirname(__file__), WorkspaceConstants.LOGGING_CONFIG)
logging.config.fileConfig(fname=log_config_file_path, disable_existing_loggers=False)
local_dir = os.path.join(self.args.workspace, "local")
os.makedirs(local_dir, exist_ok=True)
shutil.copyfile(log_config_file_path, os.path.join(local_dir, WorkspaceConstants.LOGGING_CONFIG))
self.args.log_config = None
self.args.config_folder = "config"
self.args.job_id = SimulatorConstants.JOB_NAME
self.args.client_config = os.path.join(self.args.config_folder, JobConstants.CLIENT_JOB_CONFIG)
self.args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
cwd = os.getcwd()
self.args.job_folder = os.path.join(cwd, self.args.job_folder)
if not os.path.exists(self.args.workspace):
os.makedirs(self.args.workspace)
os.chdir(self.args.workspace)
fobs_initialize()
AuthorizationService.initialize(EmptyAuthorizer())
AuditService.the_auditor = SimulatorAuditor()
self.simulator_root = os.path.join(self.args.workspace, SimulatorConstants.JOB_NAME)
if os.path.exists(self.simulator_root):
shutil.rmtree(self.simulator_root)
os.makedirs(self.simulator_root)
log_file = os.path.join(self.simulator_root, WorkspaceConstants.LOG_FILE_NAME)
add_logfile_handler(log_file)
try:
data_bytes, job_name, meta = self.validate_job_data()
if not self.client_names:
self.client_names = self._extract_client_names_from_meta(meta)
if not self.client_names:
self.args.n_clients = 2
self.logger.warn("The number of simulator clients is not provided. Setting it to default: 2")
for i in range(self.args.n_clients):
self.client_names.append("site-" + str(i + 1))
if self.args.gpu is None and self.args.threads is None:
self.args.threads = 1
self.logger.warn("The number of threads is not provided. Set it to default: 1")
if self.max_clients < len(self.client_names):
self.logger.error(
f"The number of clients ({len(self.client_names)}) can not be more than the "
f"max_number of clients ({self.max_clients})"
)
return False
if self.args.gpu:
try:
gpu_groups = split_gpus(self.args.gpu)
except ValueError as e:
self.logger.error(f"GPUs group list option in wrong format. Error: {e}")
return False
host_gpus = [str(x) for x in (get_host_gpu_ids())]
gpu_ids = [x.split(",") for x in gpu_groups]
if host_gpus and not set().union(*gpu_ids).issubset(host_gpus):
wrong_gpus = [x for x in set().union(*gpu_ids) if x not in host_gpus]
self.logger.error(f"These GPUs are not available: {wrong_gpus}")
return False
if len(gpu_groups) > len(self.client_names):
self.logger.error(
f"The number of clients ({len(self.client_names)}) must be larger than or equal to "
f"the number of GPU groups: ({len(gpu_groups)})"
)
return False
if len(gpu_groups) > 1:
if self.args.threads and self.args.threads > 1:
self.logger.info(
"When running with multi GPU, each GPU group will run with only 1 thread. "
"Set the Threads to 1."
)
self.args.threads = 1
elif len(gpu_groups) == 1:
if self.args.threads is None:
self.args.threads = 1
self.logger.warn("The number of threads is not provided. Set it to default: 1")
if self.args.threads and self.args.threads > len(self.client_names):
self.logger.error("The number of threads to run can not be larger than the number of clients.")
return False
if not (self.args.gpu or self.args.threads):
self.logger.error("Please provide the number of threads or provide gpu options to run the simulator.")
return False
self._validate_client_names(meta, self.client_names)
# Deploy the FL server
self.logger.info("Create the Simulator Server.")
simulator_server, self.server = self.deployer.create_fl_server(self.args)
# self.services.deploy(self.args, grpc_args=simulator_server)
self.logger.info("Deploy the Apps.")
self._deploy_apps(job_name, data_bytes, meta)
return True
except Exception as e:
self.logger.error(f"Simulator setup error: {secure_format_exception(e)}")
return False
def validate_job_data(self):
# Validate the simulate job
job_name = split_path(self.args.job_folder)[1]
data = zip_directory_to_bytes("", self.args.job_folder)
data_bytes = convert_legacy_zipped_app_to_job(data)
job_validator = JobMetaValidator()
valid, error, meta = job_validator.validate(job_name, data_bytes)
if not valid:
raise RuntimeError(error)
return data_bytes, job_name, meta
def _extract_client_names_from_meta(self, meta):
client_names = []
for _, participants in meta.get(JobMetaKey.DEPLOY_MAP, {}).items():
for p in participants:
if p.upper() != ALL_SITES and p != "server":
client_names.append(p)
return client_names
def _validate_client_names(self, meta, client_names):
no_app_clients = []
for name in client_names:
name_matched = False
for _, participants in meta.get(JobMetaKey.DEPLOY_MAP, {}).items():
if len(participants) == 1 and participants[0].upper() == ALL_SITES:
name_matched = True
break
if name in participants:
name_matched = True
break
if not name_matched:
no_app_clients.append(name)
if no_app_clients:
raise RuntimeError(f"The job does not have App to run for clients: {no_app_clients}")
def _deploy_apps(self, job_name, data_bytes, meta):
with tempfile.TemporaryDirectory() as temp_dir:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.mkdir(temp_dir)
unzip_all_from_bytes(data_bytes, temp_dir)
temp_job_folder = os.path.join(temp_dir, job_name)
app_server_root = os.path.join(self.simulator_root, "app_server")
for app_name, participants in meta.get(JobMetaKey.DEPLOY_MAP).items():
if len(participants) == 1 and participants[0].upper() == ALL_SITES:
participants = ["server"]
participants.extend([client for client in self.client_names])
for p in participants:
if p == "server":
app = os.path.join(temp_job_folder, app_name)
shutil.copytree(app, app_server_root)
elif p in self.client_names:
app_client_root = os.path.join(self.simulator_root, "app_" + p)
app = os.path.join(temp_job_folder, app_name)
shutil.copytree(app, app_client_root)
job_meta_file = os.path.join(self.simulator_root, WorkspaceConstants.JOB_META_FILE)
with open(job_meta_file, "w") as f:
json.dump(meta, f, indent=4)
def split_clients(self, clients: [], gpus: []):
split_clients = []
for _ in gpus:
split_clients.append([])
index = 0
for client in clients:
split_clients[index % len(gpus)].append(client)
index += 1
return split_clients
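# For illustration (a sketch of the round-robin split performed above, not part of
# the class): with clients ["c1", "c2", "c3", "c4", "c5"] and gpus ["0", "1"],
# index % len(gpus) assigns c1, c3, c5 to GPU group "0" and c2, c4 to group "1",
# so the result is [["c1", "c3", "c5"], ["c2", "c4"]].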
def create_clients(self):
# Deploy the FL clients
self.logger.info("Create the simulate clients.")
clients_created_waiter = threading.Event()
for client_name in self.client_names:
self.create_client(client_name)
self.logger.info("Set the client status ready.")
self._set_client_status()
def create_client(self, client_name):
client, self.client_config, self.deploy_args, self.build_ctx = self.deployer.create_fl_client(
client_name, self.args
)
self.federated_clients.append(client)
app_root = os.path.join(self.simulator_root, "app_" + client_name)
app_custom_folder = os.path.join(app_root, "custom")
sys.path.append(app_custom_folder)
def _set_client_status(self):
for client in self.federated_clients:
app_client_root = os.path.join(self.simulator_root, "app_" + client.client_name)
client.app_client_root = app_client_root
client.args = self.args
# self.create_client_runner(client)
client.simulate_running = False
client.status = ClientStatus.STARTED
def run(self):
try:
manager = Manager()
return_dict = manager.dict()
process = Process(target=self.run_processs, args=(return_dict,))
process.start()
process.join()
if process.exitcode == -9:
run_status = process.exitcode
else:
run_status = return_dict["run_status"]
return run_status
except KeyboardInterrupt:
self.logger.info("KeyboardInterrupt, terminate all the child processes.")
kill_child_processes(os.getpid())
return -9
def run_processs(self, return_dict):
# run_status = self.simulator_run_main()
run_status = mpm.run(main_func=self.simulator_run_main, shutdown_grace_time=3, cleanup_grace_time=6)
return_dict["run_status"] = run_status
def simulator_run_main(self):
if self.setup():
try:
self.create_clients()
self.server.engine.run_processes[SimulatorConstants.JOB_NAME] = {
RunProcessKey.LISTEN_PORT: None,
RunProcessKey.CONNECTION: None,
RunProcessKey.CHILD_PROCESS: None,
RunProcessKey.JOB_ID: SimulatorConstants.JOB_NAME,
RunProcessKey.PARTICIPANTS: self.server.engine.client_manager.clients,
}
self.logger.info("Deploy and start the Server App.")
server_thread = threading.Thread(target=self.start_server_app, args=[])
server_thread.start()
# wait for the server app to start
while self.server.engine.engine_info.status != MachineStatus.STARTED:
time.sleep(1.0)
if not server_thread.is_alive():
raise RuntimeError("Could not start the Server App.")
# # Start the client heartbeat calls.
# for client in self.federated_clients:
# client.start_heartbeat(interval=2)
if self.args.gpu:
gpus = split_gpus(self.args.gpu)
split_clients = self.split_clients(self.federated_clients, gpus)
else:
gpus = [None]
split_clients = [self.federated_clients]
executor = ThreadPoolExecutor(max_workers=len(gpus))
for index in range(len(gpus)):
clients = split_clients[index]
executor.submit(lambda p: self.client_run(*p), [clients, gpus[index]])
executor.shutdown()
# Abort the server after all clients finished run
self.server.abort_run()
server_thread.join()
run_status = 0
except Exception as e:
self.logger.error(f"Simulator run error: {secure_format_exception(e)}")
run_status = 2
finally:
# self.services.close()
self.deployer.close()
else:
run_status = 1
return run_status
def client_run(self, clients, gpu):
client_runner = SimulatorClientRunner(self.args, clients, self.client_config, self.deploy_args, self.build_ctx)
client_runner.run(gpu)
def start_server_app(self):
app_server_root = os.path.join(self.simulator_root, "app_server")
self.args.server_config = os.path.join("config", JobConstants.SERVER_JOB_CONFIG)
app_custom_folder = os.path.join(app_server_root, "custom")
sys.path.append(app_custom_folder)
startup = os.path.join(self.args.workspace, WorkspaceConstants.STARTUP_FOLDER_NAME)
os.makedirs(startup, exist_ok=True)
local = os.path.join(self.args.workspace, WorkspaceConstants.SITE_FOLDER_NAME)
os.makedirs(local, exist_ok=True)
workspace = Workspace(root_dir=self.args.workspace, site_name="server")
self.server.job_cell = self.server.create_job_cell(
SimulatorConstants.JOB_NAME,
self.server.cell.get_root_url_for_child(),
self.server.cell.get_internal_listener_url(),
False,
None,
)
server_app_runner = SimulatorServerAppRunner(self.server)
snapshot = None
server_app_runner.start_server_app(
workspace, self.args, app_server_root, self.args.job_id, snapshot, self.logger
)
# start = time.time()
# while self.services.engine.client_manager.clients:
# # Wait for the clients to shut down and quit first.
# time.sleep(0.1)
# if time.time() - start > 30.:
# break
self.dump_stats(workspace)
self.server.admin_server.stop()
self.server.close()
def dump_stats(self, workspace: Workspace):
stats_dict = StatsPoolManager.to_dict()
json_object = json.dumps(stats_dict, indent=4)
os.makedirs(os.path.join(workspace.get_run_dir(SimulatorConstants.JOB_NAME), POOL_STATS_DIR))
file = os.path.join(workspace.get_run_dir(SimulatorConstants.JOB_NAME), POOL_STATS_DIR, SIMULATOR_POOL_STATS)
with open(file, "w") as outfile:
outfile.write(json_object)
class SimulatorClientRunner(FLComponent):
def __init__(self, args, clients: [], client_config, deploy_args, build_ctx):
super().__init__()
self.args = args
self.federated_clients = clients
self.run_client_index = -1
self.simulator_root = os.path.join(self.args.workspace, SimulatorConstants.JOB_NAME)
self.client_config = client_config
self.deploy_args = deploy_args
self.build_ctx = build_ctx
self.kv_list = parse_vars(args.set)
def run(self, gpu):
try:
# self.create_clients()
self.logger.info("Start the clients run simulation.")
executor = ThreadPoolExecutor(max_workers=self.args.threads)
lock = threading.Lock()
timeout = self.kv_list.get("simulator_worker_timeout", 60.0)
for i in range(self.args.threads):
executor.submit(lambda p: self.run_client_thread(*p), [self.args.threads, gpu, lock, timeout])
# wait for the server and client running thread to finish.
executor.shutdown()
except Exception as e:
self.logger.error(f"SimulatorClientRunner run error: {secure_format_exception(e)}")
finally:
for client in self.federated_clients:
threading.Thread(target=self._shutdown_client, args=[client]).start()
def _shutdown_client(self, client):
try:
client.communicator.heartbeat_done = True
# time.sleep(3)
client.terminate()
# client.close()
client.status = ClientStatus.STOPPED
client.communicator.cell.stop()
except:
# Ignore the exception for the simulator client shutdown
self.logger.warn(f"Exception happened to client{client.name} during shutdown ")
def run_client_thread(self, num_of_threads, gpu, lock, timeout=60):
stop_run = False
interval = 1
client_to_run = None # indicates the next client to run
try:
while not stop_run:
time.sleep(interval)
with lock:
if not client_to_run:
client = self.get_next_run_client(gpu)
else:
client = client_to_run
client.simulate_running = True
stop_run, client_to_run = self.do_one_task(client, num_of_threads, gpu, lock, timeout=timeout)
client.simulate_running = False
except Exception as e:
self.logger.error(f"run_client_thread error: {secure_format_exception(e)}")
def do_one_task(self, client, num_of_threads, gpu, lock, timeout=60.0):
open_port = get_open_ports(1)[0]
command = (
sys.executable
+ " -m nvflare.private.fed.app.simulator.simulator_worker -o "
+ self.args.workspace
+ " --client "
+ client.client_name
+ " --token "
+ client.token
+ " --port "
+ str(open_port)
+ " --parent_pid "
+ str(os.getpid())
+ " --simulator_root "
+ self.simulator_root
+ " --root_url "
+ str(client.cell.get_root_url_for_child())
+ " --parent_url "
+ str(client.cell.get_internal_listener_url())
)
if gpu:
command += " --gpu " + str(gpu)
new_env = os.environ.copy()
if not sys.path[0]:
new_env["PYTHONPATH"] = os.pathsep.join(sys.path[1:])
else:
new_env["PYTHONPATH"] = os.pathsep.join(sys.path)
_ = subprocess.Popen(shlex.split(command, True), preexec_fn=os.setsid, env=new_env)
conn = self._create_connection(open_port, timeout=timeout)
self.build_ctx["client_name"] = client.client_name
data = {
# SimulatorConstants.CLIENT: client,
SimulatorConstants.CLIENT_CONFIG: self.client_config,
SimulatorConstants.DEPLOY_ARGS: self.deploy_args,
SimulatorConstants.BUILD_CTX: self.build_ctx,
}
conn.send(data)
while True:
stop_run = conn.recv()
with lock:
if num_of_threads != len(self.federated_clients):
next_client = self.get_next_run_client(gpu)
else:
next_client = client
if not stop_run and next_client.client_name == client.client_name:
conn.send(True)
else:
conn.send(False)
break
return stop_run, next_client
def _create_connection(self, open_port, timeout=60.0):
conn = None
start = time.time()
while not conn:
try:
address = ("localhost", open_port)
conn = Client(address, authkey=CommunicationMetaData.CHILD_PASSWORD.encode())
except Exception:
if time.time() - start > timeout:
raise RuntimeError(
f"Failed to create connection to the child process in {self.__class__.__name__},"
f" timeout: {timeout}"
)
time.sleep(1.0)
pass
return conn
def get_next_run_client(self, gpu):
# Find the next client which is not currently running
while True:
self.run_client_index = (self.run_client_index + 1) % len(self.federated_clients)
client = self.federated_clients[self.run_client_index]
if not client.simulate_running:
break
self.logger.info(f"Simulate Run client: {client.client_name} on GPU group: {gpu}")
return client
| NVFlare-main | nvflare/private/fed/app/simulator/simulator_runner.py |
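# A hedged usage sketch of the SimulatorRunner API defined above; the job folder,
# workspace path, and client count are placeholders, not values from the repository.
from nvflare.private.fed.app.simulator.simulator_runner import SimulatorRunner

runner = SimulatorRunner(
    job_folder="/path/to/my_job",   # placeholder job folder
    workspace="/tmp/simulator_ws",  # placeholder workspace
    n_clients=2,
    threads=2,
)
status = runner.run()  # returns 0 on success, a non-zero code on failure
print(f"simulator finished with status {status}")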
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulator package."""
| NVFlare-main | nvflare/private/fed/app/simulator/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated Simulator launching script."""
import argparse
import sys
from sys import platform
from nvflare.private.fed.app.simulator.simulator_runner import SimulatorRunner
def define_simulator_parser(simulator_parser):
simulator_parser.add_argument("job_folder")
simulator_parser.add_argument("-w", "--workspace", type=str, help="WORKSPACE folder")
simulator_parser.add_argument("-n", "--n_clients", type=int, help="number of clients")
simulator_parser.add_argument("-c", "--clients", type=str, help="client names list")
simulator_parser.add_argument("-t", "--threads", type=int, help="number of parallel running clients")
simulator_parser.add_argument("-gpu", "--gpu", type=str, help="list of GPU Device Ids, comma separated")
simulator_parser.add_argument("-m", "--max_clients", type=int, default=100, help="max number of clients")
def run_simulator(simulator_args):
simulator = SimulatorRunner(
job_folder=simulator_args.job_folder,
workspace=simulator_args.workspace,
clients=simulator_args.clients,
n_clients=simulator_args.n_clients,
threads=simulator_args.threads,
gpu=simulator_args.gpu,
max_clients=simulator_args.max_clients,
)
run_status = simulator.run()
return run_status
if __name__ == "__main__":
"""
This is the main program when running the NVFlare Simulator. Use the FLARE simulator API:
create the SimulatorRunner object, then call run().
"""
# For MacOS, it needs to use 'spawn' for creating multi-process.
if platform == "darwin":
# OS X
import multiprocessing
multiprocessing.set_start_method("spawn")
if sys.version_info < (3, 8):
raise RuntimeError("Please use Python 3.8 or above.")
parser = argparse.ArgumentParser()
define_simulator_parser(parser)
args = parser.parse_args()
status = run_simulator(args)
sys.exit(status)
| NVFlare-main | nvflare/private/fed/app/simulator/simulator.py |
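# Typical command-line invocation of the launcher above (a sketch; the job folder
# and workspace are placeholders):
#   python -m nvflare.private.fed.app.simulator.simulator /path/to/my_job \
#       -w /tmp/simulator_ws -n 2 -t 2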
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated client launching script."""
import argparse
import os
import sys
import time
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import JobConstants, SiteType, WorkspaceConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger, create_privacy_manager
from nvflare.private.fed.client.admin import FedAdminAgent
from nvflare.private.fed.client.client_engine import ClientEngine
from nvflare.private.fed.client.client_status import ClientStatus
from nvflare.private.fed.client.fed_client import FederatedClient
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize, security_init
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
def main():
if sys.version_info >= (3, 11):
raise RuntimeError("Python versions 3.11 and above are not yet supported. Please use Python 3.8, 3.9 or 3.10.")
if sys.version_info < (3, 8):
raise RuntimeError("Python versions 3.7 and below are not supported. Please use Python 3.8, 3.9 or 3.10")
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--fed_client", "-s", type=str, help="client config json file", required=True)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()
kv_list = parse_vars(args.set)
config_folder = kv_list.get("config_folder", "")
if config_folder == "":
args.client_config = JobConstants.CLIENT_JOB_CONFIG
else:
args.client_config = os.path.join(config_folder, JobConstants.CLIENT_JOB_CONFIG)
# TODO:: remove env and train config since they are not core
args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
args.train_config = os.path.join("config", AppFolderConstants.CONFIG_TRAIN)
args.log_config = None
args.job_id = None
workspace = Workspace(root_dir=args.workspace)
for name in [WorkspaceConstants.RESTART_FILE, WorkspaceConstants.SHUTDOWN_FILE]:
try:
f = workspace.get_file_path_in_root(name)
if os.path.exists(f):
os.remove(f)
except Exception:
print("Could not remove the {} file. Please check your system before starting FL.".format(name))
sys.exit(-1)
rank = args.local_rank
try:
os.chdir(args.workspace)
fobs_initialize()
conf = FLClientStarterConfiger(
workspace=workspace,
args=args,
kv_list=args.set,
)
conf.configure()
log_file = workspace.get_log_file_path()
add_logfile_handler(log_file)
deployer = conf.base_deployer
security_init(
secure_train=deployer.secure_train,
site_org=conf.site_org,
workspace=workspace,
app_validator=conf.app_validator,
site_type=SiteType.CLIENT,
)
# initialize Privacy Service
privacy_manager = create_privacy_manager(workspace, names_only=True)
PrivacyService.initialize(privacy_manager)
federated_client = deployer.create_fed_client(args)
federated_client.start_overseer_agent()
while not federated_client.sp_established:
print("Waiting for SP....")
time.sleep(1.0)
federated_client.use_gpu = False
federated_client.config_folder = config_folder
while federated_client.cell is None:
print("Waiting client cell to be created ....")
time.sleep(1.0)
federated_client.register()
if not federated_client.token:
print("The client could not register to server. ")
raise RuntimeError("Login failed.")
federated_client.start_heartbeat(interval=kv_list.get("heart_beat_interval", 10.0))
admin_agent = create_admin_agent(
deployer.req_processors,
federated_client,
args,
rank,
)
while federated_client.status != ClientStatus.STOPPED:
time.sleep(1.0)
deployer.close()
except ConfigError as e:
print(f"ConfigError: {secure_format_exception(e)}")
def create_admin_agent(
req_processors,
federated_client: FederatedClient,
args,
rank,
):
"""Creates an admin agent.
Args:
req_processors: request processors
federated_client: FL client object
args: command args
rank: client rank process number
Returns:
A FedAdminAgent.
"""
client_engine = ClientEngine(federated_client, federated_client.token, args, rank)
admin_agent = FedAdminAgent(
client_name="admin_agent",
cell=federated_client.cell,
app_ctx=client_engine,
)
client_engine.set_agent(admin_agent)
federated_client.set_client_engine(client_engine)
for processor in req_processors:
admin_agent.register_processor(processor)
client_engine.fire_event(EventType.SYSTEM_START, client_engine.new_context())
return admin_agent
if __name__ == "__main__":
"""
This is the main program when starting the NVIDIA FLARE client process.
"""
# # For MacOS, it needs to use 'spawn' for creating multi-process.
# if os.name == 'posix':
# import multiprocessing
# multiprocessing.set_start_method('spawn')
# import multiprocessing
# multiprocessing.set_start_method('spawn')
# main()
rc = mpm.run(main_func=main)
sys.exit(rc)
| NVFlare-main | nvflare/private/fed/app/client/client_train.py |
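# The "--set KEY=VALUE" pairs accepted above are turned into a dict by parse_vars.
# A small sketch (the keys here are examples; how values are type-converted is left
# to parse_vars itself):
from nvflare.fuel.utils.argument_utils import parse_vars

kv_list = parse_vars(["config_folder=config", "uid=site-1"])
config_folder = kv_list.get("config_folder", "")
print(config_folder, kv_list.get("uid"))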
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL client application package."""
| NVFlare-main | nvflare/private/fed/app/client/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sub_worker process to start the multi-processes client."""
import argparse
import copy
import logging
import os
import sys
import threading
import time
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.signal import Signal
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.apis.workspace import Workspace
from nvflare.app_common.executors.multi_process_executor import WorkerComponentBuilder
from nvflare.fuel.common.multi_process_executor_constants import (
CommunicateData,
CommunicationMetaData,
MultiProcessCommandNames,
)
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.core_cell import Message as CellMessage
from nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey, make_reply
from nvflare.fuel.f3.cellnet.defs import ReturnCode
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.net_agent import NetAgent
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.sec.security_content_service import SecurityContentService
from nvflare.private.defs import CellChannel, CellChannelTopic, new_cell_message
from nvflare.private.fed.app.fl_conf import create_privacy_manager
from nvflare.private.fed.app.utils import monitor_parent_process
from nvflare.private.fed.client.client_run_manager import ClientRunManager
from nvflare.private.fed.runner import Runner
from nvflare.private.fed.simulator.simulator_app_runner import SimulatorClientRunManager
from nvflare.private.fed.utils.fed_utils import (
add_logfile_handler,
configure_logging,
create_stats_pool_files_for_job,
fobs_initialize,
set_stats_pool_config_for_job,
)
from nvflare.private.privacy_manager import PrivacyService
class EventRelayer(FLComponent):
"""To relay the event from the worker_process."""
def __init__(self, cell, parent_fqcn, local_rank):
"""To init the EventRelayer.
Args:
cell: the local cell.
parent_fqcn: FQCN of the parent cell
local_rank: process local rank
"""
super().__init__()
self.cell = cell
self.parent_fqcn = parent_fqcn
self.local_rank = local_rank
self.event_lock = threading.Lock()
self.start_run_fired = False
def relay_event(self, run_manager, data):
"""To relay the event.
Args:
run_manager: ClientRunManager
data: event data
"""
with run_manager.new_context() as fl_ctx:
event_type = data[CommunicationMetaData.EVENT_TYPE]
if event_type == EventType.START_RUN:
if self.start_run_fired:
return
else:
self.start_run_fired = True
fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)
fl_ctx.set_prop(
FLContextKey.EVENT_ORIGIN_SITE, CommunicateData.MULTI_PROCESS_EXECUTOR, private=True, sticky=False
)
self.fire_event(event_type=event_type, fl_ctx=fl_ctx)
def handle_event(self, event_type: str, fl_ctx: FLContext):
"""To handle the event.
Args:
event_type: event_type
fl_ctx: FLContext
"""
event_site = fl_ctx.get_prop(FLContextKey.EVENT_ORIGIN_SITE)
new_fl_ctx = FLContext()
new_fl_ctx.props.update(copy.deepcopy(get_serializable_data(fl_ctx).props))
if event_site != CommunicateData.MULTI_PROCESS_EXECUTOR:
with self.event_lock:
try:
data = {
CommunicationMetaData.EVENT_TYPE: event_type,
CommunicationMetaData.RANK_NUMBER: self.local_rank,
CommunicationMetaData.FL_CTX: new_fl_ctx,
}
request = new_cell_message({}, data)
return_data = self.cell.send_request(
target=self.parent_fqcn,
channel=CellChannel.MULTI_PROCESS_EXECUTOR,
topic=CellChannelTopic.FIRE_EVENT,
request=request,
)
# update the fl_ctx from the child process return data.
fl_ctx.props.update(return_data.payload[CommunicationMetaData.FL_CTX].props)
except Exception:
self.log_warning(
fl_ctx, f"Failed to relay the event to parent process. Event: {event_type}", fire_event=False
)
class SubWorkerExecutor(Runner):
def __init__(self, args, workspace, num_of_processes, local_rank) -> None:
super().__init__()
self.args = args
self.workspace = workspace
self.components = {}
self.handlers = []
self.executor = None
self.run_manager = None
self.num_of_processes = num_of_processes
self.local_rank = local_rank
self.done = False
fqcn = FQCN.join([args.client_name, args.job_id, str(local_rank)])
credentials = {}
self.cell = Cell(
fqcn=fqcn,
root_url=args.root_url,
secure=False,
credentials=credentials,
create_internal_listener=True,
parent_url=args.parent_url,
)
self.cell.start()
net_agent = NetAgent(self.cell)
self.cell.register_request_cb(
channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,
topic="*",
cb=self.execute_command,
)
mpm.add_cleanup_cb(net_agent.close)
mpm.add_cleanup_cb(self.cell.stop)
self.commands = {
MultiProcessCommandNames.INITIALIZE: self._initialize,
MultiProcessCommandNames.TASK_EXECUTION: self._execute_task,
MultiProcessCommandNames.FIRE_EVENT: self._handle_event,
MultiProcessCommandNames.CLOSE: self._close,
}
self.logger = logging.getLogger(self.__class__.__name__)
def execute_command(self, request: CellMessage) -> CellMessage:
command_name = request.get_header(MessageHeaderKey.TOPIC)
data = request.payload
if command_name not in self.commands:
return make_reply(ReturnCode.INVALID_REQUEST, "", None)
return self.commands[command_name](data)
def _initialize(self, data):
executor_id = data[CommunicationMetaData.LOCAL_EXECUTOR]
components_conf = data[CommunicationMetaData.COMPONENTS]
component_builder = WorkerComponentBuilder()
for item in components_conf:
cid = item.get("id", None)
if not cid:
raise TypeError("missing component id")
self.components[cid] = component_builder.build_component(item)
if isinstance(self.components[cid], FLComponent):
self.handlers.append(self.components[cid])
self.executor = self.components.get(executor_id, None)
if not isinstance(self.executor, Executor):
return make_reply(
ReturnCode.INVALID_REQUEST,
"invalid executor {}: expect Executor but got {}".format(executor_id, type(self.executor)),
None,
)
job_id = self.args.job_id
self._get_client_run_manager(job_id)
parent_fqcn = FQCN.join([self.args.client_name, self.args.job_id])
relayer = EventRelayer(self.cell, parent_fqcn, self.local_rank)
self.run_manager.add_handler(relayer)
self.run_manager.components[CommunicationMetaData.RELAYER] = relayer
with self.run_manager.new_context() as fl_ctx:
fl_ctx.set_prop(FLContextKey.RANK_NUMBER, self.local_rank, private=True, sticky=True)
fl_ctx.set_prop(FLContextKey.NUM_OF_PROCESSES, self.num_of_processes, private=True, sticky=True)
event_data = {
CommunicationMetaData.EVENT_TYPE: EventType.START_RUN,
CommunicationMetaData.FL_CTX: data[CommunicationMetaData.FL_CTX],
}
relayer.relay_event(self.run_manager, event_data)
return make_reply(ReturnCode.OK, "", None)
def _get_client_run_manager(self, job_id):
if self.args.simulator_engine.lower() == "true":
self.run_manager = SimulatorClientRunManager(
client_name=self.args.client_name,
job_id=job_id,
workspace=self.workspace,
client=None,
components=self.components,
handlers=self.handlers,
conf=None,
)
else:
self.run_manager = ClientRunManager(
client_name=self.args.client_name,
job_id=job_id,
workspace=self.workspace,
client=None,
components=self.components,
handlers=self.handlers,
conf=None,
)
def _execute_task(self, data):
"""To execute the event task and pass to worker_process.
Args:
"""
with self.run_manager.new_context() as fl_ctx:
abort_signal = Signal()
task_name = data[CommunicationMetaData.TASK_NAME]
shareable = data[CommunicationMetaData.SHAREABLE]
fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)
shareable = self.executor.execute(
task_name=task_name, shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal
)
if self.local_rank == 0:
return_data = {
CommunicationMetaData.SHAREABLE: shareable,
CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),
}
request = new_cell_message({}, return_data)
fqcn = FQCN.join([self.args.client_name, self.args.job_id])
self.cell.send_request(
target=fqcn,
channel=CellChannel.MULTI_PROCESS_EXECUTOR,
topic=CellChannelTopic.EXECUTE_RESULT,
request=request,
)
def _handle_event(self, data):
"""To handle the event.
Args:
"""
event_relayer = self.run_manager.get_component(CommunicationMetaData.RELAYER)
event_relayer.relay_event(self.run_manager, data)
def _close(self, data):
self.done = True
self.cell.stop()
# mpm.stop()
def run(self):
self.logger.info("SubWorkerExecutor process started.")
while not self.done:
time.sleep(1.0)
# self.cell.run()
# mpm.run("Client sub_worker")
self.logger.info("SubWorkerExecutor process shutdown.")
def stop(self):
self.done = True
def main():
"""Sub_worker process program."""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--num_processes", type=str, help="Listen ports", required=True)
parser.add_argument("--job_id", "-n", type=str, help="job_id", required=True)
parser.add_argument("--client_name", "-c", type=str, help="client name", required=True)
parser.add_argument("--simulator_engine", "-s", type=str, help="simulator engine", required=True)
parser.add_argument("--parent_pid", type=int, help="parent process pid", required=True)
parser.add_argument("--root_url", type=str, help="root cell url", required=True)
parser.add_argument("--parent_url", type=str, help="parent cell url", required=True)
args = parser.parse_args()
workspace = Workspace(args.workspace, args.client_name)
app_custom_folder = workspace.get_client_custom_dir()
if os.path.isdir(app_custom_folder):
sys.path.append(app_custom_folder)
configure_logging(workspace)
fobs_initialize()
SecurityContentService.initialize(content_folder=workspace.get_startup_kit_dir())
# Initialize audit service since the job execution will need it!
AuditService.initialize(workspace.get_audit_file_path())
# configure privacy control!
privacy_manager = create_privacy_manager(workspace, names_only=True)
# initialize Privacy Service
PrivacyService.initialize(privacy_manager)
local_rank = int(os.environ["LOCAL_RANK"])
prefix = f"rank{local_rank}"
set_stats_pool_config_for_job(workspace, args.job_id, prefix=prefix)
num_of_processes = int(args.num_processes)
sub_executor = SubWorkerExecutor(args, workspace, num_of_processes, local_rank)
# start parent process checking thread
parent_pid = args.parent_pid
stop_event = threading.Event()
thread = threading.Thread(target=monitor_parent_process, args=(sub_executor, parent_pid, stop_event))
thread.start()
job_id = args.job_id
log_file = workspace.get_app_log_file_path(job_id)
add_logfile_handler(log_file)
logger = logging.getLogger("sub_worker_process")
sub_executor.run()
AuditService.close()
err = create_stats_pool_files_for_job(workspace, job_id, prefix=prefix)
if err:
logger.warning(err)
if __name__ == "__main__":
"""
This is the program for running rank processes in multi-process mode.
"""
# main()
mpm.run(main_func=main)
| NVFlare-main | nvflare/private/fed/app/client/sub_worker_process.py |
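# The SubWorkerExecutor above routes incoming cell messages by topic through a plain
# dict of handler methods. A stripped-down, cellnet-free sketch of that dispatch
# pattern (all names here are illustrative only):
class CommandDispatcher:
    def __init__(self):
        self.done = False
        self._commands = {
            "initialize": self._initialize,
            "close": self._close,
        }

    def execute(self, topic, data):
        # Look up the handler by topic; unknown topics get an error reply.
        handler = self._commands.get(topic)
        if handler is None:
            return {"rc": "INVALID_REQUEST"}
        return handler(data)

    def _initialize(self, data):
        return {"rc": "OK", "echo": data}

    def _close(self, _data):
        self.done = True
        return {"rc": "OK"}

d = CommandDispatcher()
print(d.execute("initialize", {"x": 1}))  # {'rc': 'OK', 'echo': {'x': 1}}
print(d.execute("unknown", None))         # {'rc': 'INVALID_REQUEST'}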
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a command line interface for a federated client trainer."""
import argparse
import logging
import os
import sys
import threading
from nvflare.apis.fl_constant import FLContextKey, JobConstants
from nvflare.apis.overseer_spec import SP
from nvflare.apis.workspace import Workspace
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.sec.security_content_service import SecurityContentService
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import EngineConstant
from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger
from nvflare.private.fed.app.utils import monitor_parent_process
from nvflare.private.fed.client.client_app_runner import ClientAppRunner
from nvflare.private.fed.client.client_status import ClientStatus
from nvflare.private.fed.utils.fed_utils import (
add_logfile_handler,
create_stats_pool_files_for_job,
fobs_initialize,
set_stats_pool_config_for_job,
)
from nvflare.security.logging import secure_format_exception
def main():
"""Worker process start program."""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True)
parser.add_argument("--token", "-t", type=str, help="token", required=True)
parser.add_argument("--ssid", "-d", type=str, help="ssid", required=True)
parser.add_argument("--job_id", "-n", type=str, help="job_id", required=True)
parser.add_argument("--client_name", "-c", type=str, help="client name", required=True)
# parser.add_argument("--listen_port", "-p", type=str, help="listen port", required=True)
parser.add_argument("--sp_target", "-g", type=str, help="Sp target", required=True)
parser.add_argument("--parent_url", "-p", type=str, help="parent_url", required=True)
parser.add_argument(
"--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True
)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()
kv_list = parse_vars(args.set)
# get parent process id
parent_pid = os.getppid()
args.train_config = os.path.join("config", "config_train.json")
config_folder = kv_list.get("config_folder", "")
secure_train = kv_list.get("secure_train", True)
if config_folder == "":
args.client_config = JobConstants.CLIENT_JOB_CONFIG
else:
args.client_config = os.path.join(config_folder, JobConstants.CLIENT_JOB_CONFIG)
args.config_folder = config_folder
args.env = os.path.join("config", "environment.json")
workspace = Workspace(args.workspace, args.client_name, config_folder)
set_stats_pool_config_for_job(workspace, args.job_id)
try:
remove_restart_file(workspace)
except Exception:
print("Could not remove the restart.fl / shutdown.fl file. Please check your system before starting FL.")
sys.exit(-1)
restart_file = workspace.get_file_path_in_root("restart.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
fobs_initialize()
# Initialize audit service since the job execution will need it!
audit_file_name = workspace.get_audit_file_path()
AuditService.initialize(audit_file_name)
# print("starting the client .....")
SecurityContentService.initialize(content_folder=workspace.get_startup_kit_dir())
thread = None
stop_event = threading.Event()
deployer = None
client_app_runner = None
federated_client = None
app_root = workspace.get_app_dir(str(args.job_id))
logger = None
try:
conf = FLClientStarterConfiger(
workspace=workspace,
args=args,
kv_list=args.set,
)
conf.configure()
log_file = workspace.get_app_log_file_path(args.job_id)
add_logfile_handler(log_file)
logger = logging.getLogger("worker_process")
logger.info("Worker_process started.")
deployer = conf.base_deployer
# federated_client = deployer.create_fed_client(args, args.sp_target)
federated_client = deployer.create_fed_client(args)
federated_client.status = ClientStatus.STARTING
federated_client.token = args.token
federated_client.ssid = args.ssid
federated_client.client_name = args.client_name
federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, args.client_name, private=False)
federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, args.token, private=False)
federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)
client_app_runner = ClientAppRunner(time_out=kv_list.get("app_runner_timeout", 60.0))
# start parent process checking thread
thread = threading.Thread(target=monitor_parent_process, args=(client_app_runner, parent_pid, stop_event))
thread.start()
sp = _create_sp(args)
client_app_runner.start_run(app_root, args, config_folder, federated_client, secure_train, sp, conf.handlers)
except Exception as e:
if logger:
logger.error(f"FL client execution exception: {secure_format_exception(e)}")
raise e
finally:
if client_app_runner:
client_app_runner.close()
if deployer:
deployer.close()
if federated_client:
federated_client.terminate()
stop_event.set()
if thread and thread.is_alive():
thread.join()
AuditService.close()
err = create_stats_pool_files_for_job(workspace, args.job_id)
if err:
logger.warning(err)
def _create_sp(args):
sp = SP()
target = args.sp_target.split(":")
sp.name = target[0]
sp.fl_port = target[1]
sp.service_session_id = args.ssid
sp.primary = True
return sp
def remove_restart_file(workspace: Workspace):
"""To remove the restart.fl file.
Args:
workspace: workspace object
"""
restart_file = workspace.get_file_path_in_root("restart.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
restart_file = workspace.get_file_path_in_root("shutdown.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
if __name__ == "__main__":
"""
This is the program started as the child process for running the NVIDIA FLARE executor.
"""
# main()
rc = mpm.run(main_func=main)
sys.exit(rc)
| NVFlare-main | nvflare/private/fed/app/client/worker_process.py |
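# _create_sp above parses the "--sp_target" value of the form "<name>:<fl_port>".
# A tiny standalone illustration of the same split (the target string is made up):
sp_target = "example-server:8002"
name, fl_port = sp_target.split(":")
print(name, fl_port)  # example-server 8002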
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
from io import BytesIO
from typing import Optional, Set, Tuple
from zipfile import ZipFile
from nvflare.apis.fl_constant import JobConstants
from nvflare.apis.job_def import ALL_SITES, SERVER_SITE_NAME, JobMetaKey
from nvflare.apis.job_meta_validator_spec import JobMetaValidatorSpec
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.security.logging import secure_format_exception
MAX_CLIENTS = 1000000
logger = logging.getLogger(__name__)
class JobMetaValidator(JobMetaValidatorSpec):
"""Job validator"""
def validate(self, job_name: str, job_data: bytes) -> Tuple[bool, str, dict]:
"""Validate job
Args:
job_name (str): Job name
job_data (bytes): Job ZIP data
Returns:
Tuple[bool, str, dict]: (is_valid, error_message, meta)
"""
meta = {}
try:
with ZipFile(BytesIO(job_data), "r") as zf:
meta = self._validate_meta(job_name, zf)
site_list = self._validate_deploy_map(job_name, meta)
self._validate_app(job_name, meta, zf)
clients = self._get_all_clients(site_list)
self._validate_min_clients(job_name, meta, clients)
self._validate_resource(job_name, meta)
self._validate_mandatory_clients(job_name, meta, clients)
except ValueError as e:
return False, secure_format_exception(e), meta
return True, "", meta
@staticmethod
def _validate_meta(job_name: str, zf: ZipFile) -> Optional[dict]:
base_meta_file = f"{job_name}/{JobConstants.META}"
logger.debug(f"validate file {base_meta_file}.[json|conf|yml] exists for job {job_name}")
meta = None
for ext, fmt in ConfigFormat.config_ext_formats().items():
meta_file = f"{base_meta_file}{ext}"
if meta_file in zf.namelist():
config_loader = ConfigFactory.get_config_loader(fmt)
meta_data = zf.read(meta_file)
meta = config_loader.load_config_from_str(meta_data.decode()).to_dict()
break
return meta
@staticmethod
def _validate_deploy_map(job_name: str, meta: dict) -> list:
if not meta:
raise ValueError(f"{JobConstants.META}.[json|conf|yml] is empty for job {job_name}")
deploy_map = meta.get(JobMetaKey.DEPLOY_MAP.value)
if not deploy_map:
raise ValueError(f"deploy_map is empty for job {job_name}")
site_list = [site for deployments in deploy_map.values() for site in deployments]
if not site_list:
raise ValueError(f"No site is specified in deploy_map for job {job_name}")
if ALL_SITES.casefold() in (site.casefold() for site in site_list):
# if ALL_SITES is specified, no other site can be in the list
if len(site_list) > 1:
raise ValueError(f"No other site can be specified if {ALL_SITES} is used for job {job_name}")
else:
site_list = [ALL_SITES]
elif SERVER_SITE_NAME not in site_list:
raise ValueError(f"Missing server site in deploy_map for job {job_name}")
else:
duplicates = [site for site, count in collections.Counter(site_list).items() if count > 1]
if duplicates:
raise ValueError(f"Multiple apps to be deployed to following sites {duplicates} for job {job_name}")
return site_list
def _validate_app(self, job_name: str, meta: dict, zip_file: ZipFile) -> None:
deploy_map = meta.get(JobMetaKey.DEPLOY_MAP.value)
for app, deployments in deploy_map.items():
zip_folder = job_name + "/" + app + "/config/"
if not self._entry_exists(zip_file, zip_folder):
logger.debug(f"zip folder {zip_folder} missing. Files in the zip:")
for x in zip_file.namelist():
logger.debug(f" {x}")
raise ValueError(f"App '{app}' in deploy_map doesn't exist for job {job_name}")
all_sites = ALL_SITES.casefold() in (site.casefold() for site in deployments)
if (all_sites or SERVER_SITE_NAME in deployments) and not self._config_exists(
zip_file, zip_folder, JobConstants.SERVER_JOB_CONFIG
):
raise ValueError(f"App '{app}' will be deployed to server but server config is missing")
if (all_sites or [site for site in deployments if site != SERVER_SITE_NAME]) and not self._config_exists(
zip_file, zip_folder, JobConstants.CLIENT_JOB_CONFIG
):
raise ValueError(f"App '{app}' will be deployed to client but client config is missing")
@staticmethod
def _convert_value_to_int(v) -> int:
if isinstance(v, int):
return v
else:
try:
v = int(v)
return v
except ValueError as e:
raise ValueError(f"invalid data type for {v},can't not convert to Int", secure_format_exception(e))
except TypeError as e:
raise ValueError(f"invalid data type for {v},can't not convert to Int", secure_format_exception(e))
def _validate_min_clients(self, job_name: str, meta: dict, clients: set) -> None:
logger.debug(f"validate min_clients for job {job_name}")
value = meta.get(JobMetaKey.MIN_CLIENTS)
if value is not None:
min_clients = self._convert_value_to_int(value)
if min_clients <= 0:
raise ValueError(f"min_clients {min_clients} must be positive for job {job_name}")
elif min_clients > MAX_CLIENTS:
raise ValueError(f"min_clients {min_clients} must be less than {MAX_CLIENTS} for job {job_name}")
if next(iter(clients)) != ALL_SITES and len(clients) < min_clients:
raise ValueError(f"min {min_clients} clients required for job {job_name}, found {len(clients)}.")
@staticmethod
def _validate_mandatory_clients(job_name: str, meta: dict, clients: set) -> None:
logger.debug(f" validate mandatory_clients for job {job_name}")
if next(iter(clients)) != ALL_SITES:
# Validating mandatory clients are deployed
mandatory_clients = meta.get(JobMetaKey.MANDATORY_CLIENTS)
if mandatory_clients:
mandatory_set = set(mandatory_clients)
if not mandatory_set.issubset(clients):
diff = mandatory_set - clients
raise ValueError(f"Mandatory clients {diff} are not in the deploy_map for job {job_name}")
@staticmethod
def _validate_resource(job_name: str, meta: dict) -> None:
logger.debug(f"validate resource for job {job_name}")
resource_spec = meta.get(JobMetaKey.RESOURCE_SPEC.value)
if resource_spec and not isinstance(resource_spec, dict):
raise ValueError(f"Invalid resource_spec for job {job_name}")
if not resource_spec:
logger.debug("empty resource spec provided")
if resource_spec:
for k in resource_spec:
if resource_spec[k] and not isinstance(resource_spec[k], dict):
raise ValueError(f"value for key {k} in resource spec is expecting a dictionary")
@staticmethod
def _get_all_clients(site_list: Optional[list]) -> Set[str]:
if site_list[0] == ALL_SITES:
return {ALL_SITES}
return set([site for site in site_list if site != SERVER_SITE_NAME])
@staticmethod
def _entry_exists(zip_file: ZipFile, path: str) -> bool:
try:
zip_file.getinfo(path)
return True
except KeyError:
return False
@staticmethod
def _config_exists(zip_file: ZipFile, zip_folder, init_config_path: str) -> bool:
def match(parent: ZipFile, config_path: str) -> bool:
import os
full_path = os.path.join(zip_folder, config_path)
return full_path in parent.namelist()
return ConfigFactory.match_config(zip_file, init_config_path, match)
| NVFlare-main | nvflare/private/fed/server/job_meta_validator.py |
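# A hedged usage sketch of the validator above, mirroring how the simulator's
# validate_job_data() calls it; the job folder path is a placeholder:
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.utils.job_utils import convert_legacy_zipped_app_to_job
from nvflare.fuel.utils.zip_utils import split_path, zip_directory_to_bytes
from nvflare.private.fed.server.job_meta_validator import JobMetaValidator

job_folder = "/path/to/jobs/my_job"  # placeholder
job_name = split_path(job_folder)[1]
job_data = convert_legacy_zipped_app_to_job(zip_directory_to_bytes("", job_folder))
valid, error, meta = JobMetaValidator().validate(job_name, job_data)
print(valid, error)
print(meta.get(JobMetaKey.DEPLOY_MAP))  # the parsed deploy_map, as used above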
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nvflare.apis.fl_constant import MachineStatus
class RunInfo(object):
def __init__(self, job_id, app_path):
"""Information for a run."""
self.job_id = job_id
self.start_time = time.time()
self.app_path = app_path
self.status = MachineStatus.STOPPED.value
| NVFlare-main | nvflare/private/fed/server/run_info.py |
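# A tiny usage sketch of RunInfo above (job id and app path are placeholders):
from nvflare.private.fed.server.run_info import RunInfo

info = RunInfo(job_id="simulate_job", app_path="/tmp/workspace/app_server")
print(info.job_id, info.status)  # status starts as MachineStatus.STOPPED.value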
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import SystemConfigs
from nvflare.apis.responder import Responder
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.config_service import ConfigService
from nvflare.fuel.utils.json_scanner import Node
from nvflare.private.fed_json_config import FedJsonConfigurator
from nvflare.private.json_configer import ConfigContext, ConfigError
from .server_runner import ServerRunnerConfig
FL_PACKAGES = ["nvflare"]
FL_MODULES = ["apis", "app_common", "widgets", "app_opt"]
class WorkFlow:
def __init__(self, id, responder: Responder):
"""Workflow is a responder with ID.
Args:
id: identification
responder (Responder): A responder
"""
self.id = id
self.responder = responder
class ServerJsonConfigurator(FedJsonConfigurator):
def __init__(self, config_file_name: str, args, app_root: str, kv_list=None, exclude_libs=True):
"""This class parses server config from json file.
Args:
            config_file_name (str): json file to parse
            args: command line args of the server process
            app_root (str): server app root directory (used as the config search path)
            kv_list: optional list of key=value strings for config variables
            exclude_libs (bool): whether to exclude libs
"""
self.config_file_name = config_file_name
self.args = args
self.app_root = app_root
base_pkgs = FL_PACKAGES
module_names = FL_MODULES
FedJsonConfigurator.__init__(
self,
config_file_name=config_file_name,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=exclude_libs,
is_server=True,
)
if kv_list:
            assert isinstance(kv_list, list), "kv_list must be a list, but got {}".format(type(kv_list))
self.cmd_vars = parse_vars(kv_list)
else:
self.cmd_vars = {}
self.config_files = [config_file_name]
self.runner_config = None
# if server doesn't hear heartbeat from client for this long, we'll consider the client dead
self.heartbeat_timeout = 60 # default to 1 minute
# server will ask client to come back for next task after this many secs
self.task_request_interval = 2 # default to 2 secs
# workflows to be executed
self.workflows = []
def process_config_element(self, config_ctx: ConfigContext, node: Node):
FedJsonConfigurator.process_config_element(self, config_ctx, node)
element = node.element
path = node.path()
if path == "server.heart_beat_timeout":
self.heartbeat_timeout = element
if not isinstance(element, int) and not isinstance(element, float):
raise ConfigError('"heart_beat_timeout" must be a number, but got {}'.format(type(element)))
if element <= 0.0:
raise ConfigError('"heart_beat_timeout" must be positive number, but got {}'.format(element))
return
if path == "server.task_request_interval":
self.task_request_interval = element
if not isinstance(element, int) and not isinstance(element, float):
raise ConfigError('"task_request_interval" must be a number, but got {}'.format(type(element)))
if element <= 0:
raise ConfigError('"task_request_interval" must > 0, but got {}'.format(element))
return
if re.search(r"^workflows\.#[0-9]+$", path):
workflow = self.authorize_and_build_component(element, config_ctx, node)
if not isinstance(workflow, Responder):
raise ConfigError(
'"workflow" must be a Responder or Controller object, but got {}'.format(type(workflow))
)
cid = element.get("id", None)
if not cid:
cid = type(workflow).__name__
if not isinstance(cid, str):
raise ConfigError('"id" must be str but got {}'.format(type(cid)))
if cid in self._get_all_workflows_ids():
raise ConfigError('duplicate workflow id "{}"'.format(cid))
if cid in self.components:
raise ConfigError('duplicate component id "{}"'.format(cid))
self.workflows.append(WorkFlow(cid, workflow))
self.components[cid] = workflow
return
def _get_all_workflows_ids(self):
ids = []
for t in self.workflows:
ids.append(t.id)
return ids
def build_component(self, config_dict):
t = super().build_component(config_dict)
if isinstance(t, FLComponent):
self.handlers.append(t)
return t
def finalize_config(self, config_ctx: ConfigContext):
FedJsonConfigurator.finalize_config(self, config_ctx)
if not self.workflows:
raise ConfigError("workflows not specified")
self.runner_config = ServerRunnerConfig(
heartbeat_timeout=self.heartbeat_timeout,
task_request_interval=self.task_request_interval,
workflows=self.workflows,
task_data_filters=self.data_filter_table,
task_result_filters=self.result_filter_table,
components=self.components,
handlers=self.handlers,
)
ConfigService.initialize(
section_files={SystemConfigs.APPLICATION_CONF: os.path.basename(self.config_files[0])},
config_path=[self.app_root],
parsed_args=self.args,
var_dict=self.cmd_vars,
)
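# A minimal illustrative sketch (hypothetical helper): process_config_element above
# recognizes workflow entries by their JSON path, where the i-th element of the
# top-level "workflows" list arrives as "workflows.#i". This just exercises that
# regex against a few sample paths.
def _example_workflow_path_match():
    pattern = r"^workflows\.#[0-9]+$"
    for path in ["workflows.#0", "workflows.#12", "workflows.#0.args", "components.#1"]:
        # only the bare list elements match; nested paths and other sections do not
        print(path, "->", bool(re.search(pattern, path)))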
| NVFlare-main | nvflare/private/fed/server/server_json_config.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import MetaKey, MetaStatusValue, make_meta
from nvflare.fuel.hci.server.authz import PreAuthzReturnCode
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.private.fed.server.admin import FedAdminServer
class CommandUtil(object):
TARGET_CLIENTS = "target_clients"
TARGET_CLIENT_TOKENS = "target_client_tokens"
TARGET_CLIENT_NAMES = "target_client_names"
TARGET_TYPE = "target_type"
TARGET_TYPE_CLIENT = "client"
TARGET_TYPE_SERVER = "server"
TARGET_TYPE_ALL = "all"
JOB_ID = "job_id"
JOB = "job"
def command_authz_required(self, conn: Connection, args: List[str]) -> PreAuthzReturnCode:
return PreAuthzReturnCode.REQUIRE_AUTHZ
def authorize_client_operation(self, conn: Connection, args: List[str]) -> PreAuthzReturnCode:
auth_args = [args[0], self.TARGET_TYPE_CLIENT]
auth_args.extend(args[1:])
err = self.validate_command_targets(conn, auth_args[1:])
if err:
conn.append_error(err)
return PreAuthzReturnCode.ERROR
return PreAuthzReturnCode.REQUIRE_AUTHZ
def validate_command_targets(self, conn: Connection, args: List[str]) -> str:
"""Validate specified args and determine and set target type and target names in the Connection.
The args must be like this:
target_type client_names ...
where target_type is one of 'all', 'client', 'server'
Args:
conn: A Connection object.
args: Specified arguments.
Returns:
An error message. It is empty "" if no error found.
"""
# return target type and a list of target names
if len(args) < 1:
return "missing target type (server or client)"
target_type = args[0]
conn.set_prop(self.TARGET_TYPE, target_type)
if target_type == self.TARGET_TYPE_SERVER:
return ""
if target_type == self.TARGET_TYPE_CLIENT:
client_names = args[1:]
elif target_type == self.TARGET_TYPE_ALL:
client_names = []
else:
return "unknown target type {}".format(target_type)
engine = conn.app_ctx
if not isinstance(engine, ServerEngineSpec):
raise TypeError("engine must be ServerEngineSpec but got {}".format(type(engine)))
if len(client_names) == 0:
# get all clients
clients = engine.get_clients()
else:
clients, invalid_inputs = engine.validate_targets(client_names)
if invalid_inputs:
return "invalid client(s): {}".format(" ".join(invalid_inputs))
if target_type == self.TARGET_TYPE_CLIENT and not clients:
return "no clients available"
valid_tokens = []
client_names = []
all_clients = {}
for c in clients:
valid_tokens.append(c.token)
client_names.append(c.name)
all_clients[c.token] = c.name
conn.set_prop(self.TARGET_CLIENT_TOKENS, valid_tokens)
# if clients:
# client_names = [c.name for c in clients]
# else:
# client_names = []
conn.set_prop(self.TARGET_CLIENT_NAMES, client_names)
conn.set_prop(self.TARGET_CLIENTS, all_clients)
return ""
def must_be_project_admin(self, conn: Connection, args: List[str]):
role = conn.get_prop(ConnProps.USER_ROLE, "")
if role not in ["project_admin"]:
conn.append_error(f"Not authorized for {role}", meta=make_meta(MetaStatusValue.NOT_AUTHORIZED))
return PreAuthzReturnCode.ERROR
else:
return PreAuthzReturnCode.OK
def authorize_server_operation(self, conn: Connection, args: List[str]):
err = self.validate_command_targets(conn, args[1:])
if err:
conn.append_error(err)
return PreAuthzReturnCode.ERROR
target_type = conn.get_prop(self.TARGET_TYPE)
if target_type == self.TARGET_TYPE_SERVER or target_type == self.TARGET_TYPE_ALL:
return PreAuthzReturnCode.REQUIRE_AUTHZ
else:
return PreAuthzReturnCode.OK
def send_request_to_clients(self, conn, message):
client_tokens = conn.get_prop(self.TARGET_CLIENT_TOKENS)
if not client_tokens:
return None
requests = {}
for token in client_tokens:
requests.update({token: message})
admin_server: FedAdminServer = conn.server
cmd_timeout = conn.get_prop(ConnProps.CMD_TIMEOUT)
if not cmd_timeout:
cmd_timeout = admin_server.timeout
replies = admin_server.send_requests(requests, timeout_secs=cmd_timeout)
return replies
@staticmethod
def get_job_name(meta: dict) -> str:
"""Gets job name from job meta."""
name = meta.get(JobMetaKey.JOB_NAME)
if not name:
name = meta.get(JobMetaKey.JOB_FOLDER_NAME, "No name")
return name
def process_replies_to_table(self, conn: Connection, replies):
"""Process the clients' replies and put in a table format.
Args:
conn: A Connection object.
replies: replies from clients
"""
if not replies:
conn.append_string("no responses from clients")
table = conn.append_table(["Client", "Response"])
for r in replies:
if r.reply:
resp = r.reply.body
else:
resp = ""
client_name = r.client_name
if not client_name:
clients = conn.get_prop(self.TARGET_CLIENTS)
client_name = clients.get(r.client_token, "")
table.add_row([client_name, resp])
def _process_replies_to_string(self, conn: Connection, replies) -> str:
"""Process the clients replies and put in a string format.
Args:
conn: A Connection object.
replies: replies from clients
Returns:
A string response.
"""
response = "no responses from clients"
client_replies = {}
if replies:
response = ""
for r in replies:
client_name = r.client_name
response += "client:" + client_name
if r.reply:
response += " : " + r.reply.body + "\n"
client_replies[client_name] = r.reply.body
else:
response += " : No replies\n"
client_replies[client_name] = MetaStatusValue.NO_REPLY
conn.update_meta({MetaKey.CLIENT_STATUS: client_replies})
return response
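# A minimal illustrative sketch (hypothetical, engine-free helper): it mirrors only
# the target-type parsing of validate_command_targets above, to make the expected
# arg shape -- "target_type [client_name ...]" -- easy to see.
def _example_parse_targets(args: List[str]):
    if len(args) < 1:
        return None, None, "missing target type (server or client)"
    target_type = args[0]
    if target_type == CommandUtil.TARGET_TYPE_SERVER:
        return target_type, [], ""
    if target_type == CommandUtil.TARGET_TYPE_CLIENT:
        return target_type, list(args[1:]), ""
    if target_type == CommandUtil.TARGET_TYPE_ALL:
        return target_type, [], ""
    return None, None, "unknown target type {}".format(target_type)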
| NVFlare-main | nvflare/private/fed/server/cmd_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
import shutil
from typing import Dict, List
import nvflare.fuel.hci.file_transfer_defs as ftd
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import AdminCommandNames, RunProcessKey
from nvflare.apis.job_def import Job, JobDataKey, JobMetaKey, TopDir, is_valid_job_id
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec, RunStatus
from nvflare.apis.storage import DATA, JOB_ZIP, META, META_JSON, WORKSPACE, WORKSPACE_ZIP
from nvflare.apis.utils.job_utils import convert_legacy_zipped_app_to_job
from nvflare.fuel.hci.base64_utils import b64str_to_bytes
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import ConfirmMethod, MetaKey, MetaStatusValue, make_meta
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.authz import PreAuthzReturnCode
from nvflare.fuel.hci.server.binary_transfer import BinaryTransfer
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.fuel.utils.argument_utils import SafeArgumentParser
from nvflare.fuel.utils.zip_utils import ls_zip_from_bytes, unzip_all_from_bytes
from nvflare.private.defs import RequestHeader, TrainingTopic
from nvflare.private.fed.server.admin import new_message
from nvflare.private.fed.server.job_meta_validator import JobMetaValidator
from nvflare.private.fed.server.server_engine import ServerEngine
from nvflare.private.fed.server.server_engine_internal_spec import ServerEngineInternalSpec
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from .cmd_utils import CommandUtil
CLONED_META_KEYS = {
JobMetaKey.JOB_NAME.value,
JobMetaKey.JOB_FOLDER_NAME.value,
JobMetaKey.DEPLOY_MAP.value,
JobMetaKey.RESOURCE_SPEC.value,
JobMetaKey.CONTENT_LOCATION.value,
JobMetaKey.RESULT_LOCATION.value,
JobMetaKey.APPROVALS.value,
JobMetaKey.MIN_CLIENTS.value,
JobMetaKey.MANDATORY_CLIENTS.value,
}
def _create_list_job_cmd_parser():
parser = SafeArgumentParser(prog=AdminCommandNames.LIST_JOBS)
parser.add_argument("job_id", nargs="?", help="Job ID prefix")
parser.add_argument("-d", action="store_true", help="Show detailed list")
parser.add_argument("-u", action="store_true", help="List jobs submitted by the same user")
parser.add_argument("-r", action="store_true", help="List jobs in reverse order of submission time")
parser.add_argument("-n", help="Filter by job name prefix")
parser.add_argument(
"-m",
type=int,
help="Maximum number of jobs that will be listed",
)
return parser
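# A minimal usage sketch (argument values are made up), assuming SafeArgumentParser
# follows the usual argparse parse_args contract, as list_jobs below relies on:
def _example_list_jobs_parsing():
    parser = _create_list_job_cmd_parser()
    parsed = parser.parse_args(["-d", "-m", "3", "1234"])
    # parsed.d is True, parsed.m is 3, parsed.job_id is "1234"; -u, -r and -n stay unset
    print(parsed.job_id, parsed.d, parsed.u, parsed.r, parsed.n, parsed.m)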
class JobCommandModule(CommandModule, CommandUtil, BinaryTransfer):
"""Command module with commands for job management."""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def get_spec(self):
return CommandModuleSpec(
name="job_mgmt",
cmd_specs=[
CommandSpec(
name=AdminCommandNames.DELETE_WORKSPACE,
description="delete the workspace of a job",
usage=f"{AdminCommandNames.DELETE_WORKSPACE} job_id",
handler_func=self.delete_job_id,
authz_func=self.authorize_job,
enabled=False,
confirm=ConfirmMethod.AUTH,
),
CommandSpec(
name=AdminCommandNames.START_APP,
description="start the FL app",
usage=f"{AdminCommandNames.START_APP} job_id server|client|all",
handler_func=self.start_app,
authz_func=self.authorize_job,
),
CommandSpec(
name=AdminCommandNames.LIST_JOBS,
description="list submitted jobs",
usage=f"{AdminCommandNames.LIST_JOBS} [-n name_prefix] [-d] [-u] [-r] [-m num_of_jobs] [job_id_prefix]",
handler_func=self.list_jobs,
authz_func=self.command_authz_required,
),
CommandSpec(
name=AdminCommandNames.GET_JOB_META,
description="get meta info of specified job",
usage=f"{AdminCommandNames.GET_JOB_META} job_id",
handler_func=self.get_job_meta,
authz_func=self.authorize_job,
),
CommandSpec(
name=AdminCommandNames.DELETE_JOB,
description="delete a job and persisted workspace",
usage=f"{AdminCommandNames.DELETE_JOB} job_id",
handler_func=self.delete_job,
authz_func=self.authorize_job,
confirm=ConfirmMethod.AUTH,
),
CommandSpec(
name=AdminCommandNames.ABORT_JOB,
description="abort a job if it is running or dispatched",
usage=f"{AdminCommandNames.ABORT_JOB} job_id",
handler_func=self.abort_job, # see if running, if running, send abort command
authz_func=self.authorize_job,
confirm=ConfirmMethod.YESNO,
),
CommandSpec(
name=AdminCommandNames.CLONE_JOB,
description="clone a job with a new job_id",
usage=f"{AdminCommandNames.CLONE_JOB} job_id",
handler_func=self.clone_job,
authz_func=self.authorize_job,
),
CommandSpec(
name=AdminCommandNames.SUBMIT_JOB,
description="submit a job",
usage=f"{AdminCommandNames.SUBMIT_JOB} job_folder",
handler_func=self.submit_job,
authz_func=self.command_authz_required,
client_cmd=ftd.UPLOAD_FOLDER_FQN,
),
CommandSpec(
name=AdminCommandNames.DOWNLOAD_JOB,
description="download a specified job",
usage=f"{AdminCommandNames.DOWNLOAD_JOB} job_id",
handler_func=self.download_job,
authz_func=self.authorize_job,
client_cmd=ftd.PULL_FOLDER_FQN,
),
CommandSpec(
name=AdminCommandNames.DOWNLOAD_JOB_FILE,
description="download a specified job file",
usage=f"{AdminCommandNames.DOWNLOAD_JOB_FILE} job_id file_name",
handler_func=self.pull_file,
authz_func=self.authorize_job_file,
client_cmd=ftd.PULL_BINARY_FQN,
visible=False,
),
],
)
def authorize_job_file(self, conn: Connection, args: List[str]):
if len(args) < 2:
conn.append_error(
"syntax error: missing job_id", meta=make_meta(MetaStatusValue.SYNTAX_ERROR, "missing job_id")
)
return PreAuthzReturnCode.ERROR
return self.authorize_job(conn, args[0:2])
def authorize_job(self, conn: Connection, args: List[str]):
if len(args) < 2:
conn.append_error(
"syntax error: missing job_id", meta=make_meta(MetaStatusValue.SYNTAX_ERROR, "missing job_id")
)
return PreAuthzReturnCode.ERROR
job_id = args[1].lower()
if not is_valid_job_id(job_id):
conn.append_error(f"invalid job_id {job_id}", meta=make_meta(MetaStatusValue.INVALID_JOB_ID, job_id))
return PreAuthzReturnCode.ERROR
conn.set_prop(self.JOB_ID, job_id)
engine = conn.app_ctx
job_def_manager = engine.job_def_manager
with engine.new_context() as fl_ctx:
job = job_def_manager.get_job(job_id, fl_ctx)
if not job:
conn.append_error(
f"Job with ID {job_id} doesn't exist", meta=make_meta(MetaStatusValue.INVALID_JOB_ID, job_id)
)
return PreAuthzReturnCode.ERROR
conn.set_prop(self.JOB, job)
conn.set_prop(ConnProps.SUBMITTER_NAME, job.meta.get(JobMetaKey.SUBMITTER_NAME, ""))
conn.set_prop(ConnProps.SUBMITTER_ORG, job.meta.get(JobMetaKey.SUBMITTER_ORG, ""))
conn.set_prop(ConnProps.SUBMITTER_ROLE, job.meta.get(JobMetaKey.SUBMITTER_ROLE, ""))
if len(args) > 2:
err = self.validate_command_targets(conn, args[2:])
if err:
conn.append_error(err, meta=make_meta(MetaStatusValue.INVALID_TARGET, err))
return PreAuthzReturnCode.ERROR
return PreAuthzReturnCode.REQUIRE_AUTHZ
def _start_app_on_clients(self, conn: Connection, job_id: str) -> bool:
engine = conn.app_ctx
client_names = conn.get_prop(self.TARGET_CLIENT_NAMES, None)
run_process = engine.run_processes.get(job_id, {})
if not run_process:
conn.append_error(f"Job {job_id} is not running.")
return False
participants: Dict[str, Client] = run_process.get(RunProcessKey.PARTICIPANTS, {})
wrong_clients = []
for client in client_names:
client_valid = False
for _, p in participants.items():
if client == p.name:
client_valid = True
break
if not client_valid:
wrong_clients.append(client)
if wrong_clients:
display_clients = ",".join(wrong_clients)
conn.append_error(f"{display_clients} are not in the job running list.")
return False
err = engine.check_app_start_readiness(job_id)
if err:
conn.append_error(err)
return False
message = new_message(conn, topic=TrainingTopic.START, body="", require_authz=False)
message.set_header(RequestHeader.JOB_ID, job_id)
replies = self.send_request_to_clients(conn, message)
self.process_replies_to_table(conn, replies)
return True
def start_app(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
job_id = conn.get_prop(self.JOB_ID)
if len(args) < 3:
conn.append_error("Please provide the target name (client / all) for start_app command.")
return
target_type = args[2]
if target_type == self.TARGET_TYPE_SERVER:
# if not self._start_app_on_server(conn, job_id):
# return
conn.append_error("start_app command only supports client app start.")
return
elif target_type == self.TARGET_TYPE_CLIENT:
if not self._start_app_on_clients(conn, job_id):
return
else:
# # all
# success = self._start_app_on_server(conn, job_id)
#
# if success:
client_names = conn.get_prop(self.TARGET_CLIENT_NAMES, None)
if client_names:
if not self._start_app_on_clients(conn, job_id):
return
conn.append_success("")
def delete_job_id(self, conn: Connection, args: List[str]):
job_id = args[1]
engine = conn.app_ctx
if not isinstance(engine, ServerEngine):
raise TypeError("engine must be ServerEngine but got {}".format(type(engine)))
if job_id in engine.run_processes.keys():
conn.append_error(f"Current running run_{job_id} can not be deleted.")
return
err = engine.delete_job_id(job_id)
if err:
conn.append_error(err)
return
# ask clients to delete this RUN
message = new_message(conn, topic=TrainingTopic.DELETE_RUN, body="", require_authz=False)
message.set_header(RequestHeader.JOB_ID, str(job_id))
clients = engine.get_clients()
if clients:
conn.set_prop(self.TARGET_CLIENT_TOKENS, [x.token for x in clients])
replies = self.send_request_to_clients(conn, message)
self.process_replies_to_table(conn, replies)
conn.append_success("")
def list_jobs(self, conn: Connection, args: List[str]):
try:
parser = _create_list_job_cmd_parser()
parsed_args = parser.parse_args(args[1:])
engine = conn.app_ctx
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
jobs = job_def_manager.get_all_jobs(fl_ctx)
if jobs:
id_prefix = parsed_args.job_id
name_prefix = parsed_args.n
max_jobs_listed = parsed_args.m
user_name = conn.get_prop(ConnProps.USER_NAME, "") if parsed_args.u else None
filtered_jobs = [job for job in jobs if self._job_match(job.meta, id_prefix, name_prefix, user_name)]
if not filtered_jobs:
conn.append_string(
"No jobs matching the specified criteria.",
meta=make_meta(MetaStatusValue.OK, extra={MetaKey.JOBS: []}),
)
return
reverse = True if parsed_args.r else False
filtered_jobs.sort(key=lambda job: job.meta.get(JobMetaKey.SUBMIT_TIME.value, 0.0), reverse=reverse)
if max_jobs_listed:
if reverse:
filtered_jobs = filtered_jobs[:max_jobs_listed]
else:
filtered_jobs = filtered_jobs[-max_jobs_listed:]
if parsed_args.d:
self._send_detail_list(conn, filtered_jobs)
else:
self._send_summary_list(conn, filtered_jobs)
else:
conn.append_string("No jobs found.", meta=make_meta(MetaStatusValue.OK, extra={MetaKey.JOBS: []}))
except Exception as e:
conn.append_error(
secure_format_exception(e),
meta=make_meta(MetaStatusValue.INTERNAL_ERROR, info=secure_format_exception(e)),
)
return
conn.append_success("")
def delete_job(self, conn: Connection, args: List[str]):
job = conn.get_prop(self.JOB)
if not job:
conn.append_error(
"program error: job not set in conn", meta=make_meta(MetaStatusValue.INTERNAL_ERROR, "no job")
)
return
job_id = conn.get_prop(self.JOB_ID)
if job.meta.get(JobMetaKey.STATUS, "") in [RunStatus.DISPATCHED.value, RunStatus.RUNNING.value]:
conn.append_error(
f"job: {job_id} is running, could not be deleted at this time.",
meta=make_meta(MetaStatusValue.JOB_RUNNING, job_id),
)
return
try:
engine = conn.app_ctx
job_def_manager = engine.job_def_manager
with engine.new_context() as fl_ctx:
job_def_manager.delete(job_id, fl_ctx)
conn.append_string(f"Job {job_id} deleted.")
except Exception as e:
conn.append_error(
f"exception occurred: {secure_format_exception(e)}",
meta=make_meta(MetaStatusValue.INTERNAL_ERROR, f"exception {type(e)}"),
)
return
conn.append_success("", meta=make_meta(MetaStatusValue.OK))
def get_job_meta(self, conn: Connection, args: List[str]):
job_id = conn.get_prop(self.JOB_ID)
engine = conn.app_ctx
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
job = job_def_manager.get_job(jid=job_id, fl_ctx=fl_ctx)
if job:
conn.append_dict(job.meta, meta=make_meta(MetaStatusValue.OK, extra={MetaKey.JOB_META: job.meta}))
else:
conn.append_error(
f"job {job_id} does not exist", meta=make_meta(MetaStatusValue.INVALID_JOB_ID, job_id)
)
def abort_job(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
job_runner = engine.job_runner
try:
job_id = conn.get_prop(self.JOB_ID)
with engine.new_context() as fl_ctx:
job_manager = engine.job_def_manager
job = job_manager.get_job(job_id, fl_ctx)
job_status = job.meta.get(JobMetaKey.STATUS)
if job_status in [RunStatus.SUBMITTED, RunStatus.DISPATCHED]:
job_manager.set_status(job.job_id, RunStatus.FINISHED_ABORTED, fl_ctx)
message = f"Aborted the job {job_id} before running it."
conn.append_string(message)
conn.append_success("", meta=make_meta(MetaStatusValue.OK, message))
return
elif job_status.startswith("FINISHED:"):
message = f"Job for {job_id} is already completed."
conn.append_string(message)
conn.append_success("", meta=make_meta(MetaStatusValue.OK, message))
else:
message = job_runner.stop_run(job_id, fl_ctx)
if message:
conn.append_error(message, meta=make_meta(MetaStatusValue.INTERNAL_ERROR, message))
else:
message = "Abort signal has been sent to the server app."
conn.append_string(message)
conn.append_success("", meta=make_meta(MetaStatusValue.OK, message))
except Exception as e:
conn.append_error(
f"Exception occurred trying to abort job: {secure_format_exception(e)}",
meta=make_meta(MetaStatusValue.INTERNAL_ERROR, f"exception {type(e)}"),
)
return
def clone_job(self, conn: Connection, args: List[str]):
job = conn.get_prop(self.JOB)
job_id = conn.get_prop(self.JOB_ID)
engine = conn.app_ctx
try:
if not isinstance(engine, ServerEngine):
raise TypeError(f"engine is not of type ServerEngine, but got {type(engine)}")
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
data_bytes = job_def_manager.get_content(job_id, fl_ctx)
job_meta = {str(k): job.meta[k] for k in job.meta.keys() & CLONED_META_KEYS}
# set the submitter info for the new job
job_meta[JobMetaKey.SUBMITTER_NAME.value] = conn.get_prop(ConnProps.USER_NAME)
job_meta[JobMetaKey.SUBMITTER_ORG.value] = conn.get_prop(ConnProps.USER_ORG)
job_meta[JobMetaKey.SUBMITTER_ROLE.value] = conn.get_prop(ConnProps.USER_ROLE)
job_meta[JobMetaKey.CLONED_FROM.value] = job_id
meta = job_def_manager.create(job_meta, data_bytes, fl_ctx)
new_job_id = meta.get(JobMetaKey.JOB_ID)
conn.append_string("Cloned job {} as: {}".format(job_id, new_job_id))
except Exception as e:
conn.append_error(
f"Exception occurred trying to clone job: {secure_format_exception(e)}",
meta=make_meta(MetaStatusValue.INTERNAL_ERROR, f"exception {type(e)}"),
)
return
conn.append_success("", meta=make_meta(status=MetaStatusValue.OK, extra={MetaKey.JOB_ID: new_job_id}))
def authorize_list_files(self, conn: Connection, args: List[str]):
if len(args) < 2:
conn.append_error("syntax error: missing job_id")
            return PreAuthzReturnCode.ERROR
if len(args) > 3:
conn.append_error("syntax error: too many arguments")
            return PreAuthzReturnCode.ERROR
return self.authorize_job(conn=conn, args=args[:2])
def list_files(self, conn: Connection, args: List[str]):
job_id = conn.get_prop(self.JOB_ID)
if len(args) == 2:
conn.append_string("job\nworkspace\n\nSpecify the job or workspace dir to see detailed contents.")
return
else:
file = args[2]
engine = conn.app_ctx
try:
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
job_data = job_def_manager.get_job_data(job_id, fl_ctx)
if file.startswith(TopDir.JOB):
file = file[len(TopDir.JOB) :]
file = file.lstrip("/")
data_bytes = job_data[JobDataKey.JOB_DATA.value]
ls_info = ls_zip_from_bytes(data_bytes)
elif file.startswith(TopDir.WORKSPACE):
file = file[len(TopDir.WORKSPACE) :]
file = file.lstrip("/")
workspace_bytes = job_data[JobDataKey.WORKSPACE_DATA.value]
ls_info = ls_zip_from_bytes(workspace_bytes)
else:
conn.append_error("syntax error: top level directory must be job or workspace")
return
return_string = "%-46s %19s %12s\n" % ("File Name", "Modified ", "Size")
for zinfo in ls_info:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
if zinfo.filename.startswith(file):
return_string += "%-46s %s %12d\n" % (zinfo.filename, date, zinfo.file_size)
conn.append_string(return_string)
except Exception as e:
secure_log_traceback()
conn.append_error(f"Exception occurred trying to get job from store: {secure_format_exception(e)}")
return
conn.append_success("")
@staticmethod
def _job_match(job_meta: Dict, id_prefix: str, name_prefix: str, user_name: str) -> bool:
return (
((not id_prefix) or job_meta.get("job_id").lower().startswith(id_prefix.lower()))
and ((not name_prefix) or job_meta.get("name").lower().startswith(name_prefix.lower()))
and ((not user_name) or job_meta.get("submitter_name") == user_name)
)
@staticmethod
def _send_detail_list(conn: Connection, jobs: List[Job]):
list_of_jobs = []
for job in jobs:
JobCommandModule._set_duration(job)
conn.append_string(json.dumps(job.meta, indent=4))
list_of_jobs.append(job.meta)
conn.append_string("", meta=make_meta(MetaStatusValue.OK, extra={MetaKey.JOBS: list_of_jobs}))
@staticmethod
def _send_summary_list(conn: Connection, jobs: List[Job]):
table = conn.append_table(["Job ID", "Name", "Status", "Submit Time", "Run Duration"], name=MetaKey.JOBS)
for job in jobs:
JobCommandModule._set_duration(job)
table_row = [
job.meta.get(JobMetaKey.JOB_ID.value, ""),
CommandUtil.get_job_name(job.meta),
job.meta.get(JobMetaKey.STATUS.value, ""),
job.meta.get(JobMetaKey.SUBMIT_TIME_ISO.value, ""),
str(job.meta.get(JobMetaKey.DURATION.value, "N/A")),
]
table.add_row(
table_row,
meta={
MetaKey.JOB_ID: job.meta.get(JobMetaKey.JOB_ID.value, ""),
MetaKey.JOB_NAME: CommandUtil.get_job_name(job.meta),
MetaKey.STATUS: job.meta.get(JobMetaKey.STATUS.value, ""),
MetaKey.SUBMIT_TIME: job.meta.get(JobMetaKey.SUBMIT_TIME_ISO.value, ""),
MetaKey.DURATION: str(job.meta.get(JobMetaKey.DURATION.value, "N/A")),
},
)
@staticmethod
def _set_duration(job):
if job.meta.get(JobMetaKey.STATUS) == RunStatus.RUNNING.value:
start_time = datetime.datetime.strptime(job.meta.get(JobMetaKey.START_TIME.value), "%Y-%m-%d %H:%M:%S.%f")
duration = datetime.datetime.now() - start_time
job.meta[JobMetaKey.DURATION.value] = str(duration)
def submit_job(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
data_bytes = convert_legacy_zipped_app_to_job(b64str_to_bytes(zip_b64str))
engine = conn.app_ctx
try:
with engine.new_context() as fl_ctx:
job_validator = JobMetaValidator()
valid, error, meta = job_validator.validate(folder_name, data_bytes)
if not valid:
conn.append_error(error, meta=make_meta(MetaStatusValue.INVALID_JOB_DEFINITION, error))
return
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
# set submitter info
meta[JobMetaKey.SUBMITTER_NAME.value] = conn.get_prop(ConnProps.USER_NAME, "")
meta[JobMetaKey.SUBMITTER_ORG.value] = conn.get_prop(ConnProps.USER_ORG, "")
meta[JobMetaKey.SUBMITTER_ROLE.value] = conn.get_prop(ConnProps.USER_ROLE, "")
custom_props = conn.get_prop(ConnProps.CUSTOM_PROPS)
if custom_props:
meta[JobMetaKey.CUSTOM_PROPS.value] = custom_props
meta = job_def_manager.create(meta, data_bytes, fl_ctx)
job_id = meta.get(JobMetaKey.JOB_ID)
conn.append_string(f"Submitted job: {job_id}")
conn.append_success("", meta=make_meta(MetaStatusValue.OK, extra={MetaKey.JOB_ID: job_id}))
except Exception as e:
conn.append_error(
f"Exception occurred trying to submit job: {secure_format_exception(e)}",
meta=make_meta(MetaStatusValue.INTERNAL_ERROR, f"exception {type(e)} occurred"),
)
return
def _unzip_data(self, download_dir, job_data, job_id):
job_id_dir = os.path.join(download_dir, job_id)
if os.path.exists(job_id_dir):
shutil.rmtree(job_id_dir)
os.mkdir(job_id_dir)
data_bytes = job_data[JobDataKey.JOB_DATA.value]
job_dir = os.path.join(job_id_dir, "job")
os.mkdir(job_dir)
unzip_all_from_bytes(data_bytes, job_dir)
workspace_bytes = job_data[JobDataKey.WORKSPACE_DATA.value]
workspace_dir = os.path.join(job_id_dir, "workspace")
os.mkdir(workspace_dir)
if workspace_bytes is not None:
unzip_all_from_bytes(workspace_bytes, workspace_dir)
return job_id_dir
def pull_file(self, conn: Connection, args: List[str]):
if len(args) != 3:
self.logger.error("syntax error: missing file name")
return
self.download_file(conn, file_name=args[2])
def download_job(self, conn: Connection, args: List[str]):
job_id = args[1]
download_dir = conn.get_prop(ConnProps.DOWNLOAD_DIR)
self.logger.debug(f"pull_job called for {job_id}")
engine = conn.app_ctx
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
self.logger.error(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
conn.append_error("internal error", meta=make_meta(MetaStatusValue.INTERNAL_ERROR))
return
with engine.new_context() as fl_ctx:
job_def_manager.get_storage_for_download(job_id, download_dir, DATA, JOB_ZIP, fl_ctx)
job_def_manager.get_storage_for_download(job_id, download_dir, META, META_JSON, fl_ctx)
job_def_manager.get_storage_for_download(job_id, download_dir, WORKSPACE, WORKSPACE_ZIP, fl_ctx)
self.download_folder(
conn, job_id, download_file_cmd_name=AdminCommandNames.DOWNLOAD_JOB_FILE, control_id=job_id
)
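# A minimal illustrative sketch (hypothetical data): _job_match above filters by
# case-insensitive job-id and name prefixes plus an exact submitter name.
def _example_job_match():
    jobs = [
        {"job_id": "abc123", "name": "cifar10", "submitter_name": "alice"},
        {"job_id": "def456", "name": "mnist", "submitter_name": "bob"},
    ]
    hits = [m["job_id"] for m in jobs if JobCommandModule._job_match(m, "ABC", None, None)]
    print(hits)  # expected: ['abc123']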
| NVFlare-main | nvflare/private/fed/server/job_cmds.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FL Admin commands."""
import copy
import logging
import time
from abc import ABC, abstractmethod
from typing import List
from nvflare.apis.fl_constant import (
AdminCommandNames,
FLContextKey,
MachineStatus,
ServerCommandKey,
ServerCommandNames,
)
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.private.defs import SpecialTaskName, TaskConstant
from nvflare.widgets.widget import WidgetID
NO_OP_REPLY = "__no_op_reply"
class CommandProcessor(ABC):
"""The CommandProcessor is responsible for processing a command from parent process."""
def __init__(self) -> None:
self.logger = logging.getLogger(self.__class__.__name__)
@abstractmethod
def get_command_name(self) -> str:
"""Gets the command name that this processor will handle.
Returns:
name of the command
"""
pass
@abstractmethod
def process(self, data: Shareable, fl_ctx: FLContext):
"""Processes the data.
Args:
data: process data
fl_ctx: FLContext
Return:
A reply message
"""
pass
class ServerStateCheck(ABC):
"""Server command requires the server state check"""
@abstractmethod
def get_state_check(self, fl_ctx: FLContext) -> dict:
"""Get the state check data for the server command.
Args:
fl_ctx: FLContext
Returns: server state check dict data
"""
pass
class AbortCommand(CommandProcessor):
"""To implement the abort command."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: AdminCommandNames.ABORT
"""
return AdminCommandNames.ABORT
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
Returns: abort command message
"""
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
# for HA server switch over
turn_to_cold = data.get_header(ServerCommandKey.TURN_TO_COLD, False)
if server_runner:
server_runner.abort(fl_ctx=fl_ctx, turn_to_cold=turn_to_cold)
            # wait for the runner process to gracefully abort the run.
engine = fl_ctx.get_engine()
start_time = time.time()
while engine.engine_info.status != MachineStatus.STOPPED:
time.sleep(1.0)
if time.time() - start_time > 30.0:
break
return "Aborted the run"
class GetRunInfoCommand(CommandProcessor):
"""Implements the GET_RUN_INFO command."""
def get_command_name(self) -> str:
return ServerCommandNames.GET_RUN_INFO
def process(self, data: Shareable, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
run_info = engine.get_run_info()
if run_info:
return run_info
return NO_OP_REPLY
class GetTaskCommand(CommandProcessor, ServerStateCheck):
"""To implement the server GetTask command."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: ServerCommandNames.GET_TASK
"""
return ServerCommandNames.GET_TASK
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
Returns: task data
"""
start_time = time.time()
shared_fl_ctx = data.get_header(ServerCommandKey.PEER_FL_CONTEXT)
data.set_header(ServerCommandKey.PEER_FL_CONTEXT, FLContext())
client = data.get_header(ServerCommandKey.FL_CLIENT)
self.logger.debug(f"Got the GET_TASK request from client: {client.name}")
fl_ctx.set_peer_context(shared_fl_ctx)
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
if not server_runner:
# this is possible only when the client request is received before the
# server_app_runner.start_server_app is called in runner_process.py
# We ask the client to try again later.
taskname = SpecialTaskName.TRY_AGAIN
task_id = ""
shareable = Shareable()
shareable.set_header(TaskConstant.WAIT_TIME, 1.0)
else:
taskname, task_id, shareable = server_runner.process_task_request(client, fl_ctx)
# we need TASK_ID back as a cookie
if not shareable:
shareable = Shareable()
shareable.add_cookie(name=FLContextKey.TASK_ID, data=task_id)
# we also need to make TASK_ID available to the client
shareable.set_header(key=FLContextKey.TASK_ID, value=task_id)
shareable.set_header(key=ServerCommandKey.TASK_NAME, value=taskname)
shared_fl_ctx = FLContext()
shared_fl_ctx.set_public_props(copy.deepcopy(get_serializable_data(fl_ctx).get_all_public_props()))
shareable.set_header(key=FLContextKey.PEER_CONTEXT, value=shared_fl_ctx)
if taskname != SpecialTaskName.TRY_AGAIN:
self.logger.info(
f"return task to client. client_name: {client.name} task_name: {taskname} task_id: {task_id} "
f"sharable_header_task_id: {shareable.get_header(key=FLContextKey.TASK_ID)}"
)
self.logger.debug(f"Get_task processing time: {time.time()-start_time} for client: {client.name}")
return shareable
def get_state_check(self, fl_ctx: FLContext) -> dict:
engine = fl_ctx.get_engine()
server_state = engine.server.server_state
return server_state.get_task(fl_ctx)
class SubmitUpdateCommand(CommandProcessor, ServerStateCheck):
"""To implement the server GetTask command."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: ServerCommandNames.SUBMIT_UPDATE
"""
return ServerCommandNames.SUBMIT_UPDATE
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
Returns:
"""
start_time = time.time()
shared_fl_ctx = data.get_header(ServerCommandKey.PEER_FL_CONTEXT)
data.set_header(ServerCommandKey.PEER_FL_CONTEXT, FLContext())
shared_fl_ctx.set_prop(FLContextKey.SHAREABLE, data, private=True)
client = data.get_header(ServerCommandKey.FL_CLIENT)
fl_ctx.set_peer_context(shared_fl_ctx)
contribution_task_name = data.get_header(FLContextKey.TASK_NAME)
task_id = data.get_cookie(FLContextKey.TASK_ID)
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
server_runner.process_submission(client, contribution_task_name, task_id, data, fl_ctx)
self.logger.info(f"submit_update process. client_name:{client.name} task_id:{task_id}")
self.logger.debug(f"Submit_result processing time: {time.time()-start_time} for client: {client.name}")
return ""
def get_state_check(self, fl_ctx: FLContext) -> dict:
engine = fl_ctx.get_engine()
server_state = engine.server.server_state
return server_state.submit_result(fl_ctx)
class HandleDeadJobCommand(CommandProcessor):
"""To implement the server HandleDeadJob command."""
def get_command_name(self) -> str:
"""To get the command name.
        Returns: ServerCommandNames.HANDLE_DEAD_JOB
"""
return ServerCommandNames.HANDLE_DEAD_JOB
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the HandleDeadJob command.
Args:
data: process data
fl_ctx: FLContext
Returns:
"""
client_name = data.get_header(ServerCommandKey.FL_CLIENT)
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
if server_runner:
server_runner.handle_dead_job(client_name, fl_ctx)
return ""
class ShowStatsCommand(CommandProcessor):
"""To implement the show_stats command."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: ServerCommandNames.SHOW_STATS
"""
return ServerCommandNames.SHOW_STATS
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
        Returns: engine run stats
"""
engine = fl_ctx.get_engine()
collector = engine.get_widget(WidgetID.INFO_COLLECTOR)
return collector.get_run_stats()
class GetErrorsCommand(CommandProcessor):
"""To implement the show_errors command."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: ServerCommandNames.GET_ERRORS
"""
return ServerCommandNames.GET_ERRORS
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
        Returns: engine errors
"""
engine = fl_ctx.get_engine()
collector = engine.get_widget(WidgetID.INFO_COLLECTOR)
errors = collector.get_errors()
if not errors:
errors = "No Error"
return errors
class ResetErrorsCommand(CommandProcessor):
"""To implement the show_errors command."""
def get_command_name(self) -> str:
"""To get the command name.
        Returns: ServerCommandNames.RESET_ERRORS
"""
return ServerCommandNames.RESET_ERRORS
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the abort command.
Args:
data: process data
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
collector = engine.get_widget(WidgetID.INFO_COLLECTOR)
collector.reset_errors()
return None
class ByeCommand(CommandProcessor):
"""To implement the ShutdownCommand."""
def get_command_name(self) -> str:
"""To get the command name.
Returns: AdminCommandNames.SHUTDOWN
"""
return AdminCommandNames.SHUTDOWN
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the Shutdown command.
Args:
data: process data
fl_ctx: FLContext
Returns: Shutdown command message
"""
return None
class HeartbeatCommand(CommandProcessor):
"""To implement the HEARTBEATCommand."""
def get_command_name(self) -> str:
"""To get the command name.
        Returns: ServerCommandNames.HEARTBEAT
"""
return ServerCommandNames.HEARTBEAT
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the HEARTBEAT command.
Args:
data: process data
fl_ctx: FLContext
"""
return None
class ServerStateCommand(CommandProcessor):
"""To implement the ServerStateCommand."""
def get_command_name(self) -> str:
"""To get the command name.
        Returns: ServerCommandNames.SERVER_STATE
"""
return ServerCommandNames.SERVER_STATE
def process(self, data: Shareable, fl_ctx: FLContext):
"""Called to process the SERVER_STATE command.
Args:
data: ServerState object
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
engine.server.server_state = data
return "Success"
class ServerCommands(object):
"""AdminCommands contains all the commands for processing the commands from the parent process."""
commands: List[CommandProcessor] = [
AbortCommand(),
ByeCommand(),
GetRunInfoCommand(),
GetTaskCommand(),
SubmitUpdateCommand(),
HandleDeadJobCommand(),
ShowStatsCommand(),
GetErrorsCommand(),
ResetErrorsCommand(),
HeartbeatCommand(),
ServerStateCommand(),
]
client_request_commands_names = [
ServerCommandNames.GET_TASK,
ServerCommandNames.SUBMIT_UPDATE,
# ServerCommandNames.AUX_COMMUNICATE,
]
@staticmethod
def get_command(command_name):
"""Call to return the AdminCommand object.
Args:
command_name: AdminCommand name
Returns: AdminCommand object
"""
for command in ServerCommands.commands:
if command_name == command.get_command_name():
return command
return None
@staticmethod
def register_command(command_processor: CommandProcessor):
"""Call to register the AdminCommand processor.
Args:
command_processor: AdminCommand processor
"""
if not isinstance(command_processor, CommandProcessor):
raise TypeError(
"command_processor must be an instance of CommandProcessor, but got {}".format(type(command_processor))
)
ServerCommands.commands.append(command_processor)
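# A minimal illustrative sketch (the command name "example_ping" is made up): how an
# additional processor can be added to the registry via ServerCommands.register_command.
class _ExamplePingCommand(CommandProcessor):
    def get_command_name(self) -> str:
        return "example_ping"
    def process(self, data: Shareable, fl_ctx: FLContext):
        return "pong"
def _example_register_ping_command():
    ServerCommands.register_command(_ExamplePingCommand())
    assert ServerCommands.get_command("example_ping") is not None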
| NVFlare-main | nvflare/private/fed/server/server_commands.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from typing import List
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import AdminCommandNames
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import ConfirmMethod, MetaKey, MetaStatusValue, make_meta
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.private.admin_defs import MsgHeader, ReturnCode
from nvflare.private.defs import ClientStatusKey, ScopeInfoKey, TrainingTopic
from nvflare.private.fed.server.admin import new_message
from nvflare.private.fed.server.server_engine_internal_spec import ServerEngineInternalSpec
from nvflare.private.fed.utils.fed_utils import get_scope_info
from nvflare.security.logging import secure_format_exception
from .cmd_utils import CommandUtil
from .server_engine import ServerEngine
class TrainingCommandModule(CommandModule, CommandUtil):
def __init__(self):
"""A class for training commands."""
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def get_spec(self):
return CommandModuleSpec(
name="training",
cmd_specs=[
CommandSpec(
name=AdminCommandNames.CHECK_STATUS,
description="check status of the FL server/client",
usage="check_status server|client",
handler_func=self.check_status,
authz_func=self.authorize_server_operation,
visible=True,
),
CommandSpec(
name=AdminCommandNames.REMOVE_CLIENT,
description="remove a FL client",
usage="remove_client <client-name>",
handler_func=self.remove_client,
authz_func=self.authorize_client_operation,
visible=True,
confirm=ConfirmMethod.AUTH,
),
CommandSpec(
name=AdminCommandNames.ADMIN_CHECK_STATUS,
description="check status for project admin",
usage="admin_check_status server|client",
handler_func=self.check_status,
authz_func=self.must_be_project_admin,
visible=False,
),
CommandSpec(
name=AdminCommandNames.SHUTDOWN,
description="shutdown the FL server/client",
usage="shutdown server|client|all",
handler_func=self.shutdown,
authz_func=self.authorize_server_operation,
visible=True,
confirm=ConfirmMethod.AUTH,
),
CommandSpec(
name=AdminCommandNames.RESTART,
description="restart FL server and/or clients",
usage="restart server|client|all [clients]",
handler_func=self.restart,
authz_func=self.authorize_server_operation,
visible=True,
confirm=ConfirmMethod.AUTH,
),
CommandSpec(
name=AdminCommandNames.SHOW_SCOPES,
description="show configured scope names on server/client",
usage="show_scopes server|client|all ...",
handler_func=self.show_scopes,
authz_func=self.authorize_server_operation,
visible=True,
),
],
)
# Shutdown
def _shutdown_app_on_server(self, conn: Connection) -> str:
engine = conn.app_ctx
err = engine.shutdown_server()
if err:
conn.append_error(err)
return err
else:
conn.append_string("FL app has been shutdown.")
conn.append_shutdown("Bye bye")
return ""
def _shutdown_app_on_clients(self, conn: Connection) -> bool:
message = new_message(conn, topic=TrainingTopic.SHUTDOWN, body="", require_authz=True)
clients = conn.get_prop(self.TARGET_CLIENT_TOKENS, None)
if not clients:
# no clients to shut down - this is okay
return True
replies = self.send_request_to_clients(conn, message)
self.process_replies_to_table(conn, replies)
clients_to_be_removed = set(clients)
for r in replies:
if r.reply and r.reply.get_header(MsgHeader.RETURN_CODE) == ReturnCode.ERROR:
clients_to_be_removed.remove(r.client_token)
result = True
if clients_to_be_removed != set(clients):
            # some clients could not be shut down
result = False
return result
def shutdown(self, conn: Connection, args: List[str]):
target_type = args[1]
engine = conn.app_ctx
if not isinstance(engine, ServerEngine):
raise TypeError("engine must be ServerEngine but got {}".format(type(engine)))
for _, job in engine.job_runner.running_jobs.items():
if not job.run_aborted:
conn.append_error(
"There are still jobs running. Please let them finish or abort_job before shutdown.",
meta=make_meta(MetaStatusValue.JOB_RUNNING, info=job.job_id),
)
return
if target_type == self.TARGET_TYPE_SERVER:
if engine.get_clients():
conn.append_error(
"There are still active clients. Shutdown all clients first.",
meta=make_meta(MetaStatusValue.CLIENTS_RUNNING),
)
return
if target_type in [self.TARGET_TYPE_CLIENT, self.TARGET_TYPE_ALL]:
# must shut down clients first
success = self._shutdown_app_on_clients(conn)
if not success:
conn.update_meta(make_meta(MetaStatusValue.ERROR, "failed to shut down all clients"))
return
if target_type in [self.TARGET_TYPE_SERVER, self.TARGET_TYPE_ALL]:
# shut down the server
err = self._shutdown_app_on_server(conn)
if err:
conn.update_meta(make_meta(MetaStatusValue.ERROR, info=err))
return
conn.append_success("")
# Remove Clients
def remove_client(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
clients = conn.get_prop(self.TARGET_CLIENT_TOKENS)
err = engine.remove_clients(clients)
if err:
conn.append_error(err)
return
conn.append_success("")
# Restart
def _restart_clients(self, conn) -> str:
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
message = new_message(conn, topic=TrainingTopic.RESTART, body="", require_authz=True)
replies = self.send_request_to_clients(conn, message)
# engine.remove_clients(clients)
return self._process_replies_to_string(conn, replies)
def restart(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
if not isinstance(engine, ServerEngine):
raise TypeError("engine must be ServerEngine but got {}".format(type(engine)))
if engine.job_runner.running_jobs:
msg = "There are still jobs running. Please let them finish or abort_job before restart."
conn.append_error(msg, meta=make_meta(MetaStatusValue.JOB_RUNNING, msg))
return
target_type = args[1]
if target_type in [self.TARGET_TYPE_SERVER, self.TARGET_TYPE_ALL]:
clients = engine.get_clients()
if clients:
conn.append_string("Trying to restart all clients before restarting server...")
tokens = [c.token for c in clients]
conn.set_prop(
self.TARGET_CLIENT_TOKENS, tokens
) # need this because not set in validate_command_targets when target_type == self.TARGET_TYPE_SERVER
response = self._restart_clients(conn)
conn.append_string(response)
# check with Isaac - no need to wait!
# time.sleep(5)
err = engine.restart_server()
if err:
conn.append_error(err, meta={MetaKey.SERVER_STATUS: MetaStatusValue.ERROR, MetaKey.INFO: err})
else:
conn.append_string("Server scheduled for restart", meta={MetaKey.SERVER_STATUS: MetaStatusValue.OK})
elif target_type == self.TARGET_TYPE_CLIENT:
clients = conn.get_prop(self.TARGET_CLIENT_TOKENS)
if not clients:
conn.append_error("no clients available", meta=make_meta(MetaStatusValue.NO_CLIENTS, "no clients"))
return
else:
response = self._restart_clients(conn)
conn.append_string(response)
conn.append_success("")
# Check status
def check_status(self, conn: Connection, args: List[str]):
        # TODO: Need more discussion on what status should be shown
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
dst = args[1]
if dst in [self.TARGET_TYPE_SERVER, self.TARGET_TYPE_ALL]:
engine_info = engine.get_engine_info()
conn.append_string(
f"Engine status: {engine_info.status.value}",
meta=make_meta(
MetaStatusValue.OK,
extra={
MetaKey.SERVER_STATUS: engine_info.status.value,
MetaKey.SERVER_START_TIME: engine_info.start_time,
},
),
)
table = conn.append_table(["job_id", "app name"], name=MetaKey.JOBS)
for job_id, app_name in engine_info.app_names.items():
table.add_row([job_id, app_name], meta={MetaKey.APP_NAME: app_name, MetaKey.JOB_ID: job_id})
clients = engine.get_clients()
conn.append_string("Registered clients: {} ".format(len(clients)))
if clients:
table = conn.append_table(["client", "token", "last connect time"], name=MetaKey.CLIENTS)
for c in clients:
if not isinstance(c, Client):
raise TypeError("c must be Client but got {}".format(type(c)))
table.add_row(
[c.name, str(c.token), time.asctime(time.localtime(c.last_connect_time))],
meta={MetaKey.CLIENT_NAME: c.name, MetaKey.CLIENT_LAST_CONNECT_TIME: c.last_connect_time},
)
if dst in [self.TARGET_TYPE_CLIENT, self.TARGET_TYPE_ALL]:
message = new_message(conn, topic=TrainingTopic.CHECK_STATUS, body="", require_authz=True)
replies = self.send_request_to_clients(conn, message)
self._process_client_status_replies(conn, replies)
if dst not in [self.TARGET_TYPE_ALL, self.TARGET_TYPE_CLIENT, self.TARGET_TYPE_SERVER]:
conn.append_error(
f"invalid target type {dst}. Usage: check_status server|client ...",
meta=make_meta(MetaStatusValue.SYNTAX_ERROR, f"invalid target type {dst}"),
)
def _process_client_status_replies(self, conn, replies):
if not replies:
conn.append_error("no responses from clients")
return
table = conn.append_table(["client", "app_name", "job_id", "status"], name=MetaKey.CLIENT_STATUS)
for r in replies:
job_id = "?"
app_name = "?"
client_name = r.client_name
if r.reply:
if r.reply.get_header(MsgHeader.RETURN_CODE) == ReturnCode.ERROR:
table.add_row(
[client_name, app_name, job_id, r.reply.body],
meta={MetaKey.CLIENT_NAME: client_name, MetaKey.STATUS: MetaStatusValue.ERROR},
)
else:
try:
body = json.loads(r.reply.body)
if isinstance(body, dict):
running_jobs = body.get(ClientStatusKey.RUNNING_JOBS)
if running_jobs:
for job in running_jobs:
app_name = job.get(ClientStatusKey.APP_NAME, "?")
job_id = job.get(ClientStatusKey.JOB_ID, "?")
status = job.get(ClientStatusKey.STATUS, "?")
table.add_row(
[client_name, app_name, job_id, status],
meta={
MetaKey.CLIENT_NAME: client_name,
MetaKey.APP_NAME: app_name,
MetaKey.JOB_ID: job_id,
MetaKey.STATUS: status,
},
)
else:
table.add_row(
[client_name, app_name, job_id, "No Jobs"],
meta={MetaKey.CLIENT_NAME: client_name, MetaKey.STATUS: MetaStatusValue.NO_JOBS},
)
except Exception as e:
self.logger.error(f"Bad reply from client: {secure_format_exception(e)}")
else:
table.add_row(
[client_name, app_name, job_id, "No Reply"],
meta={MetaKey.CLIENT_NAME: client_name, MetaKey.STATUS: MetaStatusValue.NO_REPLY},
)
def _add_scope_info(self, table, site_name, scope_names: List[str], default_scope: str):
if not scope_names:
names = ""
else:
names = ", ".join(scope_names)
table.add_row([site_name, names, default_scope])
def _process_scope_replies(self, table, conn, replies):
if not replies:
conn.append_error("no responses from clients")
return
for r in replies:
client_name = r.client_name
if r.reply:
if r.reply.get_header(MsgHeader.RETURN_CODE) == ReturnCode.ERROR:
self._add_scope_info(table, client_name, r.reply.body, "")
else:
try:
body = json.loads(r.reply.body)
if isinstance(body, dict):
scope_names = body.get(ScopeInfoKey.SCOPE_NAMES)
default_scope = body.get(ScopeInfoKey.DEFAULT_SCOPE)
self._add_scope_info(table, client_name, scope_names, default_scope)
else:
conn.append_error(
f"bad response from client {client_name}: expect dict but got {type(body)}"
)
except Exception as e:
self.logger.error(f"Bad reply from client: {secure_format_exception(e)}")
conn.append_error(f"bad response from client {client_name}: {secure_format_exception(e)}")
else:
self._add_scope_info(table, client_name, [], "no reply")
def show_scopes(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
dst = args[1]
table = conn.append_table(["site", "scopes", "default"])
if dst in [self.TARGET_TYPE_SERVER, self.TARGET_TYPE_ALL]:
# get the server's scope info
scope_names, default_scope_name = get_scope_info()
self._add_scope_info(table, "server", scope_names, default_scope_name)
if dst in [self.TARGET_TYPE_CLIENT, self.TARGET_TYPE_ALL]:
message = new_message(conn, topic=TrainingTopic.GET_SCOPES, body="", require_authz=True)
replies = self.send_request_to_clients(conn, message)
self._process_scope_replies(table, conn, replies)
| NVFlare-main | nvflare/private/fed/server/training_cmds.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
from typing import List
from nvflare.fuel.hci.cmd_arg_utils import join_args
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import MetaStatusValue, make_meta
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.authz import PreAuthzReturnCode
from nvflare.fuel.hci.shell_cmd_val import (
CatValidator,
GrepValidator,
HeadValidator,
LsValidator,
ShellCommandValidator,
TailValidator,
)
from nvflare.private.admin_defs import Message
from nvflare.private.defs import SysCommandTopic
from nvflare.private.fed.server.admin import new_message
from nvflare.private.fed.server.message_send import ClientReply
from nvflare.private.fed.server.server_engine_internal_spec import ServerEngineInternalSpec
class _CommandExecutor(object):
def __init__(self, cmd_name: str, validator: ShellCommandValidator):
self.cmd_name = cmd_name
self.validator = validator
def authorize_command(self, conn: Connection, args: List[str]):
if len(args) < 2:
conn.append_error("syntax error: missing target")
return PreAuthzReturnCode.ERROR
shell_cmd_args = [self.cmd_name]
for a in args[2:]:
shell_cmd_args.append(a)
shell_cmd = join_args(shell_cmd_args)
result = None
if self.validator:
err, result = self.validator.validate(shell_cmd_args[1:])
if len(err) > 0:
conn.append_error(err)
return PreAuthzReturnCode.ERROR
# validate the command and make sure file destinations are protected
err = self.validate_shell_command(shell_cmd_args, result)
if len(err) > 0:
conn.append_error(err)
return PreAuthzReturnCode.ERROR
site_name = args[1]
conn.set_prop("shell_cmd", shell_cmd)
conn.set_prop("target_site", site_name)
if site_name == "server":
return PreAuthzReturnCode.REQUIRE_AUTHZ
else:
# client site authorization will be done by the client itself
return PreAuthzReturnCode.OK
def validate_shell_command(self, args: List[str], parse_result) -> str:
return ""
def execute_command(self, conn: Connection, args: List[str]):
target = conn.get_prop("target_site")
shell_cmd = conn.get_prop("shell_cmd")
if target == "server":
# run the shell command on server
output = subprocess.getoutput(shell_cmd)
conn.append_string(output)
return
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
clients, invalid_inputs = engine.validate_targets([target])
if len(invalid_inputs) > 0:
msg = f"invalid target: {target}"
conn.append_error(msg, meta=make_meta(MetaStatusValue.INVALID_TARGET, info=msg))
return
if len(clients) > 1:
msg = "this command can only be applied to one client at a time"
conn.append_error(msg, meta=make_meta(MetaStatusValue.INVALID_TARGET, info=msg))
return
valid_tokens = []
for c in clients:
valid_tokens.append(c.token)
req = new_message(conn=conn, topic=SysCommandTopic.SHELL, body=shell_cmd, require_authz=True)
server = conn.server
reply = server.send_request_to_client(req, valid_tokens[0], timeout_secs=server.timeout)
if reply is None:
conn.append_error(
"no reply from client - timed out", meta=make_meta(MetaStatusValue.INTERNAL_ERROR, "client timeout")
)
return
if not isinstance(reply, ClientReply):
raise TypeError("reply must be ClientReply but got {}".format(type(reply)))
if reply.reply is None:
conn.append_error(
"no reply from client - timed out", meta=make_meta(MetaStatusValue.INTERNAL_ERROR, "client timeout")
)
return
if not isinstance(reply.reply, Message):
raise TypeError("reply in ClientReply must be Message but got {}".format(type(reply.reply)))
conn.append_string(reply.reply.body)
def get_usage(self):
if self.validator:
return self.validator.get_usage()
else:
return ""
class _NoArgCmdExecutor(_CommandExecutor):
def __init__(self, cmd_name: str):
_CommandExecutor.__init__(self, cmd_name, None)
def validate_shell_command(self, args: List[str], parse_result):
if len(args) != 1:
return "this command does not accept extra args"
return ""
class _FileCmdExecutor(_CommandExecutor):
def __init__(
self,
cmd_name: str,
validator: ShellCommandValidator,
text_file_only: bool = True,
single_file_only: bool = True,
file_required: bool = True,
):
_CommandExecutor.__init__(self, cmd_name, validator)
self.text_file_only = text_file_only
self.single_file_only = single_file_only
self.file_required = file_required
def validate_shell_command(self, args: List[str], parse_result):
        # use getattr so that the hasattr check below can run even when "files" is absent
        if self.file_required or getattr(parse_result, "files", None):
            if not hasattr(parse_result, "files"):
                return "a file is required as an argument"
if self.single_file_only and len(parse_result.files) != 1:
return "only one file is allowed"
if isinstance(parse_result.files, list):
file_list = parse_result.files
else:
file_list = [parse_result.files]
for f in file_list:
if not isinstance(f, str):
raise TypeError("file must be str but got {}".format(type(f)))
if not re.match("^[A-Za-z0-9-._/]*$", f):
return "unsupported file {}".format(f)
if f.startswith("/"):
return "absolute path is not allowed"
paths = f.split("/")
for p in paths:
if p == "..":
return ".. in path name is not allowed"
if self.text_file_only:
basename, file_extension = os.path.splitext(f)
if file_extension not in [".txt", ".log", ".json", ".csv", ".sh", ".config", ".py"]:
return (
"this command cannot be applied to file {}. Only files with the following extensions "
"are permitted: .txt, .log, .json, .csv, .sh, .config, .py".format(f)
)
return ""
class ShellCommandModule(CommandModule):
def get_spec(self):
pwd_exe = _NoArgCmdExecutor("pwd")
ls_exe = _FileCmdExecutor(
"ls", LsValidator(), text_file_only=False, single_file_only=False, file_required=False
)
cat_exe = _FileCmdExecutor("cat", CatValidator())
head_exe = _FileCmdExecutor("head", HeadValidator())
tail_exe = _FileCmdExecutor("tail", TailValidator())
grep_exe = _FileCmdExecutor("grep", GrepValidator())
return CommandModuleSpec(
name="sys",
cmd_specs=[
CommandSpec(
name="pwd",
description="print the name of work directory",
usage="pwd target\n" + 'where target is "server" or client name\n' + pwd_exe.get_usage(),
handler_func=pwd_exe.execute_command,
authz_func=pwd_exe.authorize_command,
visible=True,
),
CommandSpec(
name="ls",
description="list files in work dir",
usage="ls target [options] [files]\n "
+ 'where target is "server" or client name\n'
+ ls_exe.get_usage(),
handler_func=ls_exe.execute_command,
authz_func=ls_exe.authorize_command,
visible=True,
),
CommandSpec(
name="cat",
description="show content of a file",
usage="cat target [options] fileName\n "
+ 'where target is "server" or client name\n'
+ cat_exe.get_usage(),
handler_func=cat_exe.execute_command,
authz_func=cat_exe.authorize_command,
visible=True,
),
CommandSpec(
name="head",
description="print the first 10 lines of a file",
usage="head target [options] fileName\n "
+ 'where target is "server" or client name\n'
+ head_exe.get_usage(),
handler_func=head_exe.execute_command,
authz_func=head_exe.authorize_command,
visible=True,
),
CommandSpec(
name="tail",
description="print the last 10 lines of a file",
usage="tail target [options] fileName\n "
+ 'where target is "server" or client name\n'
+ tail_exe.get_usage(),
handler_func=tail_exe.execute_command,
authz_func=tail_exe.authorize_command,
visible=True,
),
CommandSpec(
name="grep",
description="search for PATTERN in a file.",
usage="grep target [options] PATTERN fileName\n "
+ 'where target is "server" or client name\n'
+ grep_exe.get_usage(),
handler_func=grep_exe.execute_command,
authz_func=grep_exe.authorize_command,
visible=True,
),
],
)
| NVFlare-main | nvflare/private/fed/server/shell_cmd.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.fl_constant import FLContextKey, MachineStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.workspace import Workspace
from nvflare.private.fed.app.fl_conf import create_privacy_manager
from nvflare.private.fed.runner import Runner
from nvflare.private.fed.server.server_engine import ServerEngine
from nvflare.private.fed.server.server_json_config import ServerJsonConfigurator
from nvflare.private.fed.server.server_status import ServerStatus
from nvflare.private.fed.utils.fed_utils import authorize_build_component
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
def _set_up_run_config(workspace: Workspace, server, conf):
runner_config = conf.runner_config
# configure privacy control!
privacy_manager = create_privacy_manager(workspace, names_only=False, is_server=True)
if privacy_manager.is_policy_defined():
if privacy_manager.components:
for cid, comp in privacy_manager.components.items():
runner_config.add_component(cid, comp)
# initialize Privacy Service
PrivacyService.initialize(privacy_manager)
server.heart_beat_timeout = conf.heartbeat_timeout
server.runner_config = conf.runner_config
server.handlers = conf.handlers
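# Editorial note: components coming from the site privacy policy are merged into the runner
# config above, next to the components declared in the job configuration, and PrivacyService
# is initialized before the server run starts.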
class ServerAppRunner(Runner):
def __init__(self, server) -> None:
super().__init__()
self.server = server
def start_server_app(
self, workspace: Workspace, args, app_root, job_id, snapshot, logger, kv_list=None, event_handlers=None
):
try:
server_config_file_name = os.path.join(app_root, args.server_config)
conf = ServerJsonConfigurator(
config_file_name=server_config_file_name, app_root=app_root, args=args, kv_list=kv_list
)
if event_handlers:
fl_ctx = FLContext()
fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=False)
fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job_id, private=False, sticky=False)
fl_ctx.set_prop(FLContextKey.CURRENT_RUN, job_id, private=False, sticky=False)
conf.set_component_build_authorizer(
authorize_build_component, fl_ctx=fl_ctx, event_handlers=event_handlers
)
conf.configure()
_set_up_run_config(workspace, self.server, conf)
if not isinstance(self.server.engine, ServerEngine):
raise TypeError(f"server.engine must be ServerEngine. Got type:{type(self.server.engine).__name__}")
self.sync_up_parents_process(args)
self.server.start_run(job_id, app_root, conf, args, snapshot)
except Exception as e:
with self.server.engine.new_context() as fl_ctx:
fl_ctx.set_prop(key=FLContextKey.FATAL_SYSTEM_ERROR, value=True, private=True, sticky=True)
logger.exception(f"FL server execution exception: {secure_format_exception(e)}")
raise e
finally:
self.update_job_run_status()
self.server.status = ServerStatus.STOPPED
self.server.engine.engine_info.status = MachineStatus.STOPPED
self.server.stop_training()
def sync_up_parents_process(self, args):
self.server.engine.sync_clients_from_main_process()
def update_job_run_status(self):
self.server.engine.update_job_run_status()
def stop(self):
self.server.engine.asked_to_stop = True
| NVFlare-main | nvflare/private/fed/server/server_app_runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from nvflare.apis.client import Client
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FilterKey, FLContextKey, ReservedKey, ReservedTopic, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.apis.utils.fl_context_utils import add_job_audit_event
from nvflare.apis.utils.task_utils import apply_filters
from nvflare.private.defs import SpecialTaskName, TaskConstant
from nvflare.private.privacy_manager import Scope
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
class ServerRunnerConfig(object):
def __init__(
self,
heartbeat_timeout: int,
task_request_interval: float,
workflows: [],
task_data_filters: dict,
task_result_filters: dict,
handlers=None,
components=None,
):
"""Configuration for ServerRunner.
Args:
heartbeat_timeout (int): Client heartbeat timeout in seconds
task_request_interval (float): Task request interval in seconds
workflows (list): A list of workflow
task_data_filters (dict): A dict of {task_name: list of filters apply to data (pre-process)}
task_result_filters (dict): A dict of {task_name: list of filters apply to result (post-process)}
handlers (list, optional): A list of event handlers
components (dict, optional): A dict of extra python objects {id: object}
"""
self.heartbeat_timeout = heartbeat_timeout
self.task_request_interval = task_request_interval
self.workflows = workflows
self.task_data_filters = task_data_filters
self.task_result_filters = task_result_filters
self.handlers = handlers
self.components = components
def add_component(self, comp_id: str, component: object):
if not isinstance(comp_id, str):
raise TypeError(f"component id must be str but got {type(comp_id)}")
if comp_id in self.components:
raise ValueError(f"duplicate component id {comp_id}")
self.components[comp_id] = component
if isinstance(component, FLComponent):
self.handlers.append(component)
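    # Minimal usage sketch (the component id and class below are hypothetical, not from the
    # original source). Given a ServerRunnerConfig instance `config`:
    #
    #   config.add_component("my_metrics_logger", MyMetricsLogger())
    #
    # If the object is an FLComponent it is also appended to the handler list, so it will
    # receive events fired during the run.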
class ServerRunner(FLComponent):
ABORT_RETURN_CODES = [
ReturnCode.RUN_MISMATCH,
ReturnCode.TASK_UNKNOWN,
ReturnCode.UNSAFE_JOB,
]
def __init__(self, config: ServerRunnerConfig, job_id: str, engine: ServerEngineSpec):
"""Server runner class.
Args:
config (ServerRunnerConfig): configuration of server runner
job_id (str): The number to distinguish each experiment
engine (ServerEngineSpec): server engine
"""
FLComponent.__init__(self)
self.job_id = job_id
self.config = config
self.engine = engine
self.abort_signal = Signal()
self.wf_lock = threading.Lock()
self.current_wf = None
self.current_wf_index = 0
self.status = "init"
self.turn_to_cold = False
engine.register_aux_message_handler(
topic=ReservedTopic.SYNC_RUNNER, message_handle_func=self._handle_sync_runner
)
def _handle_sync_runner(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
# simply ack
return make_reply(ReturnCode.OK)
def _execute_run(self):
while self.current_wf_index < len(self.config.workflows):
wf = self.config.workflows[self.current_wf_index]
try:
with self.engine.new_context() as fl_ctx:
self.log_info(fl_ctx, "starting workflow {} ({}) ...".format(wf.id, type(wf.responder)))
fl_ctx.set_prop(FLContextKey.WORKFLOW, wf.id, sticky=True)
wf.responder.initialize_run(fl_ctx)
self.log_info(fl_ctx, "Workflow {} ({}) started".format(wf.id, type(wf.responder)))
self.log_debug(fl_ctx, "firing event EventType.START_WORKFLOW")
self.fire_event(EventType.START_WORKFLOW, fl_ctx)
# use the wf_lock to ensure state integrity between workflow change and message processing
with self.wf_lock:
# we only set self.current_wf to open for business after successful initialize_run!
self.current_wf = wf
with self.engine.new_context() as fl_ctx:
wf.responder.control_flow(self.abort_signal, fl_ctx)
except Exception as e:
with self.engine.new_context() as fl_ctx:
self.log_exception(fl_ctx, "Exception in workflow {}: {}".format(wf.id, secure_format_exception(e)))
self.system_panic("Exception in workflow {}: {}".format(wf.id, secure_format_exception(e)), fl_ctx)
finally:
with self.engine.new_context() as fl_ctx:
# do not execute finalize_run() until the wf_lock is acquired
with self.wf_lock:
# unset current_wf to prevent message processing
# then we can release the lock - no need to delay message processing
# during finalization!
# Note: WF finalization may take time since it needs to wait for
# the job monitor to join.
self.current_wf = None
self.log_info(fl_ctx, f"Workflow: {wf.id} finalizing ...")
try:
wf.responder.finalize_run(fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx, "Error finalizing workflow {}: {}".format(wf.id, secure_format_exception(e))
)
self.log_debug(fl_ctx, "firing event EventType.END_WORKFLOW")
self.fire_event(EventType.END_WORKFLOW, fl_ctx)
            # If the abort signal was triggered by the current responder, stop here instead of
            # continuing with the remaining responders.
if self.abort_signal.triggered:
break
self.current_wf_index += 1
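    # Editorial note: workflows run strictly in the configured order; the index is advanced
    # only after the current workflow has been finalized, and a triggered abort_signal stops
    # the loop before moving on to the next workflow.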
def run(self):
with self.engine.new_context() as fl_ctx:
self.log_info(fl_ctx, "Server runner starting ...")
self.log_debug(fl_ctx, "firing event EventType.START_RUN")
fl_ctx.set_prop(ReservedKey.RUN_ABORT_SIGNAL, self.abort_signal, private=True, sticky=True)
self.fire_event(EventType.START_RUN, fl_ctx)
self.engine.persist_components(fl_ctx, completed=False)
self.status = "started"
try:
self._execute_run()
except Exception as e:
with self.engine.new_context() as fl_ctx:
self.log_exception(fl_ctx, f"Error executing RUN: {secure_format_exception(e)}")
finally:
# use wf_lock to ensure state of current_wf!
self.status = "done"
with self.wf_lock:
with self.engine.new_context() as fl_ctx:
self.fire_event(EventType.ABOUT_TO_END_RUN, fl_ctx)
self.log_info(fl_ctx, "ABOUT_TO_END_RUN fired")
if not self.turn_to_cold:
# ask all clients to end run!
self.engine.send_aux_request(
targets=None,
topic=ReservedTopic.END_RUN,
request=Shareable(),
timeout=0.0,
fl_ctx=fl_ctx,
optional=True,
secure=False,
)
self.engine.persist_components(fl_ctx, completed=True)
self.fire_event(EventType.END_RUN, fl_ctx)
self.log_info(fl_ctx, "END_RUN fired")
self.log_info(fl_ctx, "Server runner finished.")
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR)
if collector:
if not isinstance(collector, GroupInfoCollector):
raise TypeError("collector must be GroupInfoCollect but got {}".format(type(collector)))
with self.wf_lock:
if self.current_wf:
collector.set_info(
group_name="ServerRunner",
info={"job_id": self.job_id, "status": self.status, "workflow": self.current_wf.id},
)
elif event_type == EventType.FATAL_SYSTEM_ERROR:
fl_ctx.set_prop(key=FLContextKey.FATAL_SYSTEM_ERROR, value=True, private=True, sticky=True)
reason = fl_ctx.get_prop(key=FLContextKey.EVENT_DATA, default="")
self.log_error(fl_ctx, "Aborting current RUN due to FATAL_SYSTEM_ERROR received: {}".format(reason))
self.abort(fl_ctx)
def _task_try_again(self) -> (str, str, Shareable):
task_data = Shareable()
task_data.set_header(TaskConstant.WAIT_TIME, self.config.task_request_interval)
return SpecialTaskName.TRY_AGAIN, "", task_data
def process_task_request(self, client: Client, fl_ctx: FLContext) -> (str, str, Shareable):
"""Process task request from a client.
NOTE: the Engine will create a new fl_ctx and call this method:
with engine.new_context() as fl_ctx:
name, id, data = runner.process_task_request(client, fl_ctx)
...
Args:
client (Client): client object
fl_ctx (FLContext): FL context
Returns:
A tuple of (task name, task id, and task data)
"""
engine = fl_ctx.get_engine()
if not isinstance(engine, ServerEngineSpec):
raise TypeError("engine must be ServerEngineSpec but got {}".format(type(engine)))
self.log_debug(fl_ctx, "process task request from client")
if self.status == "init":
self.log_debug(fl_ctx, "server runner still initializing - asked client to try again later")
return self._task_try_again()
if self.status == "done":
self.log_info(fl_ctx, "server runner is finalizing - asked client to end the run")
return SpecialTaskName.END_RUN, "", None
peer_ctx = fl_ctx.get_peer_context()
if not isinstance(peer_ctx, FLContext):
self.log_debug(fl_ctx, "invalid task request: no peer context - asked client to try again later")
return self._task_try_again()
peer_job_id = peer_ctx.get_job_id()
if not peer_job_id or peer_job_id != self.job_id:
# the client is in a different RUN
self.log_info(fl_ctx, "invalid task request: not the same job_id - asked client to end the run")
return SpecialTaskName.END_RUN, "", None
try:
task_name, task_id, task_data = self._try_to_get_task(
# client, fl_ctx, self.config.task_request_timeout, self.config.task_retry_interval
client,
fl_ctx,
)
if not task_name or task_name == SpecialTaskName.TRY_AGAIN:
return self._task_try_again()
# filter task data
self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_DATA_FILTER")
self.fire_event(EventType.BEFORE_TASK_DATA_FILTER, fl_ctx)
try:
filter_name = Scope.TASK_DATA_FILTERS_NAME
task_data = apply_filters(
filter_name, task_data, fl_ctx, self.config.task_data_filters, task_name, FilterKey.OUT
)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in task data filter {}; "
"asked client to try again later".format(secure_format_exception(e)),
)
with self.wf_lock:
if self.current_wf:
self.current_wf.responder.handle_exception(task_id, fl_ctx)
return self._task_try_again()
self.log_debug(fl_ctx, "firing event EventType.AFTER_TASK_DATA_FILTER")
self.fire_event(EventType.AFTER_TASK_DATA_FILTER, fl_ctx)
self.log_info(fl_ctx, f"sent task assignment to client. client_name:{client.name} task_id:{task_id}")
audit_event_id = add_job_audit_event(fl_ctx=fl_ctx, msg=f'sent task to client "{client.name}"')
task_data.set_header(ReservedHeaderKey.AUDIT_EVENT_ID, audit_event_id)
task_data.set_header(TaskConstant.WAIT_TIME, self.config.task_request_interval)
return task_name, task_id, task_data
except Exception as e:
self.log_exception(
fl_ctx,
f"Error processing client task request: {secure_format_exception(e)}; asked client to try again later",
)
return self._task_try_again()
def _try_to_get_task(self, client, fl_ctx, timeout=None, retry_interval=0.005):
start = time.time()
while True:
with self.wf_lock:
if self.current_wf is None:
self.log_debug(fl_ctx, "no current workflow - asked client to try again later")
return "", "", None
task_name, task_id, task_data = self.current_wf.responder.process_task_request(client, fl_ctx)
if task_name and task_name != SpecialTaskName.TRY_AGAIN:
if task_data:
if not isinstance(task_data, Shareable):
self.log_error(
fl_ctx,
"bad task data generated by workflow {}: must be Shareable but got {}".format(
self.current_wf.id, type(task_data)
),
)
return "", "", None
else:
task_data = Shareable()
task_data.set_header(ReservedHeaderKey.TASK_ID, task_id)
task_data.set_header(ReservedHeaderKey.TASK_NAME, task_name)
task_data.add_cookie(ReservedHeaderKey.WORKFLOW, self.current_wf.id)
fl_ctx.set_prop(FLContextKey.TASK_NAME, value=task_name, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.TASK_ID, value=task_id, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.TASK_DATA, value=task_data, private=True, sticky=False)
self.log_info(fl_ctx, f"assigned task to client {client.name}: name={task_name}, id={task_id}")
return task_name, task_id, task_data
if timeout is None or time.time() - start > timeout:
break
time.sleep(retry_interval)
# ask client to retry
return "", "", None
def handle_dead_job(self, client_name: str, fl_ctx: FLContext):
with self.wf_lock:
try:
if self.current_wf is None:
return
self.current_wf.responder.handle_dead_job(client_name=client_name, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx, f"Error processing dead job by workflow {self.current_wf.id}: {secure_format_exception(e)}"
)
def process_submission(self, client: Client, task_name: str, task_id: str, result: Shareable, fl_ctx: FLContext):
"""Process task result submitted from a client.
NOTE: the Engine will create a new fl_ctx and call this method:
with engine.new_context() as fl_ctx:
name, id, data = runner.process_submission(client, fl_ctx)
Args:
client: Client object
task_name: task name
task_id: task id
result: task result
fl_ctx: FLContext
"""
self.log_info(fl_ctx, f"got result from client {client.name} for task: name={task_name}, id={task_id}")
if not isinstance(result, Shareable):
self.log_error(fl_ctx, "invalid result submission: must be Shareable but got {}".format(type(result)))
return
# set the reply prop so log msg context could include RC from it
fl_ctx.set_prop(FLContextKey.REPLY, result, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.TASK_NAME, value=task_name, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.TASK_RESULT, value=result, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.TASK_ID, value=task_id, private=True, sticky=False)
client_audit_event_id = result.get_header(ReservedHeaderKey.AUDIT_EVENT_ID, "")
add_job_audit_event(
fl_ctx=fl_ctx, ref=client_audit_event_id, msg=f"received result from client '{client.name}'"
)
if self.status != "started":
self.log_info(fl_ctx, "ignored result submission since server runner's status is {}".format(self.status))
return
peer_ctx = fl_ctx.get_peer_context()
if not isinstance(peer_ctx, FLContext):
self.log_info(fl_ctx, "invalid result submission: no peer context - dropped")
return
peer_job_id = peer_ctx.get_job_id()
if not peer_job_id or peer_job_id != self.job_id:
# the client is on a different RUN
self.log_info(fl_ctx, "invalid result submission: not the same job id - dropped")
return
rc = result.get_return_code(default=ReturnCode.OK)
if rc in self.ABORT_RETURN_CODES:
self.log_error(fl_ctx, f"aborting ServerRunner due to fatal return code {rc} from client {client.name}")
self.system_panic(
reason=f"Aborted job {self.job_id} due to fatal return code {rc} from client {client.name}",
fl_ctx=fl_ctx,
)
return
result.set_header(ReservedHeaderKey.TASK_NAME, task_name)
result.set_header(ReservedHeaderKey.TASK_ID, task_id)
result.set_peer_props(peer_ctx.get_all_public_props())
with self.wf_lock:
try:
if self.current_wf is None:
self.log_info(fl_ctx, "no current workflow - dropped submission.")
return
wf_id = result.get_cookie(ReservedHeaderKey.WORKFLOW, None)
if wf_id is not None and wf_id != self.current_wf.id:
self.log_info(
fl_ctx,
"Got result for workflow {}, but we are running {} - dropped submission.".format(
wf_id, self.current_wf.id
),
)
return
# filter task result
self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_RESULT_FILTER")
self.fire_event(EventType.BEFORE_TASK_RESULT_FILTER, fl_ctx)
try:
filter_name = Scope.TASK_RESULT_FILTERS_NAME
result = apply_filters(
filter_name, result, fl_ctx, self.config.task_result_filters, task_name, FilterKey.IN
)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in task result filter {}; ".format(secure_format_exception(e)),
)
result = make_reply(ReturnCode.TASK_RESULT_FILTER_ERROR)
self.log_debug(fl_ctx, "firing event EventType.AFTER_TASK_RESULT_FILTER")
self.fire_event(EventType.AFTER_TASK_RESULT_FILTER, fl_ctx)
self.log_debug(fl_ctx, "firing event EventType.BEFORE_PROCESS_SUBMISSION")
self.fire_event(EventType.BEFORE_PROCESS_SUBMISSION, fl_ctx)
self.current_wf.responder.process_submission(
client=client, task_name=task_name, task_id=task_id, result=result, fl_ctx=fl_ctx
)
self.log_info(fl_ctx, "finished processing client result by {}".format(self.current_wf.id))
self.log_debug(fl_ctx, "firing event EventType.AFTER_PROCESS_SUBMISSION")
self.fire_event(EventType.AFTER_PROCESS_SUBMISSION, fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx,
"Error processing client result by {}: {}".format(self.current_wf.id, secure_format_exception(e)),
)
def abort(self, fl_ctx: FLContext, turn_to_cold: bool = False):
self.status = "done"
self.abort_signal.trigger(value=True)
self.turn_to_cold = turn_to_cold
self.log_info(fl_ctx, "asked to abort - triggered abort_signal to stop the RUN")
def get_persist_state(self, fl_ctx: FLContext) -> dict:
return {"job_id": str(self.job_id), "current_wf_index": self.current_wf_index}
def restore(self, state_data: dict, fl_ctx: FLContext):
self.job_id = state_data.get("job_id")
self.current_wf_index = int(state_data.get("current_wf_index", 0))
| NVFlare-main | nvflare/private/fed/server/server_runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import List
from nvflare.apis.fl_constant import AdminCommandNames
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import MetaStatusValue, make_meta
from nvflare.fuel.hci.reg import CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.authz import PreAuthzReturnCode
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.private.defs import InfoCollectorTopic, RequestHeader
from nvflare.private.fed.server.admin import new_message
from nvflare.private.fed.server.server_engine_internal_spec import ServerEngineInternalSpec
from nvflare.widgets.info_collector import InfoCollector
from nvflare.widgets.widget import WidgetID
from .cmd_utils import CommandUtil
from .job_cmds import JobCommandModule
class InfoCollectorCommandModule(JobCommandModule, CommandUtil):
"""This class is for server side info collector commands.
NOTE: we only support Server side info collector commands for now,
due to the complexity of client-side process/child-process architecture.
"""
CONN_KEY_COLLECTOR = "collector"
def get_spec(self):
return CommandModuleSpec(
name="info",
cmd_specs=[
CommandSpec(
name=AdminCommandNames.SHOW_STATS,
description="show current system stats for an actively running job",
usage="show_stats job_id server|client [clients]",
handler_func=self.show_stats,
authz_func=self.authorize_info_collection,
visible=True,
),
CommandSpec(
name=AdminCommandNames.SHOW_ERRORS,
description="show latest errors in an actively running job",
usage="show_errors job_id server|client [clients]",
handler_func=self.show_errors,
authz_func=self.authorize_info_collection,
visible=True,
),
CommandSpec(
name=AdminCommandNames.RESET_ERRORS,
description="reset error stats for an actively running job",
usage="reset_errors job_id server|client [clients]",
handler_func=self.reset_errors,
authz_func=self.authorize_info_collection,
visible=True,
),
],
)
def authorize_info_collection(self, conn: Connection, args: List[str]):
if len(args) < 3:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_error(f"Usage: {cmd_entry.usage}", meta=make_meta(MetaStatusValue.SYNTAX_ERROR))
return PreAuthzReturnCode.ERROR
rt = self.authorize_job(conn, args)
if rt == PreAuthzReturnCode.ERROR:
return rt
engine = conn.app_ctx
if not isinstance(engine, ServerEngineInternalSpec):
raise TypeError("engine must be ServerEngineInternalSpec but got {}".format(type(engine)))
collector = engine.get_widget(WidgetID.INFO_COLLECTOR)
if not collector:
msg = "info collector not available"
conn.append_error(msg, meta=make_meta(MetaStatusValue.INTERNAL_ERROR, msg))
return PreAuthzReturnCode.ERROR
if not isinstance(collector, InfoCollector):
msg = "info collector not right object"
conn.append_error(msg, meta=make_meta(MetaStatusValue.INTERNAL_ERROR, msg))
return PreAuthzReturnCode.ERROR
conn.set_prop(self.CONN_KEY_COLLECTOR, collector)
job_id = conn.get_prop(self.JOB_ID)
if job_id not in engine.run_processes:
conn.append_error(
f"Job_id: {job_id} is not running.", meta=make_meta(MetaStatusValue.JOB_NOT_RUNNING, job_id)
)
return PreAuthzReturnCode.ERROR
run_info = engine.get_app_run_info(job_id)
if not run_info:
conn.append_string(
f"Cannot find job: {job_id}. Please make sure the first arg following the command is a valid job_id.",
meta=make_meta(MetaStatusValue.INVALID_JOB_ID, job_id),
)
return PreAuthzReturnCode.ERROR
return rt
def show_stats(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
self._collect_stats(conn, args, stats_func=engine.show_stats, msg_topic=InfoCollectorTopic.SHOW_STATS)
def _collect_stats(self, conn: Connection, args: List[str], stats_func, msg_topic):
job_id = conn.get_prop(self.JOB_ID)
target_type = args[2]
result = {}
if target_type in [self.TARGET_TYPE_SERVER, self.TARGET_TYPE_ALL]:
server_stats = stats_func(job_id)
result["server"] = server_stats
if target_type in [self.TARGET_TYPE_CLIENT, self.TARGET_TYPE_ALL]:
message = new_message(conn, topic=msg_topic, body="", require_authz=True)
message.set_header(RequestHeader.JOB_ID, job_id)
replies = self.send_request_to_clients(conn, message)
self._process_stats_replies(conn, replies, result)
conn.append_any(result)
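    # Illustrative shape of the aggregated result (site names and values are hypothetical):
    #
    #   {"server": {...}, "site-1": {...}, "site-2": "invalid_reply"}
    #
    # The "server" entry is present only when the target type includes the server; each client
    # entry holds the parsed JSON reply body, or "invalid_reply" if parsing failed.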
def show_errors(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
self._collect_stats(conn, args, stats_func=engine.get_errors, msg_topic=InfoCollectorTopic.SHOW_ERRORS)
def reset_errors(self, conn: Connection, args: List[str]):
engine = conn.app_ctx
self._collect_stats(conn, args, stats_func=engine.reset_errors, msg_topic=InfoCollectorTopic.RESET_ERRORS)
@staticmethod
def _process_stats_replies(conn, replies, result: dict):
if not replies:
return
for r in replies:
client_name = r.client_name
try:
body = json.loads(r.reply.body)
result[client_name] = body
except Exception:
result[client_name] = "invalid_reply"
return
| NVFlare-main | nvflare/private/fed/server/info_coll_cmd.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/private/fed/server/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.f3.cellnet.core_cell import FQCN
from nvflare.fuel.f3.cellnet.core_cell import Message as CellMessage
from nvflare.fuel.f3.cellnet.core_cell import TargetMessage
from nvflare.private.admin_defs import Message
from nvflare.private.defs import CellChannel, new_cell_message
class ClientReply(object):
def __init__(self, client_token: str, client_name: str, req: Message, reply: Message):
"""Client reply.
Args:
client_token (str): client token
client_name (str): name of the client
req (Message): request
reply (Message): reply
"""
self.client_token = client_token
self.client_name = client_name
self.request = req
self.reply = reply
def send_requests(
cell, command: str, requests: dict, clients, job_id=None, timeout_secs=2.0, optional=False
) -> [ClientReply]:
"""Send requests to clients.
NOTE::
This method is to be used by a Command Handler to send requests to Clients.
Hence, it is run in the Command Handler's handling thread.
This is a blocking call - returned only after all responses are received or timeout.
Args:
cell: the source cell
command: the command to be sent
        requests: A dict of requests: {client token: request or list of requests}
        clients: the clients the command will be sent to
job_id: id of the job that the command is applied to
timeout_secs: how long to wait for reply before timeout
optional: whether the message is optional
Returns:
A list of ClientReply
"""
if not isinstance(requests, dict):
raise TypeError("requests must be a dict but got {}".format(type(requests)))
if len(requests) == 0:
return []
target_msgs = {}
name_to_token = {}
name_to_req = {}
for token, req in requests.items():
client = clients.get(token)
if not client:
continue
if job_id:
fqcn = FQCN.join([client.name, job_id])
channel = CellChannel.CLIENT_COMMAND
optional = True
else:
fqcn = client.name
channel = CellChannel.CLIENT_MAIN
target_msgs[client.name] = TargetMessage(
target=fqcn, channel=channel, topic=command, message=new_cell_message({}, req)
)
name_to_token[client.name] = token
name_to_req[client.name] = req
if not target_msgs:
return []
if timeout_secs <= 0.0:
# this is fire-and-forget!
cell.fire_multi_requests_and_forget(target_msgs, optional=optional)
return []
else:
result = []
replies = cell.broadcast_multi_requests(target_msgs, timeout_secs, optional=optional)
for name, reply in replies.items():
assert isinstance(reply, CellMessage)
result.append(
ClientReply(
client_token=name_to_token[name], client_name=name, req=name_to_req[name], reply=reply.payload
)
)
return result
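# Minimal usage sketch (illustrative; the cell, clients dict, and request messages are
# hypothetical, not from the original source):
#
#   reqs = {client.token: Message(topic="admin", body="sys_info") for client in clients.values()}
#   replies = send_requests(cell, command="admin", requests=reqs, clients=clients, timeout_secs=5.0)
#   for r in replies:
#       print(r.client_name, r.reply.body if r.reply else "no reply")
#
# A timeout_secs of 0 or less makes the call fire-and-forget and always returns an empty list.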
| NVFlare-main | nvflare/private/fed/server/message_send.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import uuid
from typing import Optional
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.fuel.f3.cellnet.defs import MessagePropKey
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.private.defs import CellMessageHeaderKeys
class ClientManager:
def __init__(self, project_name=None, min_num_clients=2, max_num_clients=10):
"""Manages client adding and removing.
Args:
project_name: project name
min_num_clients: minimum number of clients allowed.
max_num_clients: maximum number of clients allowed.
"""
self.project_name = project_name
# TODO:: remove min num clients
self.min_num_clients = min_num_clients
self.max_num_clients = max_num_clients
self.clients = dict() # token => Client
self.lock = threading.Lock()
self.logger = logging.getLogger(self.__class__.__name__)
def authenticate(self, request, context) -> Optional[Client]:
client = self.login_client(request, context)
if not client:
return None
# client_ip = context.peer().split(":")[1]
client_ip = request.get_header(CellMessageHeaderKeys.CLIENT_IP)
# new client join
with self.lock:
self.clients.update({client.token: client})
self.logger.info(
"Client: New client {} joined. Sent token: {}. Total clients: {}".format(
client.name + "@" + client_ip, client.token, len(self.clients)
)
)
return client
def remove_client(self, token):
"""Remove a registered client.
Args:
token: client token
Returns:
The removed Client object
"""
with self.lock:
client = self.clients.pop(token)
self.logger.info(
"Client Name:{} \tToken: {} left. Total clients: {}".format(client.name, token, len(self.clients))
)
return client
def login_client(self, client_login, context):
if not self.is_valid_task(client_login.get_header(CellMessageHeaderKeys.PROJECT_NAME)):
# context.abort(grpc.StatusCode.INVALID_ARGUMENT, "Requested task does not match the current server task")
context.set_prop(
FLContextKey.UNAUTHENTICATED, "Requested task does not match the current server task", sticky=False
)
return None
return self.authenticated_client(client_login, context)
def validate_client(self, request, fl_ctx: FLContext, allow_new=False):
"""Validate the client state message.
Args:
request: A request from client.
fl_ctx: FLContext
allow_new: whether to allow new client. Note that its task should still match server's.
Returns:
client id if it's a valid client
"""
# token = client_state.token
token = request.get_header(CellMessageHeaderKeys.TOKEN)
if not token:
# context.abort(grpc.StatusCode.INVALID_ARGUMENT, "Could not read client uid from the payload")
fl_ctx.set_prop(FLContextKey.UNAUTHENTICATED, "Could not read client uid from the payload", sticky=False)
client = None
elif not self.is_valid_task(request.get_header(CellMessageHeaderKeys.PROJECT_NAME)):
# context.abort(grpc.StatusCode.INVALID_ARGUMENT, "Requested task does not match the current server task")
fl_ctx.set_prop(
FLContextKey.UNAUTHENTICATED, "Requested task does not match the current server task", sticky=False
)
client = None
elif not (allow_new or self.is_from_authorized_client(token)):
# context.abort(grpc.StatusCode.UNAUTHENTICATED, "Unknown client identity")
fl_ctx.set_prop(FLContextKey.UNAUTHENTICATED, "Unknown client identity", sticky=False)
client = None
else:
client = self.clients.get(token)
return client
def authenticated_client(self, request, context) -> Optional[Client]:
"""Use SSL certificate for authenticate the client.
Args:
request: client login request Message
context: FL_Context
Returns:
Client object.
"""
client_name = request.get_header(CellMessageHeaderKeys.CLIENT_NAME)
client = self.clients.get(client_name)
if not client:
fqcn = request.get_prop(MessagePropKey.ENDPOINT).conn_props.get(DriverParams.PEER_CN.value)
if fqcn and fqcn != client_name:
context.set_prop(
FLContextKey.UNAUTHENTICATED,
f"Requested fqcn:{fqcn} does not match the client_name: {client_name}",
sticky=False,
)
return None
with self.lock:
clients_to_be_removed = [token for token, client in self.clients.items() if client.name == client_name]
for item in clients_to_be_removed:
self.clients.pop(item)
self.logger.info(f"Client: {client_name} already registered. Re-login the client with a new token.")
client = Client(client_name, str(uuid.uuid4()))
if len(self.clients) >= self.max_num_clients:
context.set_prop(FLContextKey.UNAUTHENTICATED, "Maximum number of clients reached", sticky=False)
self.logger.info(f"Maximum number of clients reached. Reject client: {client_name} login.")
return None
return client
def is_from_authorized_client(self, token):
"""Check if a client is authorized.
Args:
token: client token
Returns:
True if it is a recognised client
"""
return token in self.clients
def is_valid_task(self, task):
"""Check whether the requested task matches the server's project_name.
Returns:
True if task name is the same as server's project name.
"""
# TODO: change the name of this method
return task == self.project_name
def heartbeat(self, token, client_name, fl_ctx):
"""Update the heartbeat of the client.
Args:
token: client token
client_name: client name
fl_ctx: FLContext
Returns:
If a new client needs to be created.
"""
with self.lock:
client = self.clients.get(token)
if client:
client.last_connect_time = time.time()
# self.clients.update({token: time.time()})
self.logger.debug(f"Receive heartbeat from Client:{token}")
return False
else:
for _token, _client in self.clients.items():
if _client.name == client_name:
# context.abort(
# grpc.StatusCode.FAILED_PRECONDITION,
# "Client ID already registered as a client: {}".format(client_name),
# )
fl_ctx.set_prop(
FLContextKey.COMMUNICATION_ERROR,
"Client ID already registered as a client: {}".format(client_name),
sticky=False,
)
self.logger.info(
f"Failed to re-activate the client:{client_name} with token: {token}. "
f"Client already exist with token: {_token}."
)
return False
client = Client(client_name, token)
client.last_connect_time = time.time()
# self._set_instance_name(client)
self.clients.update({token: client})
self.logger.info("Re-activate the client:{} with token: {}".format(client_name, token))
return True
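    # Editorial note: heartbeat() returns True only when the token is unknown and the client
    # name is not registered under any other token, i.e. the client record needs to be
    # re-created on the server side; otherwise it simply refreshes last_connect_time (or flags
    # a name conflict) and returns False.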
def get_clients(self):
"""Get the list of registered clients.
Returns:
A dict of {client_token: client}
"""
return self.clients
def get_min_clients(self):
return self.min_num_clients
def get_max_clients(self):
return self.max_num_clients
def get_all_clients_from_inputs(self, inputs):
clients = []
invalid_inputs = []
for item in inputs:
client = self.clients.get(item)
# if item in self.get_all_clients():
if client:
clients.append(client)
else:
client = self.get_client_from_name(item)
if client:
clients.append(client)
else:
invalid_inputs.append(item)
return clients, invalid_inputs
def get_client_from_name(self, client_name):
clients = list(self.get_clients().values())
for c in clients:
if client_name == c.name:
return c
return None
| NVFlare-main | nvflare/private/fed/server/client_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import List
import psutil
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.private.admin_defs import MsgHeader, ReturnCode
from nvflare.private.defs import SysCommandTopic
from nvflare.private.fed.server.admin import new_message
from nvflare.private.fed.server.cmd_utils import CommandUtil
from nvflare.security.logging import secure_format_exception
def _parse_replies(conn, replies):
"""parses resources from replies."""
site_resources = {}
for r in replies:
client_name = r.client_name
if r.reply:
if r.reply.get_header(MsgHeader.RETURN_CODE) == ReturnCode.ERROR:
resources = r.reply.body
else:
try:
resources = json.loads(r.reply.body)
except Exception as e:
resources = f"Bad replies: {secure_format_exception(e)}"
else:
resources = "No replies"
site_resources[client_name] = resources
return site_resources
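# Illustrative result of _parse_replies (site names and values are hypothetical):
#
#   {"site-1": {"gpu": 2}, "site-2": "No replies"}
#
# A value is the parsed JSON body on success, the raw error body when the reply carries an
# ERROR return code, a "Bad replies: ..." message when parsing fails, or "No replies" when no
# reply was received.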
class SystemCommandModule(CommandModule, CommandUtil):
def get_spec(self):
return CommandModuleSpec(
name="sys",
cmd_specs=[
CommandSpec(
name="sys_info",
description="get the system info",
usage="sys_info server|client <client-name> ...",
handler_func=self.sys_info,
authz_func=self.authorize_server_operation,
visible=True,
),
CommandSpec(
name="report_resources",
description="get the resources info",
usage="report_resources server | client <client-name> ...",
handler_func=self.report_resources,
authz_func=self.authorize_server_operation,
visible=True,
),
],
)
def sys_info(self, conn: Connection, args: [str]):
if len(args) < 2:
conn.append_error("syntax error: missing site names")
return
target_type = args[1]
if target_type == self.TARGET_TYPE_SERVER:
infos = dict(psutil.virtual_memory()._asdict())
table = conn.append_table(["Metrics", "Value"])
for k, v in infos.items():
table.add_row([str(k), str(v)])
table.add_row(
[
"available_percent",
"%.1f" % (psutil.virtual_memory().available * 100 / psutil.virtual_memory().total),
]
)
return
if target_type == self.TARGET_TYPE_CLIENT:
message = new_message(conn, topic=SysCommandTopic.SYS_INFO, body="", require_authz=True)
replies = self.send_request_to_clients(conn, message)
self._process_replies(conn, replies)
return
conn.append_string("invalid target type {}. Usage: sys_info server|client <client-name>".format(target_type))
def _process_replies(self, conn, replies):
if not replies:
conn.append_error("no responses from clients")
return
for r in replies:
client_name = r.client_name
conn.append_string("Client: " + client_name)
table = conn.append_table(["Metrics", "Value"])
if r.reply:
if r.reply.get_header(MsgHeader.RETURN_CODE) == ReturnCode.ERROR:
table.add_row([r.reply.body, ""])
else:
try:
infos = json.loads(r.reply.body)
for k, v in infos.items():
table.add_row([str(k), str(v)])
table.add_row(
[
"available_percent",
"%.1f" % (psutil.virtual_memory().available * 100 / psutil.virtual_memory().total),
]
)
except Exception:
conn.append_string(": Bad replies")
else:
conn.append_string(": No replies")
def report_resources(self, conn: Connection, args: List[str]):
if len(args) < 2:
conn.append_error("syntax error: missing site names")
return
target_type = args[1]
if target_type != self.TARGET_TYPE_CLIENT and target_type != self.TARGET_TYPE_SERVER:
conn.append_string(
"invalid target type {}. Usage: sys_info server|client <client-name>".format(target_type)
)
return
site_resources = {"server": "unlimited"}
if target_type == self.TARGET_TYPE_CLIENT:
message = new_message(conn, topic=SysCommandTopic.REPORT_RESOURCES, body="", require_authz=True)
replies = self.send_request_to_clients(conn, message)
if not replies:
conn.append_error("no responses from clients")
return
site_resources = _parse_replies(conn, replies)
table = conn.append_table(["Sites", "Resources"])
for k, v in site_resources.items():
table.add_row([str(k), str(v)])
| NVFlare-main | nvflare/private/fed/server/sys_cmd.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.hci.reg import CommandModule
from nvflare.private.fed.server.info_coll_cmd import InfoCollectorCommandModule
from nvflare.private.fed.server.job_cmds import JobCommandModule
from nvflare.private.fed.server.shell_cmd import ShellCommandModule
from nvflare.private.fed.server.sys_cmd import SystemCommandModule
from nvflare.private.fed.server.training_cmds import TrainingCommandModule
class ServerCommandModules:
cmd_modules = [
ShellCommandModule(),
SystemCommandModule(),
TrainingCommandModule(),
JobCommandModule(),
InfoCollectorCommandModule(),
]
@staticmethod
def register_cmd_module(cmd_module: CommandModule):
ServerCommandModules.cmd_modules.append(cmd_module)
| NVFlare-main | nvflare/private/fed/server/server_cmd_modules.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from typing import List, Optional
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.net_agent import NetAgent
from nvflare.fuel.f3.cellnet.net_manager import NetManager
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule
from nvflare.fuel.hci.server.audit import CommandAudit
from nvflare.fuel.hci.server.authz import AuthzFilter
from nvflare.fuel.hci.server.builtin import new_command_register_with_builtin_module
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.fuel.hci.server.hci import AdminServer
from nvflare.fuel.hci.server.login import LoginModule, SessionManager, SimpleAuthenticator
from nvflare.fuel.sec.audit import Auditor, AuditService
from nvflare.private.admin_defs import Message
from nvflare.private.defs import ERROR_MSG_PREFIX, RequestHeader
from nvflare.private.fed.server.message_send import ClientReply, send_requests
def new_message(conn: Connection, topic, body, require_authz: bool) -> Message:
msg = Message(topic=topic, body=body)
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
if cmd_entry:
msg.set_header(RequestHeader.ADMIN_COMMAND, cmd_entry.name)
msg.set_header(RequestHeader.REQUIRE_AUTHZ, str(require_authz).lower())
props_to_copy = [
ConnProps.EVENT_ID,
ConnProps.USER_NAME,
ConnProps.USER_ROLE,
ConnProps.USER_ORG,
ConnProps.SUBMITTER_NAME,
ConnProps.SUBMITTER_ORG,
ConnProps.SUBMITTER_ROLE,
]
for p in props_to_copy:
prop = conn.get_prop(p, default=None)
if prop:
msg.set_header(p, prop)
return msg
class _Client(object):
def __init__(self, token, name):
self.token = token
self.name = name
self.last_heard_time = None
class _ClientReq(object):
def __init__(self, client, req: Message):
self.client = client
self.req = req
def check_client_replies(replies: List[ClientReply], client_sites: List[str], command: str):
display_sites = ", ".join(client_sites)
if not replies:
raise RuntimeError(f"Failed to {command} to the clients {display_sites}: no replies.")
if len(replies) != len(client_sites):
raise RuntimeError(f"Failed to {command} to the clients {display_sites}: not enough replies.")
error_msg = ""
for r, client_name in zip(replies, client_sites):
if r.reply and ERROR_MSG_PREFIX in r.reply.body:
error_msg += f"\t{client_name}: {r.reply.body}\n"
if error_msg != "":
raise RuntimeError(f"Failed to {command} to the following clients: \n{error_msg}")
class FedAdminServer(AdminServer):
def __init__(
self,
cell: Cell,
fed_admin_interface,
users,
cmd_modules,
file_upload_dir,
file_download_dir,
host,
port,
ca_cert_file_name,
server_cert_file_name,
server_key_file_name,
accepted_client_cns=None,
download_job_url="",
):
"""The FedAdminServer is the framework for developing admin commands.
Args:
fed_admin_interface: the server's federated admin interface
users: a dict of {username: pwd hash}
cmd_modules: a list of CommandModules
file_upload_dir: the directory for uploaded files
file_download_dir: the directory for files to be downloaded
host: the IP address of the admin server
port: port number of admin server
ca_cert_file_name: the root CA's cert file name
server_cert_file_name: server's cert, signed by the CA
server_key_file_name: server's private key file
accepted_client_cns: list of accepted Common Names from client, if specified
download_job_url: download job url
"""
cmd_reg = new_command_register_with_builtin_module(app_ctx=fed_admin_interface)
self.sai = fed_admin_interface
self.cell = cell
self.client_lock = threading.Lock()
authenticator = SimpleAuthenticator(users)
sess_mgr = SessionManager()
login_module = LoginModule(authenticator, sess_mgr)
cmd_reg.register_module(login_module)
# register filters - order is important!
# login_module is also a filter that determines if user is authenticated
cmd_reg.add_filter(login_module)
# next is the authorization filter and command module
authz_filter = AuthzFilter()
cmd_reg.add_filter(authz_filter)
# audit filter records commands to audit trail
auditor = AuditService.get_auditor()
# TODO:: clean this up
if not isinstance(auditor, Auditor):
raise TypeError("auditor must be Auditor but got {}".format(type(auditor)))
audit_filter = CommandAudit(auditor)
cmd_reg.add_filter(audit_filter)
self.file_upload_dir = file_upload_dir
self.file_download_dir = file_download_dir
cmd_reg.register_module(sess_mgr)
# mpm.add_cleanup_cb(sess_mgr.shutdown)
agent = NetAgent(self.cell)
net_mgr = NetManager(agent)
cmd_reg.register_module(net_mgr)
mpm.add_cleanup_cb(net_mgr.close)
mpm.add_cleanup_cb(agent.close)
if cmd_modules:
if not isinstance(cmd_modules, list):
raise TypeError("cmd_modules must be list but got {}".format(type(cmd_modules)))
for m in cmd_modules:
if not isinstance(m, CommandModule):
raise TypeError("cmd_modules must contain CommandModule but got element of type {}".format(type(m)))
cmd_reg.register_module(m)
AdminServer.__init__(
self,
cmd_reg=cmd_reg,
host=host,
port=port,
ca_cert=ca_cert_file_name,
server_cert=server_cert_file_name,
server_key=server_key_file_name,
accepted_client_cns=accepted_client_cns,
extra_conn_props={
ConnProps.DOWNLOAD_DIR: file_download_dir,
ConnProps.UPLOAD_DIR: file_upload_dir,
ConnProps.DOWNLOAD_JOB_URL: download_job_url,
},
)
self.clients = {} # token => _Client
self.timeout = 10.0
def client_heartbeat(self, token, name: str):
"""Receive client heartbeat.
Args:
token: the session token of the client
name: client name
Returns:
Client.
"""
with self.client_lock:
client = self.clients.get(token)
if not client:
client = _Client(token, name)
self.clients[token] = client
client.last_heard_time = time.time()
return client
def client_dead(self, token):
"""Remove dead client.
Args:
token: the session token of the client
"""
with self.client_lock:
self.clients.pop(token, None)
    def get_client_tokens(self) -> List[str]:
"""Get tokens of existing clients."""
result = []
with self.client_lock:
for token in self.clients.keys():
result.append(token)
return result
def send_request_to_client(self, req: Message, client_token: str, timeout_secs=2.0) -> Optional[ClientReply]:
if not isinstance(req, Message):
raise TypeError("request must be Message but got {}".format(type(req)))
reqs = {client_token: req}
replies = self.send_requests(reqs, timeout_secs=timeout_secs)
if replies is None or len(replies) <= 0:
return None
else:
return replies[0]
def send_requests_and_get_reply_dict(self, requests: dict, timeout_secs=2.0) -> dict:
"""Send requests to clients
Args:
requests: A dict of requests: {client token: Message}
timeout_secs: how long to wait for reply before timeout
Returns:
A dict of {client token: reply}, where reply is a Message or None (no reply received)
"""
result = {}
if requests:
for token, _ in requests.items():
result[token] = None
replies = self.send_requests(requests, timeout_secs=timeout_secs)
for r in replies:
result[r.client_token] = r.reply
return result
    def send_requests(self, requests: dict, timeout_secs=2.0, optional=False) -> List[ClientReply]:
"""Send requests to clients.
NOTE::
This method is to be used by a Command Handler to send requests to Clients.
Hence, it is run in the Command Handler's handling thread.
This is a blocking call - returned only after all responses are received or timeout.
Args:
requests: A dict of requests: {client token: request or list of requests}
timeout_secs: how long to wait for reply before timeout
optional: whether the requests are optional
Returns:
A list of ClientReply
"""
return send_requests(
cell=self.cell,
command="admin",
requests=requests,
clients=self.clients,
timeout_secs=timeout_secs,
optional=optional,
)
def stop(self):
super().stop()
self.sai.close()
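# Hedged usage sketch (not part of the original module): broadcasting one admin
# request to every connected client through a running FedAdminServer. The topic
# value is a hypothetical placeholder; `admin_server` is assumed to be an already
# started FedAdminServer instance.
def _example_broadcast_to_clients(admin_server: FedAdminServer) -> dict:
    req = Message(topic="sys_info", body="")
    requests = {token: req for token in admin_server.get_client_tokens()}
    # returns {client token: reply Message or None}
    return admin_server.send_requests_and_get_reply_dict(requests, timeout_secs=5.0)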
| NVFlare-main | nvflare/private/fed/server/admin.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from nvflare.apis.fl_constant import FLContextKey, ServerCommandKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey, ReturnCode, make_reply
from nvflare.fuel.f3.message import Message as CellMessage
from nvflare.private.defs import CellChannel, CellMessageHeaderKeys, new_cell_message
from .server_commands import ServerCommands
class ServerCommandAgent(object):
def __init__(self, engine, cell: Cell) -> None:
"""To init the CommandAgent.
Args:
listen_port: port to listen the command
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.asked_to_stop = False
self.engine = engine
self.cell = cell
def start(self):
self.cell.register_request_cb(
channel=CellChannel.SERVER_COMMAND,
topic="*",
cb=self.execute_command,
)
self.cell.register_request_cb(
channel=CellChannel.AUX_COMMUNICATION,
topic="*",
cb=self.aux_communicate,
)
self.logger.info(f"ServerCommandAgent cell register_request_cb: {self.cell.get_fqcn()}")
def execute_command(self, request: CellMessage) -> CellMessage:
if not isinstance(request, CellMessage):
raise RuntimeError("request must be CellMessage but got {}".format(type(request)))
command_name = request.get_header(MessageHeaderKey.TOPIC)
# data = fobs.loads(request.payload)
data = request.payload
token = request.get_header(CellMessageHeaderKeys.TOKEN, None)
# client_name = request.get_header(CellMessageHeaderKeys.CLIENT_NAME, None)
client = None
if token:
client = self._get_client(token)
if client:
data.set_header(ServerCommandKey.FL_CLIENT, client)
command = ServerCommands.get_command(command_name)
if command:
if command_name in ServerCommands.client_request_commands_names:
if not client:
return make_reply(
ReturnCode.AUTHENTICATION_ERROR,
"Request from client: missing client token",
None,
)
with self.engine.new_context() as new_fl_ctx:
if command_name in ServerCommands.client_request_commands_names:
state_check = command.get_state_check(new_fl_ctx)
error = self.engine.server.authentication_check(request, state_check)
if error:
return make_reply(ReturnCode.AUTHENTICATION_ERROR, error, None)
reply = command.process(data=data, fl_ctx=new_fl_ctx)
if reply is not None:
return_message = new_cell_message({}, reply)
return_message.set_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
else:
return_message = make_reply(ReturnCode.PROCESS_EXCEPTION, "No process results", None)
return return_message
else:
return make_reply(ReturnCode.INVALID_REQUEST, "No server command found", None)
def _get_client(self, token):
fl_server = self.engine.server
client_manager = fl_server.client_manager
clients = client_manager.clients
return clients.get(token)
def aux_communicate(self, request: CellMessage) -> CellMessage:
assert isinstance(request, CellMessage), "request must be CellMessage but got {}".format(type(request))
data = request.payload
topic = request.get_header(MessageHeaderKey.TOPIC)
with self.engine.new_context() as fl_ctx:
server_state = self.engine.server.server_state
state_check = server_state.aux_communicate(fl_ctx)
error = self.engine.server.authentication_check(request, state_check)
if error:
                return make_reply(ReturnCode.AUTHENTICATION_ERROR, error, None)
engine = fl_ctx.get_engine()
            reply = engine.dispatch(topic=topic, request=data, fl_ctx=fl_ctx)
            if reply is not None:
                # attach the serializable public FL context so the peer can see it
                shared_fl_ctx = FLContext()
                shared_fl_ctx.set_public_props(copy.deepcopy(get_serializable_data(fl_ctx).get_all_public_props()))
                reply.set_header(key=FLContextKey.PEER_CONTEXT, value=shared_fl_ctx)
                return_message = new_cell_message({}, reply)
                return_message.set_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
            else:
                return_message = new_cell_message({}, None)
            return return_message
def shutdown(self):
self.asked_to_stop = True
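# Illustrative lifecycle sketch (not part of the original module). `engine` and
# `cell` are assumed to come from the surrounding server bootstrap code: the
# engine owns the FL contexts and the cell is already started elsewhere.
def _example_agent_lifecycle(engine, cell: Cell) -> ServerCommandAgent:
    agent = ServerCommandAgent(engine, cell)
    agent.start()  # registers callbacks on the SERVER_COMMAND and AUX_COMMUNICATION channels
    # ... serve requests ...
    agent.shutdown()  # only flips the asked_to_stop flag; the cell itself is owned by the caller
    return agent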
| NVFlare-main | nvflare/private/fed/server/server_command_agent.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ServerStatus(object):
NOT_STARTED = 0
STARTING = 1
STARTED = 2
STOPPED = 3
SHUTDOWN = 4
status_messages = {
NOT_STARTED: "app server not started",
STARTING: "app server starting",
STARTED: "app server started",
STOPPED: "app server stopped",
SHUTDOWN: "FL server shutdown",
}
def get_status_message(status):
return ServerStatus.status_messages.get(status)
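# Tiny illustrative example (not part of the original module): mapping a status
# code to its human-readable message.
if __name__ == "__main__":
    print(get_status_message(ServerStatus.STARTED))  # prints "app server started"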
| NVFlare-main | nvflare/private/fed/server/server_status.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import threading
import time
from abc import ABC, abstractmethod
from threading import Lock
from typing import Dict, List, Optional
from nvflare.apis.client import Client
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import (
FLContextKey,
MachineStatus,
RunProcessKey,
SecureTrainConst,
ServerCommandKey,
ServerCommandNames,
SnapshotKey,
WorkspaceConstants,
)
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.exit_codes import ProcessExitCode
from nvflare.fuel.f3.cellnet.cell import Cell
# from nvflare.fuel.f3.cellnet.cell import Cell, Message
from nvflare.fuel.f3.cellnet.core_cell import Message
from nvflare.fuel.f3.cellnet.core_cell import make_reply as make_cellnet_reply
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.cellnet.defs import ReturnCode as F3ReturnCode
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.net_agent import NetAgent
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.zip_utils import unzip_all_from_bytes
from nvflare.ha.overseer_agent import HttpOverseerAgent
from nvflare.private.defs import (
CellChannel,
CellChannelTopic,
CellMessageHeaderKeys,
JobFailureMsgKey,
new_cell_message,
)
from nvflare.private.fed.server.server_command_agent import ServerCommandAgent
from nvflare.private.fed.server.server_runner import ServerRunner
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.fed_event import ServerFedEventRunner
from .client_manager import ClientManager
from .run_manager import RunManager
from .server_engine import ServerEngine
from .server_state import (
ABORT_RUN,
ACTION,
MESSAGE,
NIS,
Cold2HotState,
ColdState,
Hot2ColdState,
HotState,
ServerState,
)
from .server_status import ServerStatus
class BaseServer(ABC):
def __init__(
self,
project_name=None,
min_num_clients=2,
max_num_clients=10,
heart_beat_timeout=600,
handlers: Optional[List[FLComponent]] = None,
shutdown_period=30.0,
):
"""Base server that provides the clients management and server deployment."""
self.project_name = project_name
self.min_num_clients = max(min_num_clients, 1)
self.max_num_clients = max(max_num_clients, 1)
self.heart_beat_timeout = heart_beat_timeout
self.handlers = handlers
self.client_manager = ClientManager(
project_name=self.project_name, min_num_clients=self.min_num_clients, max_num_clients=self.max_num_clients
)
self.cell = None
self.admin_server = None
self.lock = Lock()
self.snapshot_lock = Lock()
self.fl_ctx = FLContext()
self.platform = None
self.shutdown_period = shutdown_period
self.shutdown = False
self.status = ServerStatus.NOT_STARTED
self.abort_signal = None
self.executor = None
self.logger = logging.getLogger(self.__class__.__name__)
def get_all_clients(self) -> Dict[str, Client]:
"""Get the list of registered clients.
Returns:
A dict of {client_token: client}
"""
return self.client_manager.get_clients()
@abstractmethod
def remove_client_data(self, token):
pass
def close(self):
"""Shutdown the server."""
try:
if self.lock:
self.lock.release()
except RuntimeError:
self.logger.info("canceling sync locks")
try:
# if self.cell:
# self.cell.stop()
pass
finally:
self.logger.info("server off")
return 0
def deploy(self, args, grpc_args=None, secure_train=False):
"""Start a grpc server and listening the designated port."""
target = grpc_args["service"].get("target", "0.0.0.0:6007")
scheme = grpc_args["service"].get("scheme", "grpc")
if secure_train:
root_cert = grpc_args[SecureTrainConst.SSL_ROOT_CERT]
ssl_cert = grpc_args[SecureTrainConst.SSL_CERT]
private_key = grpc_args[SecureTrainConst.PRIVATE_KEY]
credentials = {
DriverParams.CA_CERT.value: root_cert,
DriverParams.SERVER_CERT.value: ssl_cert,
DriverParams.SERVER_KEY.value: private_key,
}
else:
credentials = {}
parent_url = None
parts = target.split(":")
if len(parts) > 1:
# "0" means all interfaces for all protocols (ipv4 and ipv6)
listen_target = "0:" + parts[1]
else:
listen_target = target
my_fqcn = FQCN.ROOT_SERVER
self.cell = Cell(
fqcn=my_fqcn,
root_url=scheme + "://" + listen_target,
secure=secure_train,
credentials=credentials,
create_internal_listener=True,
parent_url=parent_url,
)
self.cell.start()
mpm.add_cleanup_cb(self.cell.stop)
# return self.start()
cleanup_thread = threading.Thread(target=self.client_cleanup)
# heartbeat_thread.daemon = True
cleanup_thread.start()
def client_cleanup(self):
while not self.shutdown:
self.remove_dead_clients()
time.sleep(15)
def set_admin_server(self, admin_server):
self.admin_server = admin_server
def remove_dead_clients(self):
# Clean and remove the dead client without heartbeat.
self.logger.debug("trying to remove dead clients .......")
delete = []
for token, client in self.client_manager.get_clients().items():
if client.last_connect_time < time.time() - self.heart_beat_timeout:
delete.append(token)
for token in delete:
client = self.logout_client(token)
self.logger.info(
"Remove the dead Client. Name: {}\t Token: {}. Total clients: {}".format(
client.name, token, len(self.client_manager.get_clients())
)
)
def logout_client(self, token):
client = self.client_manager.remove_client(token)
self.remove_client_data(token)
if self.admin_server:
self.admin_server.client_dead(token)
self.notify_dead_client(client)
return client
def notify_dead_client(self, client):
"""Called to do further processing of the dead client
Args:
client: the dead client
Returns:
"""
pass
def fl_shutdown(self):
self.shutdown = True
start = time.time()
while self.client_manager.clients:
            # Wait for the clients to shut down and quit first.
time.sleep(0.1)
if time.time() - start > self.shutdown_period:
self.logger.info("There are still clients connected. But shutdown the server after timeout.")
break
self.close()
if self.executor:
self.executor.shutdown()
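# Illustrative sketch (not part of the original module) of the liveness rule used
# by BaseServer.remove_dead_clients(): a client is dropped once its last heartbeat
# is older than heart_beat_timeout seconds. The default value mirrors the
# constructor default; the timestamps passed in are hypothetical.
def _example_is_client_dead(last_connect_time: float, heart_beat_timeout: float = 600.0) -> bool:
    return last_connect_time < time.time() - heart_beat_timeout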
class FederatedServer(BaseServer):
def __init__(
self,
project_name=None,
min_num_clients=2,
max_num_clients=10,
cmd_modules=None,
heart_beat_timeout=600,
handlers: Optional[List[FLComponent]] = None,
args=None,
secure_train=False,
snapshot_persistor=None,
overseer_agent=None,
shutdown_period=30.0,
check_engine_frequency=3.0,
):
"""Federated server services.
Args:
project_name: server project name.
min_num_clients: minimum number of contributors at each round.
max_num_clients: maximum number of contributors at each round.
cmd_modules: command modules.
            heart_beat_timeout: heartbeat timeout in seconds
            handlers: a list of FLComponent handlers
            args: command line arguments
            secure_train: whether to use secure communication
            snapshot_persistor: persistor used to save/restore run snapshots (HA mode)
            overseer_agent: overseer agent used to determine the server's HA state
"""
BaseServer.__init__(
self,
project_name=project_name,
min_num_clients=min_num_clients,
max_num_clients=max_num_clients,
heart_beat_timeout=heart_beat_timeout,
handlers=handlers,
shutdown_period=shutdown_period,
)
self.contributed_clients = {}
self.tokens = None
self.round_started = time.time()
with self.lock:
self.reset_tokens()
self.cmd_modules = cmd_modules
self.builder = None
self.engine = self._create_server_engine(args, snapshot_persistor)
self.run_manager = None
self.server_runner = None
self.command_agent = None
self.check_engine_frequency = check_engine_frequency
self.processors = {}
self.runner_config = None
self.secure_train = secure_train
self.workspace = args.workspace
self.snapshot_location = None
self.overseer_agent = overseer_agent
self.server_state: ServerState = ColdState()
self.snapshot_persistor = snapshot_persistor
self.checking_server_state = False
self.ha_mode = False
def _register_cellnet_cbs(self):
self.cell.register_request_cb(
channel=CellChannel.SERVER_MAIN,
topic=CellChannelTopic.Register,
cb=self.register_client,
)
self.cell.register_request_cb(
channel=CellChannel.SERVER_MAIN,
topic=CellChannelTopic.Quit,
cb=self.quit_client,
)
self.cell.register_request_cb(
channel=CellChannel.SERVER_MAIN,
topic=CellChannelTopic.HEART_BEAT,
cb=self.client_heartbeat,
)
self.cell.register_request_cb(
channel=CellChannel.SERVER_MAIN,
topic=CellChannelTopic.REPORT_JOB_FAILURE,
cb=self.process_job_failure,
)
self.cell.register_request_cb(
channel=CellChannel.SERVER_PARENT_LISTENER,
topic="*",
cb=self._listen_command,
)
def _listen_command(self, request: Message) -> Message:
job_id = request.get_header(CellMessageHeaderKeys.JOB_ID)
command = request.get_header(MessageHeaderKey.TOPIC)
data = request.payload
if command == ServerCommandNames.GET_CLIENTS:
if job_id in self.engine.run_processes:
clients = self.engine.run_processes[job_id].get(RunProcessKey.PARTICIPANTS)
return_data = {ServerCommandKey.CLIENTS: clients, ServerCommandKey.JOB_ID: job_id}
else:
return_data = {ServerCommandKey.CLIENTS: None, ServerCommandKey.JOB_ID: job_id}
return make_cellnet_reply(F3ReturnCode.OK, "", return_data)
elif command == ServerCommandNames.UPDATE_RUN_STATUS:
execution_error = data.get("execution_error")
with self.lock:
run_process_info = self.engine.run_processes.get(job_id)
if run_process_info is not None:
if execution_error:
run_process_info[RunProcessKey.PROCESS_EXE_ERROR] = True
self.engine.exception_run_processes[job_id] = run_process_info
run_process_info[RunProcessKey.PROCESS_FINISHED] = True
reply = make_cellnet_reply(F3ReturnCode.OK, "", None)
return reply
elif command == ServerCommandNames.HEARTBEAT:
return make_cellnet_reply(F3ReturnCode.OK, "", None)
else:
return make_cellnet_reply(F3ReturnCode.INVALID_REQUEST, "", None)
def _create_server_engine(self, args, snapshot_persistor):
return ServerEngine(
server=self, args=args, client_manager=self.client_manager, snapshot_persistor=snapshot_persistor
)
def create_job_cell(self, job_id, root_url, parent_url, secure_train, server_config) -> Cell:
my_fqcn = FQCN.join([FQCN.ROOT_SERVER, job_id])
if secure_train:
root_cert = server_config[SecureTrainConst.SSL_ROOT_CERT]
ssl_cert = server_config[SecureTrainConst.SSL_CERT]
private_key = server_config[SecureTrainConst.PRIVATE_KEY]
credentials = {
DriverParams.CA_CERT.value: root_cert,
DriverParams.SERVER_CERT.value: ssl_cert,
DriverParams.SERVER_KEY.value: private_key,
}
else:
credentials = {}
cell = Cell(
fqcn=my_fqcn,
root_url=root_url,
secure=secure_train,
credentials=credentials,
create_internal_listener=False,
parent_url=parent_url,
)
cell.start()
net_agent = NetAgent(cell)
mpm.add_cleanup_cb(net_agent.close)
mpm.add_cleanup_cb(cell.stop)
self.command_agent = ServerCommandAgent(self.engine, cell)
self.command_agent.start()
return cell
# @property
def task_meta_info(self, client_name):
"""Task meta information.
The model_meta_info uniquely defines the current model,
it is used to reject outdated client's update.
"""
meta_info = {
CellMessageHeaderKeys.PROJECT_NAME: self.project_name,
CellMessageHeaderKeys.CLIENT_NAME: client_name,
}
return meta_info
def remove_client_data(self, token):
self.tokens.pop(token, None)
def reset_tokens(self):
"""Reset the token set.
After resetting, each client can take a token
and start fetching the current global model.
This function is not thread-safe.
"""
self.tokens = dict()
for token, client in self.get_all_clients().items():
self.tokens[token] = self.task_meta_info(client.name)
def _before_service(self, fl_ctx: FLContext):
# before the service processing
fl_ctx.remove_prop(FLContextKey.COMMUNICATION_ERROR)
fl_ctx.remove_prop(FLContextKey.UNAUTHENTICATED)
def _generate_reply(self, headers, payload, fl_ctx: FLContext):
# process after the service processing
unauthenticated = fl_ctx.get_prop(FLContextKey.UNAUTHENTICATED)
if unauthenticated:
return make_cellnet_reply(rc=F3ReturnCode.UNAUTHENTICATED, error=unauthenticated)
error = fl_ctx.get_prop(FLContextKey.COMMUNICATION_ERROR)
if error:
return make_cellnet_reply(rc=F3ReturnCode.COMM_ERROR, error=error)
else:
return_message = new_cell_message(headers, payload)
return_message.set_header(MessageHeaderKey.RETURN_CODE, F3ReturnCode.OK)
return return_message
def register_client(self, request: Message) -> Message:
"""Register new clients on the fly.
Each client must get registered before getting the global model.
The server will expect updates from the registered clients
for multiple federated rounds.
This function does not change min_num_clients and max_num_clients.
"""
with self.engine.new_context() as fl_ctx:
self._before_service(fl_ctx)
state_check = self.server_state.register(fl_ctx)
error = self._handle_state_check(state_check, fl_ctx)
if error is not None:
return make_cellnet_reply(rc=F3ReturnCode.COMM_ERROR, error=error)
client = self.client_manager.authenticate(request, fl_ctx)
if client and client.token:
self.tokens[client.token] = self.task_meta_info(client.name)
if self.admin_server:
self.admin_server.client_heartbeat(client.token, client.name)
headers = {
CellMessageHeaderKeys.TOKEN: client.token,
CellMessageHeaderKeys.SSID: self.server_state.ssid,
}
else:
headers = {}
return self._generate_reply(headers=headers, payload=None, fl_ctx=fl_ctx)
def _handle_state_check(self, state_check, fl_ctx: FLContext):
if state_check.get(ACTION) in [NIS, ABORT_RUN]:
fl_ctx.set_prop(FLContextKey.COMMUNICATION_ERROR, state_check.get(MESSAGE), sticky=False)
return state_check.get(MESSAGE)
return None
def quit_client(self, request: Message) -> Message:
"""Existing client quits the federated training process.
Server will stop sharing the global model with the client,
further contribution will be rejected.
This function does not change min_num_clients and max_num_clients.
"""
with self.engine.new_context() as fl_ctx:
client = self.client_manager.validate_client(request, fl_ctx)
if client:
token = client.get_token()
self.logout_client(token)
headers = {CellMessageHeaderKeys.MESSAGE: "Removed client"}
return self._generate_reply(headers=headers, payload=None, fl_ctx=fl_ctx)
def process_job_failure(self, request: Message):
payload = request.payload
client = request.get_header(key=MessageHeaderKey.ORIGIN)
if not isinstance(payload, dict):
self.logger.error(
f"dropped bad Job Failure report from {client}: expect payload to be dict but got {type(payload)}"
)
return
job_id = payload.get(JobFailureMsgKey.JOB_ID)
if not job_id:
self.logger.error(f"dropped bad Job Failure report from {client}: no job_id")
return
code = payload.get(JobFailureMsgKey.CODE)
reason = payload.get(JobFailureMsgKey.REASON, "?")
if code == ProcessExitCode.UNSAFE_COMPONENT:
with self.engine.new_context() as fl_ctx:
self.logger.info(f"Aborting job {job_id} due to reported failure from {client}: {reason}")
self.engine.job_runner.stop_run(job_id, fl_ctx)
def client_heartbeat(self, request: Message) -> Message:
with self.engine.new_context() as fl_ctx:
self._before_service(fl_ctx)
state_check = self.server_state.heartbeat(fl_ctx)
error = self._handle_state_check(state_check, fl_ctx)
if error is not None:
return make_cellnet_reply(rc=F3ReturnCode.COMM_ERROR, error=error)
token = request.get_header(CellMessageHeaderKeys.TOKEN)
client_name = request.get_header(CellMessageHeaderKeys.CLIENT_NAME)
if self.client_manager.heartbeat(token, client_name, fl_ctx):
self.tokens[token] = self.task_meta_info(client_name)
if self.admin_server:
self.admin_server.client_heartbeat(token, client_name)
abort_runs = self._sync_client_jobs(request, token)
reply = self._generate_reply(
headers={CellMessageHeaderKeys.MESSAGE: "Heartbeat response"}, payload=None, fl_ctx=fl_ctx
)
if abort_runs:
reply.set_header(CellMessageHeaderKeys.ABORT_JOBS, abort_runs)
display_runs = ",".join(abort_runs)
self.logger.debug(
f"These jobs: {display_runs} are not running on the server. "
f"Ask client: {client_name} to abort these runs."
)
return reply
def _sync_client_jobs(self, request, client_token):
# jobs that are running on client but not on server need to be aborted!
client_jobs = request.get_header(CellMessageHeaderKeys.JOB_IDS)
server_jobs = self.engine.run_processes.keys()
jobs_need_abort = list(set(client_jobs).difference(server_jobs))
# also check jobs that are running on server but not on the client
jobs_on_server_but_not_on_client = list(set(server_jobs).difference(client_jobs))
if jobs_on_server_but_not_on_client:
# notify all the participating clients these jobs are not running on server anymore
for job_id in jobs_on_server_but_not_on_client:
job_info = self.engine.run_processes[job_id]
participating_clients = job_info.get(RunProcessKey.PARTICIPANTS, None)
if participating_clients:
# this is a dict: token => nvflare.apis.client.Client
client = participating_clients.get(client_token, None)
if client:
self._notify_dead_job(client, job_id)
return jobs_need_abort
def _notify_dead_job(self, client, job_id: str):
try:
with self.engine.lock:
shareable = Shareable()
shareable.set_header(ServerCommandKey.FL_CLIENT, client.name)
fqcn = FQCN.join([FQCN.ROOT_SERVER, job_id])
request = new_cell_message({}, shareable)
self.cell.fire_and_forget(
targets=fqcn,
channel=CellChannel.SERVER_COMMAND,
topic=ServerCommandNames.HANDLE_DEAD_JOB,
message=request,
optional=True,
)
except Exception:
self.logger.info("Could not connect to server runner process")
def notify_dead_client(self, client):
"""Called to do further processing of the dead client
Args:
client: the dead client
Returns:
"""
# find all RUNs that this client is participating
if not self.engine.run_processes:
return
for job_id, process_info in self.engine.run_processes.items():
assert isinstance(process_info, dict)
participating_clients = process_info.get(RunProcessKey.PARTICIPANTS, None)
if participating_clients and client.token in participating_clients:
self._notify_dead_job(client, job_id)
def start_run(self, job_id, run_root, conf, args, snapshot):
# Create the FL Engine
workspace = Workspace(args.workspace, "server", args.config_folder)
self.run_manager = self.create_run_manager(workspace, job_id)
self.engine.set_run_manager(self.run_manager)
self.engine.set_configurator(conf)
self.engine.asked_to_stop = False
self.run_manager.cell = self.cell
fed_event_runner = ServerFedEventRunner()
self.run_manager.add_handler(fed_event_runner)
try:
self.server_runner = ServerRunner(config=self.runner_config, job_id=job_id, engine=self.engine)
self.run_manager.add_handler(self.server_runner)
self.run_manager.add_component("_Server_Runner", self.server_runner)
with self.engine.new_context() as fl_ctx:
if snapshot:
self.engine.restore_components(snapshot=snapshot, fl_ctx=FLContext())
fl_ctx.set_prop(FLContextKey.APP_ROOT, run_root, sticky=True)
fl_ctx.set_prop(FLContextKey.CURRENT_RUN, job_id, private=False, sticky=True)
fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True, sticky=True)
fl_ctx.set_prop(FLContextKey.ARGS, args, private=True, sticky=True)
fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)
fl_ctx.set_prop(FLContextKey.SECURE_MODE, self.secure_train, private=True, sticky=True)
fl_ctx.set_prop(FLContextKey.RUNNER, self.server_runner, private=True, sticky=True)
engine_thread = threading.Thread(target=self.run_engine)
engine_thread.start()
self.engine.engine_info.status = MachineStatus.STARTED
while self.engine.engine_info.status != MachineStatus.STOPPED:
if self.engine.asked_to_stop:
self.engine.engine_info.status = MachineStatus.STOPPED
time.sleep(self.check_engine_frequency)
finally:
self.engine.engine_info.status = MachineStatus.STOPPED
self.run_manager = None
def create_run_manager(self, workspace, job_id):
return RunManager(
server_name=self.project_name,
engine=self.engine,
job_id=job_id,
workspace=workspace,
components=self.runner_config.components,
client_manager=self.client_manager,
handlers=self.runner_config.handlers,
)
def authentication_check(self, request: Message, state_check):
error = None
# server_state = self.engine.server.server_state
if state_check.get(ACTION) in [NIS, ABORT_RUN]:
# return make_reply(ReturnCode.AUTHENTICATION_ERROR, state_check.get(MESSAGE), fobs.dumps(None))
error = state_check.get(MESSAGE)
client_ssid = request.get_header(CellMessageHeaderKeys.SSID, None)
if client_ssid != self.server_state.ssid:
# return make_reply(ReturnCode.AUTHENTICATION_ERROR, "Request from invalid client SSID",
# fobs.dumps(None))
error = "Request from unknown client SSID"
return error
def abort_run(self):
with self.engine.new_context() as fl_ctx:
if self.server_runner:
self.server_runner.abort(fl_ctx)
def run_engine(self):
self.engine.engine_info.status = MachineStatus.STARTED
try:
self.server_runner.run()
except Exception as e:
self.logger.error(f"FL server execution exception: {secure_format_exception(e)}")
finally:
# self.engine.update_job_run_status()
self.stop_run_engine_cell()
self.engine.engine_info.status = MachineStatus.STOPPED
def stop_run_engine_cell(self):
# self.cell.stop()
# mpm.stop()
pass
def deploy(self, args, grpc_args=None, secure_train=False):
super().deploy(args, grpc_args, secure_train)
target = grpc_args["service"].get("target", "0.0.0.0:6007")
with self.lock:
self.server_state.host = target.split(":")[0]
self.server_state.service_port = target.split(":")[1]
self.overseer_agent = self._init_agent(args)
if isinstance(self.overseer_agent, HttpOverseerAgent):
self.ha_mode = True
if secure_train:
if self.overseer_agent:
self.overseer_agent.set_secure_context(
ca_path=grpc_args["ssl_root_cert"],
cert_path=grpc_args["ssl_cert"],
prv_key_path=grpc_args["ssl_private_key"],
)
self.engine.cell = self.cell
self._register_cellnet_cbs()
self.overseer_agent.start(self.overseer_callback)
def _init_agent(self, args=None):
kv_list = parse_vars(args.set)
sp = kv_list.get("sp")
if sp:
with self.engine.new_context() as fl_ctx:
fl_ctx.set_prop(FLContextKey.SP_END_POINT, sp)
self.overseer_agent.initialize(fl_ctx)
return self.overseer_agent
def _check_server_state(self, overseer_agent):
if overseer_agent.is_shutdown():
self.engine.shutdown_server()
return
sp = overseer_agent.get_primary_sp()
old_state_name = self.server_state.__class__.__name__
with self.lock:
with self.engine.new_context() as fl_ctx:
self.server_state = self.server_state.handle_sd_callback(sp, fl_ctx)
if isinstance(self.server_state, Cold2HotState):
self._turn_to_hot()
elif isinstance(self.server_state, Hot2ColdState):
self._turn_to_cold(old_state_name)
def _notify_state_change(self, old_state_name):
new_state_name = self.server_state.__class__.__name__
if new_state_name != old_state_name:
self.logger.info(f"state changed from: {old_state_name} to: {new_state_name}")
keys = list(self.engine.run_processes.keys())
if keys:
target_fqcns = []
for job_id in keys:
target_fqcns.append(FQCN.join([FQCN.ROOT_SERVER, job_id]))
cell_msg = new_cell_message(headers={}, payload=self.server_state)
self.cell.broadcast_request(
channel=CellChannel.SERVER_COMMAND,
topic=ServerCommandNames.SERVER_STATE,
request=cell_msg,
targets=target_fqcns,
timeout=5.0,
optional=True,
)
def overseer_callback(self, overseer_agent):
if self.checking_server_state:
self.logger.debug("busy checking server state")
return
self.checking_server_state = True
try:
self._check_server_state(overseer_agent)
except Exception as ex:
self.logger.error(f"exception in checking server state: {secure_format_exception(ex)}")
finally:
self.checking_server_state = False
def _turn_to_hot(self):
# Restore Snapshot
if self.ha_mode:
restored_job_ids = []
with self.snapshot_lock:
fl_snapshot = self.snapshot_persistor.retrieve()
if fl_snapshot:
for run_number, snapshot in fl_snapshot.run_snapshots.items():
if snapshot and not snapshot.completed:
# Restore the workspace
workspace_data = snapshot.get_component_snapshot(SnapshotKey.WORKSPACE).get("content")
dst = os.path.join(self.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(run_number))
if os.path.exists(dst):
shutil.rmtree(dst, ignore_errors=True)
os.makedirs(dst, exist_ok=True)
unzip_all_from_bytes(workspace_data, dst)
job_id = snapshot.get_component_snapshot(SnapshotKey.JOB_INFO).get(SnapshotKey.JOB_ID)
job_clients = snapshot.get_component_snapshot(SnapshotKey.JOB_INFO).get(
SnapshotKey.JOB_CLIENTS
)
self.logger.info(f"Restore the previous snapshot. Run_number: {run_number}")
with self.engine.new_context() as fl_ctx:
self.engine.job_runner.restore_running_job(
run_number=run_number,
job_id=job_id,
job_clients=job_clients,
snapshot=snapshot,
fl_ctx=fl_ctx,
)
restored_job_ids.append(job_id)
with self.engine.new_context() as fl_ctx:
self.engine.job_runner.update_abnormal_finished_jobs(restored_job_ids, fl_ctx=fl_ctx)
else:
with self.engine.new_context() as fl_ctx:
self.snapshot_persistor.delete()
self.engine.job_runner.update_unfinished_jobs(fl_ctx=fl_ctx)
with self.lock:
self.server_state = HotState(
host=self.server_state.host, port=self.server_state.service_port, ssid=self.server_state.ssid
)
def _turn_to_cold(self, old_state_name):
with self.lock:
self.server_state = ColdState(host=self.server_state.host, port=self.server_state.service_port)
self._notify_state_change(old_state_name)
self.engine.pause_server_jobs()
def stop_training(self):
self.status = ServerStatus.STOPPED
self.logger.info("Server app stopped.\n\n")
def fl_shutdown(self):
self.engine.stop_all_jobs()
self.engine.fire_event(EventType.SYSTEM_END, self.engine.new_context())
super().fl_shutdown()
def close(self):
"""Shutdown the server."""
self.logger.info("shutting down server")
self.shutdown = True
if self.overseer_agent:
self.overseer_agent.end()
return super().close()
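# Plain-Python sketch (illustrative only, not part of the original module) of the
# set arithmetic performed by FederatedServer._sync_client_jobs(): jobs the client
# reports but the server no longer runs must be aborted on the client, while jobs
# the server runs but the client does not report trigger a dead-job notification.
# The job ids are hypothetical.
def _example_job_sync():
    client_jobs = {"job-1", "job-2", "job-3"}
    server_jobs = {"job-2", "job-4"}
    jobs_to_abort_on_client = sorted(client_jobs - server_jobs)  # ["job-1", "job-3"]
    jobs_missing_on_client = sorted(server_jobs - client_jobs)  # ["job-4"]
    return jobs_to_abort_on_client, jobs_missing_on_client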
| NVFlare-main | nvflare/private/fed/server/fed_server.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
from typing import Dict, List, Optional, Tuple
from nvflare.apis.client import Client
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import (
AdminCommandNames,
FLContextKey,
MachineStatus,
ReturnCode,
RunProcessKey,
ServerCommandKey,
ServerCommandNames,
SnapshotKey,
WorkspaceConstants,
)
from nvflare.apis.fl_context import FLContext, FLContextManager
from nvflare.apis.fl_snapshot import RunSnapshot
from nvflare.apis.impl.job_def_manager import JobDefManagerSpec
from nvflare.apis.job_def import Job
from nvflare.apis.shareable import Shareable
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.apis.workspace import Workspace
from nvflare.fuel.f3.cellnet.core_cell import FQCN, CoreCell
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.cellnet.defs import ReturnCode as CellMsgReturnCode
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.network_utils import get_open_ports
from nvflare.fuel.utils.zip_utils import zip_directory_to_bytes
from nvflare.private.admin_defs import Message, MsgHeader
from nvflare.private.defs import CellChannel, CellMessageHeaderKeys, RequestHeader, TrainingTopic, new_cell_message
from nvflare.private.fed.server.server_json_config import ServerJsonConfigurator
from nvflare.private.fed.server.server_state import ServerState
from nvflare.private.fed.utils.fed_utils import security_close
from nvflare.private.scheduler_constants import ShareableHeader
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import InfoCollector
from nvflare.widgets.widget import Widget, WidgetID
from .client_manager import ClientManager
from .job_runner import JobRunner
from .message_send import ClientReply
from .run_info import RunInfo
from .run_manager import RunManager
from .server_engine_internal_spec import EngineInfo, ServerEngineInternalSpec
from .server_status import ServerStatus
class ServerEngine(ServerEngineInternalSpec):
def __init__(self, server, args, client_manager: ClientManager, snapshot_persistor, workers=3):
"""Server engine.
Args:
server: server
args: arguments
            client_manager (ClientManager): client manager.
            snapshot_persistor: persistor used to save/restore run snapshots.
            workers: number of worker threads.
"""
# TODO:: clean up the server function / requirement here should be BaseServer
self.server = server
self.args = args
self.run_processes = {}
self.exception_run_processes = {}
self.run_manager = None
self.conf = None
# TODO:: does this class need client manager?
self.client_manager = client_manager
self.widgets = {
WidgetID.INFO_COLLECTOR: InfoCollector(),
# WidgetID.FED_EVENT_RUNNER: ServerFedEventRunner()
}
self.engine_info = EngineInfo()
        if workers < 1:
            raise ValueError("workers must be >= 1 but got {}".format(workers))
self.executor = ThreadPoolExecutor(max_workers=workers)
self.lock = Lock()
self.logger = logging.getLogger(self.__class__.__name__)
self.asked_to_stop = False
self.snapshot_persistor = snapshot_persistor
self.job_runner = None
self.job_def_manager = None
self.kv_list = parse_vars(args.set)
def _get_server_app_folder(self):
return WorkspaceConstants.APP_PREFIX + "server"
def _get_client_app_folder(self, client_name):
return WorkspaceConstants.APP_PREFIX + client_name
def _get_run_folder(self, job_id):
return os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(job_id))
def get_engine_info(self) -> EngineInfo:
self.engine_info.app_names = {}
if bool(self.run_processes):
self.engine_info.status = MachineStatus.STARTED
else:
self.engine_info.status = MachineStatus.STOPPED
keys = list(self.run_processes.keys())
for job_id in keys:
run_folder = os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(job_id))
app_file = os.path.join(run_folder, "fl_app.txt")
if os.path.exists(app_file):
with open(app_file, "r") as f:
self.engine_info.app_names[job_id] = f.readline().strip()
else:
self.engine_info.app_names[job_id] = "?"
return self.engine_info
def get_run_info(self) -> Optional[RunInfo]:
if self.run_manager:
run_info: RunInfo = self.run_manager.get_run_info()
return run_info
return None
def delete_job_id(self, num):
job_id_folder = os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(num))
if os.path.exists(job_id_folder):
shutil.rmtree(job_id_folder)
return ""
    def get_clients(self) -> List[Client]:
return list(self.client_manager.get_clients().values())
def validate_targets(self, client_names: List[str]) -> Tuple[List[Client], List[str]]:
return self.client_manager.get_all_clients_from_inputs(client_names)
def start_app_on_server(self, run_number: str, job: Job = None, job_clients=None, snapshot=None) -> str:
if run_number in self.run_processes.keys():
return f"Server run: {run_number} already started."
else:
workspace = Workspace(root_dir=self.args.workspace, site_name="server")
app_root = workspace.get_app_dir(run_number)
if not os.path.exists(app_root):
return "Server app does not exist. Please deploy the server app before starting."
self.engine_info.status = MachineStatus.STARTING
app_custom_folder = workspace.get_app_custom_dir(run_number)
if not isinstance(job, Job):
return "Must provide a job object to start the server app."
open_ports = get_open_ports(2)
self._start_runner_process(
self.args,
app_root,
run_number,
app_custom_folder,
open_ports,
job.job_id,
job_clients,
snapshot,
self.server.cell,
self.server.server_state,
)
self.engine_info.status = MachineStatus.STARTED
return ""
def remove_exception_process(self, job_id):
with self.lock:
if job_id in self.exception_run_processes:
self.exception_run_processes.pop(job_id)
def wait_for_complete(self, job_id, process):
process.wait()
run_process_info = self.run_processes.get(job_id)
if run_process_info:
# Wait for the job process to finish UPDATE_RUN_STATUS process
start_time = time.time()
max_wait = 2.0
while True:
process_finished = run_process_info.get(RunProcessKey.PROCESS_FINISHED, False)
if process_finished:
break
if time.time() - start_time >= max_wait:
self.logger.debug(f"Job:{job_id} UPDATE_RUN_STATUS didn't finish fast enough.")
break
time.sleep(0.1)
with self.lock:
return_code = process.poll()
# if process exit but with Execution exception
if return_code and return_code != 0:
self.logger.info(f"Job: {job_id} child process exit with return code {return_code}")
run_process_info[RunProcessKey.PROCESS_RETURN_CODE] = return_code
if job_id not in self.exception_run_processes:
self.exception_run_processes[job_id] = run_process_info
self.run_processes.pop(job_id, None)
self.engine_info.status = MachineStatus.STOPPED
def _start_runner_process(
self,
args,
app_root,
run_number,
app_custom_folder,
open_ports,
job_id,
job_clients,
snapshot,
cell: CoreCell,
server_state: ServerState,
):
new_env = os.environ.copy()
if app_custom_folder != "":
new_env["PYTHONPATH"] = new_env.get("PYTHONPATH", "") + os.pathsep + app_custom_folder
listen_port = open_ports[1]
if snapshot:
restore_snapshot = True
else:
restore_snapshot = False
command_options = ""
for t in args.set:
command_options += " " + t
command = (
sys.executable
+ " -m nvflare.private.fed.app.server.runner_process -m "
+ args.workspace
+ " -s fed_server.json -r "
+ app_root
+ " -n "
+ str(run_number)
+ " -p "
+ str(cell.get_internal_listener_url())
+ " -u "
+ str(cell.get_root_url_for_child())
+ " --host "
+ str(server_state.host)
+ " --port "
+ str(server_state.service_port)
+ " --ssid "
+ str(server_state.ssid)
+ " --ha_mode "
+ str(self.server.ha_mode)
+ " --set"
+ command_options
+ " print_conf=True restore_snapshot="
+ str(restore_snapshot)
)
# use os.setsid to create new process group ID
process = subprocess.Popen(shlex.split(command, True), preexec_fn=os.setsid, env=new_env)
if not job_id:
job_id = ""
if not job_clients:
job_clients = self.client_manager.clients
with self.lock:
self.run_processes[run_number] = {
RunProcessKey.LISTEN_PORT: listen_port,
RunProcessKey.CONNECTION: None,
RunProcessKey.CHILD_PROCESS: process,
RunProcessKey.JOB_ID: job_id,
RunProcessKey.PARTICIPANTS: job_clients,
}
threading.Thread(target=self.wait_for_complete, args=[run_number, process]).start()
return process
def get_job_clients(self, client_sites):
job_clients = {}
if client_sites:
for site, dispatch_info in client_sites.items():
client = self.get_client_from_name(site)
if client:
job_clients[client.token] = client
return job_clients
def remove_custom_path(self):
regex = re.compile(".*/run_.*/custom")
custom_paths = list(filter(regex.search, sys.path))
for path in custom_paths:
sys.path.remove(path)
def abort_app_on_clients(self, clients: List[str]) -> str:
status = self.engine_info.status
if status == MachineStatus.STOPPED:
return "Server app has not started."
if status == MachineStatus.STARTING:
return "Server app is starting, please wait for started before abort."
return ""
def abort_app_on_server(self, job_id: str, turn_to_cold: bool = False) -> str:
if job_id not in self.run_processes.keys():
return "Server app has not started."
self.logger.info("Abort the server app run.")
command_data = Shareable()
command_data.set_header(ServerCommandKey.TURN_TO_COLD, turn_to_cold)
try:
status_message = self.send_command_to_child_runner_process(
job_id=job_id,
command_name=AdminCommandNames.ABORT,
command_data=command_data,
timeout=1.0,
optional=True,
)
self.logger.info(f"Abort server status: {status_message}")
except Exception:
with self.lock:
child_process = self.run_processes.get(job_id, {}).get(RunProcessKey.CHILD_PROCESS, None)
if child_process:
child_process.terminate()
finally:
threading.Thread(target=self._remove_run_processes, args=[job_id]).start()
self.engine_info.status = MachineStatus.STOPPED
return ""
def _remove_run_processes(self, job_id):
        # wait for the run process to terminate gracefully, and make sure it is removed from run_processes.
max_wait = 5.0
start = time.time()
while True:
if job_id not in self.run_processes:
# job already gone
return
if time.time() - start >= max_wait:
break
time.sleep(0.1)
self.run_processes.pop(job_id, None)
def check_app_start_readiness(self, job_id: str) -> str:
if job_id not in self.run_processes.keys():
return f"Server app run: {job_id} has not started."
return ""
def shutdown_server(self) -> str:
status = self.server.status
if status == ServerStatus.STARTING:
return "Server app is starting, please wait for started before shutdown."
self.logger.info("FL server shutdown.")
touch_file = os.path.join(self.args.workspace, "shutdown.fl")
_ = self.executor.submit(lambda p: server_shutdown(*p), [self.server, touch_file])
# while self.server.status != ServerStatus.SHUTDOWN:
# time.sleep(1.0)
return ""
def restart_server(self) -> str:
status = self.server.status
if status == ServerStatus.STARTING:
return "Server is starting, please wait for started before restart."
self.logger.info("FL server restart.")
touch_file = os.path.join(self.args.workspace, "restart.fl")
_ = self.executor.submit(lambda p: server_shutdown(*p), [self.server, touch_file])
# while self.server.status != ServerStatus.SHUTDOWN:
# time.sleep(1.0)
return ""
def get_widget(self, widget_id: str) -> Widget:
return self.widgets.get(widget_id)
def get_client_name_from_token(self, token: str) -> str:
client = self.server.client_manager.clients.get(token)
if client:
return client.name
else:
return ""
def get_client_from_name(self, client_name):
return self.client_manager.get_client_from_name(client_name)
def get_app_data(self, app_name: str) -> Tuple[str, object]:
fullpath_src = os.path.join(self.server.admin_server.file_upload_dir, app_name)
if not os.path.exists(fullpath_src):
return f"App folder '{app_name}' does not exist in staging area.", None
data = zip_directory_to_bytes(fullpath_src, "")
return "", data
def get_app_run_info(self, job_id) -> Optional[RunInfo]:
run_info = None
try:
run_info = self.send_command_to_child_runner_process(
job_id=job_id,
command_name=ServerCommandNames.GET_RUN_INFO,
command_data={},
)
except Exception:
self.logger.error(f"Failed to get_app_run_info for run: {job_id}")
return run_info
def set_run_manager(self, run_manager: RunManager):
self.run_manager = run_manager
for _, widget in self.widgets.items():
self.run_manager.add_handler(widget)
def set_job_runner(self, job_runner: JobRunner, job_manager: JobDefManagerSpec):
self.job_runner = job_runner
self.job_def_manager = job_manager
def set_configurator(self, conf: ServerJsonConfigurator):
if not isinstance(conf, ServerJsonConfigurator):
raise TypeError("conf must be ServerJsonConfigurator but got {}".format(type(conf)))
self.conf = conf
def build_component(self, config_dict):
return self.conf.build_component(config_dict)
def new_context(self) -> FLContext:
if self.run_manager:
return self.run_manager.new_context()
else:
# return FLContext()
return FLContextManager(
engine=self, identity_name=self.server.project_name, job_id="", public_stickers={}, private_stickers={}
).new_context()
def get_component(self, component_id: str) -> object:
return self.run_manager.get_component(component_id)
def fire_event(self, event_type: str, fl_ctx: FLContext):
self.run_manager.fire_event(event_type, fl_ctx)
def get_staging_path_of_app(self, app_name: str) -> str:
return os.path.join(self.server.admin_server.file_upload_dir, app_name)
def deploy_app_to_server(self, run_destination: str, app_name: str, app_staging_path: str) -> str:
return self.deploy_app(run_destination, app_name, WorkspaceConstants.APP_PREFIX + "server")
def get_workspace(self) -> Workspace:
return self.run_manager.get_workspace()
def ask_to_stop(self):
self.asked_to_stop = True
def deploy_app(self, job_id, src, dst):
fullpath_src = os.path.join(self.server.admin_server.file_upload_dir, src)
fullpath_dst = os.path.join(self._get_run_folder(job_id), dst)
if not os.path.exists(fullpath_src):
return f"App folder '{src}' does not exist in staging area."
if os.path.exists(fullpath_dst):
shutil.rmtree(fullpath_dst)
shutil.copytree(fullpath_src, fullpath_dst)
app_file = os.path.join(self._get_run_folder(job_id), "fl_app.txt")
if os.path.exists(app_file):
os.remove(app_file)
with open(app_file, "wt") as f:
f.write(f"{src}")
return ""
def remove_clients(self, clients: List[str]) -> str:
for client in clients:
self._remove_dead_client(client)
return ""
def _remove_dead_client(self, token):
_ = self.server.client_manager.remove_client(token)
self.server.remove_client_data(token)
if self.server.admin_server:
self.server.admin_server.client_dead(token)
def register_aux_message_handler(self, topic: str, message_handle_func):
self.run_manager.aux_runner.register_aux_message_handler(topic, message_handle_func)
def send_aux_request(
self,
        targets: list,
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
optional=False,
secure=False,
) -> dict:
try:
if not targets:
targets = []
for t in self.get_clients():
targets.append(t.name)
if targets:
return self.run_manager.aux_runner.send_aux_request(
targets=targets,
topic=topic,
request=request,
timeout=timeout,
fl_ctx=fl_ctx,
optional=optional,
secure=secure,
)
else:
return {}
except Exception as e:
self.logger.error(f"Failed to send the aux_message: {topic} with exception: {secure_format_exception(e)}.")
def sync_clients_from_main_process(self):
        # repeatedly ask the parent process for the participating clients until we receive the result,
        # or give up after 30 seconds (one attempt per second).
start = time.time()
max_wait = 30.0
job_id = self.args.job_id
while True:
clients = self._retrieve_clients_data(job_id)
if clients:
self.client_manager.clients = clients
self.logger.debug(f"received participating clients {clients}")
return
if time.time() - start >= max_wait:
self.logger.critical(f"Cannot get participating clients for job {job_id} after {max_wait} seconds")
raise RuntimeError("Exiting job process: Cannot get participating clients for job {job_id}")
self.logger.debug("didn't receive clients info - retry in 1 second")
time.sleep(1.0)
def _get_participating_clients(self):
# called from server's job cell
return self.client_manager.clients
def _retrieve_clients_data(self, job_id):
request = new_cell_message({CellMessageHeaderKeys.JOB_ID: job_id}, {})
return_data = self.server.cell.send_request(
target=FQCN.ROOT_SERVER,
channel=CellChannel.SERVER_PARENT_LISTENER,
topic=ServerCommandNames.GET_CLIENTS,
request=request,
timeout=5.0,
optional=True,
)
rc = return_data.get_header(MessageHeaderKey.RETURN_CODE, CellMsgReturnCode.OK)
if rc != CellMsgReturnCode.OK:
self.logger.debug(f"cannot retrieve clients from parent: {rc}")
return None
data = return_data.payload
clients = data.get(ServerCommandKey.CLIENTS, None)
if clients is None:
self.logger.error(f"parent failed to return clients info for job {job_id}")
return clients
def update_job_run_status(self):
with self.new_context() as fl_ctx:
execution_error = fl_ctx.get_prop(FLContextKey.FATAL_SYSTEM_ERROR, False)
data = {"execution_error": execution_error}
job_id = fl_ctx.get_job_id()
request = new_cell_message({CellMessageHeaderKeys.JOB_ID: job_id}, data)
return_data = self.server.cell.fire_and_forget(
targets=FQCN.ROOT_SERVER,
channel=CellChannel.SERVER_PARENT_LISTENER,
topic=ServerCommandNames.UPDATE_RUN_STATUS,
message=request,
)
def send_command_to_child_runner_process(
self, job_id: str, command_name: str, command_data, timeout=5.0, optional=False
):
with self.lock:
fqcn = FQCN.join([FQCN.ROOT_SERVER, job_id])
request = new_cell_message({}, command_data)
if timeout <= 0.0:
self.server.cell.fire_and_forget(
targets=fqcn,
channel=CellChannel.SERVER_COMMAND,
topic=command_name,
request=request,
optional=optional,
)
return None
return_data = self.server.cell.send_request(
target=fqcn,
channel=CellChannel.SERVER_COMMAND,
topic=command_name,
request=request,
timeout=timeout,
optional=optional,
)
rc = return_data.get_header(MessageHeaderKey.RETURN_CODE, CellMsgReturnCode.OK)
if rc == CellMsgReturnCode.OK:
result = return_data.payload
else:
result = None
return result
def persist_components(self, fl_ctx: FLContext, completed: bool):
if not self.server.ha_mode:
return
self.logger.info("Start saving snapshot on server.")
# Call the State Persistor to persist all the component states
# 1. call every component to generate the component states data
# Make sure to include the current round number
# 2. call persistence API to save the component states
try:
job_id = fl_ctx.get_job_id()
snapshot = RunSnapshot(job_id)
for component_id, component in self.run_manager.components.items():
if isinstance(component, FLComponent):
snapshot.set_component_snapshot(
component_id=component_id, component_state=component.get_persist_state(fl_ctx)
)
snapshot.set_component_snapshot(
component_id=SnapshotKey.FL_CONTEXT, component_state=copy.deepcopy(get_serializable_data(fl_ctx).props)
)
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
data = zip_directory_to_bytes(workspace.get_run_dir(fl_ctx.get_prop(FLContextKey.CURRENT_RUN)), "")
snapshot.set_component_snapshot(component_id=SnapshotKey.WORKSPACE, component_state={"content": data})
job_info = fl_ctx.get_prop(FLContextKey.JOB_INFO)
if not job_info:
job_clients = self._get_participating_clients()
fl_ctx.set_prop(FLContextKey.JOB_INFO, (job_id, job_clients))
else:
(job_id, job_clients) = job_info
snapshot.set_component_snapshot(
component_id=SnapshotKey.JOB_INFO,
component_state={SnapshotKey.JOB_CLIENTS: job_clients, SnapshotKey.JOB_ID: job_id},
)
snapshot.completed = completed
self.server.snapshot_location = self.snapshot_persistor.save(snapshot=snapshot)
if not completed:
self.logger.info(f"persist the snapshot to: {self.server.snapshot_location}")
else:
self.logger.info(f"The snapshot: {self.server.snapshot_location} has been removed.")
except Exception as e:
self.logger.error(f"Failed to persist the components. {secure_format_exception(e)}")
def restore_components(self, snapshot: RunSnapshot, fl_ctx: FLContext):
for component_id, component in self.run_manager.components.items():
if isinstance(component, FLComponent):
component.restore(snapshot.get_component_snapshot(component_id=component_id), fl_ctx)
fl_ctx.props.update(snapshot.get_component_snapshot(component_id=SnapshotKey.FL_CONTEXT))
def dispatch(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
return self.run_manager.aux_runner.dispatch(topic=topic, request=request, fl_ctx=fl_ctx)
def show_stats(self, job_id) -> dict:
stats = None
try:
stats = self.send_command_to_child_runner_process(
job_id=job_id,
command_name=ServerCommandNames.SHOW_STATS,
command_data={},
)
except Exception:
self.logger.error(f"Failed to show_stats for JOB: {job_id}")
if stats is None:
stats = {}
return stats
def get_errors(self, job_id) -> dict:
errors = None
try:
errors = self.send_command_to_child_runner_process(
job_id=job_id,
command_name=ServerCommandNames.GET_ERRORS,
command_data={},
)
except Exception:
self.logger.error(f"Failed to get_errors for JOB: {job_id}")
if errors is None:
errors = {}
return errors
def reset_errors(self, job_id) -> str:
errors = None
try:
self.send_command_to_child_runner_process(
job_id=job_id,
command_name=ServerCommandNames.RESET_ERRORS,
command_data={},
)
except Exception:
self.logger.error(f"Failed to reset_errors for JOB: {job_id}")
return f"reset the server error stats for job: {job_id}"
def _send_admin_requests(self, requests, timeout_secs=10) -> List[ClientReply]:
return self.server.admin_server.send_requests(requests, timeout_secs=timeout_secs)
def check_client_resources(self, job_id: str, resource_reqs) -> Dict[str, Tuple[bool, str]]:
requests = {}
for site_name, resource_requirements in resource_reqs.items():
# assume server resource is unlimited
if site_name == "server":
continue
request = Message(topic=TrainingTopic.CHECK_RESOURCE, body=resource_requirements)
request.set_header(RequestHeader.JOB_ID, job_id)
client = self.get_client_from_name(site_name)
if client:
requests.update({client.token: request})
replies = []
if requests:
replies = self._send_admin_requests(requests, 15)
result = {}
for r in replies:
site_name = r.client_name
if r.reply:
error_code = r.reply.get_header(MsgHeader.RETURN_CODE, ReturnCode.OK)
if error_code != ReturnCode.OK:
self.logger.error(f"Client reply error: {r.reply.body}")
result[site_name] = (False, "")
else:
resp = r.reply.body
result[site_name] = (
resp.get_header(ShareableHeader.IS_RESOURCE_ENOUGH, False),
resp.get_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, ""),
)
else:
result[site_name] = (False, "")
return result
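    # Note (illustration only, not part of the original engine): the returned dict maps
    # each client site name to a (is_resource_enough, resource_reserve_token) tuple, e.g.
    #   {"site-1": (True, "token-abc"), "site-2": (False, "")}
    # where the site names and the token are hypothetical values.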
def cancel_client_resources(
self, resource_check_results: Dict[str, Tuple[bool, str]], resource_reqs: Dict[str, dict]
):
requests = {}
for site_name, result in resource_check_results.items():
is_resource_enough, token = result
if is_resource_enough and token:
resource_requirements = resource_reqs[site_name]
request = Message(topic=TrainingTopic.CANCEL_RESOURCE, body=resource_requirements)
request.set_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, token)
client = self.get_client_from_name(site_name)
if client:
requests.update({client.token: request})
if requests:
_ = self._send_admin_requests(requests)
def start_client_job(self, job_id, client_sites):
requests = {}
for site, dispatch_info in client_sites.items():
resource_requirement = dispatch_info.resource_requirements
token = dispatch_info.token
request = Message(topic=TrainingTopic.START_JOB, body=resource_requirement)
request.set_header(RequestHeader.JOB_ID, job_id)
request.set_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, token)
client = self.get_client_from_name(site)
if client:
requests.update({client.token: request})
replies = []
if requests:
replies = self._send_admin_requests(requests, timeout_secs=20)
return replies
def stop_all_jobs(self):
fl_ctx = self.new_context()
self.job_runner.stop_all_runs(fl_ctx)
def pause_server_jobs(self):
running_jobs = list(self.run_processes.keys())
for job_id in running_jobs:
self.job_runner.remove_running_job(job_id)
self.abort_app_on_server(job_id, turn_to_cold=True)
def close(self):
self.executor.shutdown()
def server_shutdown(server, touch_file):
with open(touch_file, "a"):
os.utime(touch_file, None)
try:
# server.admin_server.stop()
server.fl_shutdown()
# time.sleep(3.0)
finally:
security_close()
server.status = ServerStatus.SHUTDOWN
# sys.exit(2)
| NVFlare-main | nvflare/private/fed/server/server_engine.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import threading
import time
from typing import Dict, List, Tuple
from nvflare.apis.client import Client
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import AdminCommandNames, FLContextKey, RunProcessKey, SystemComponents
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import ALL_SITES, Job, JobMetaKey, RunStatus
from nvflare.apis.job_scheduler_spec import DispatchInfo
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.fuel.utils.zip_utils import zip_directory_to_file
from nvflare.lighter.utils import verify_folder_signature
from nvflare.private.admin_defs import Message, MsgHeader, ReturnCode
from nvflare.private.defs import RequestHeader, TrainingTopic
from nvflare.private.fed.server.admin import check_client_replies
from nvflare.private.fed.server.server_state import HotState
from nvflare.private.fed.utils.app_deployer import AppDeployer
from nvflare.security.logging import secure_format_exception
def _send_to_clients(admin_server, client_sites: List[str], engine, message, timeout=None, optional=False):
clients, invalid_inputs = engine.validate_targets(client_sites)
if invalid_inputs:
raise RuntimeError(f"unknown clients: {invalid_inputs}.")
requests = {}
for c in clients:
requests.update({c.token: message})
if timeout is None:
timeout = admin_server.timeout
replies = admin_server.send_requests(requests, timeout_secs=timeout, optional=optional)
return replies
def _get_active_job_participants(connected_clients: Dict[str, Client], participants: Dict[str, Client]) -> List[str]:
"""Gets active job participants.
Some clients might be dropped/dead during job execution.
No need to abort those clients.
Args:
connected_clients: Clients that are currently connected.
participants: Clients that were participating when the job started.
Returns:
        A list of names of the active job participants.
"""
client_sites_names = []
for token, client in participants.items():
if token in connected_clients:
client_sites_names.append(client.name)
return client_sites_names
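# A minimal usage sketch of _get_active_job_participants (illustration only, not part of
# the original module). It assumes Client(name, token) construction; the site names and
# tokens are hypothetical.
def _example_active_participants() -> List[str]:
    participants = {"tok-1": Client("site-1", "tok-1"), "tok-2": Client("site-2", "tok-2")}
    connected = {"tok-1": participants["tok-1"]}  # site-2 has dropped from the system
    return _get_active_job_participants(connected_clients=connected, participants=participants)  # ["site-1"]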
class JobRunner(FLComponent):
def __init__(self, workspace_root: str) -> None:
super().__init__()
self.workspace_root = workspace_root
self.ask_to_stop = False
self.scheduler = None
self.running_jobs = {}
self.lock = threading.Lock()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.SYSTEM_START:
engine = fl_ctx.get_engine()
self.scheduler = engine.get_component(SystemComponents.JOB_SCHEDULER)
elif event_type in [EventType.JOB_COMPLETED, EventType.END_RUN]:
self._save_workspace(fl_ctx)
elif event_type == EventType.SYSTEM_END:
self.stop()
@staticmethod
def _make_deploy_message(job: Job, app_data, app_name):
message = Message(topic=TrainingTopic.DEPLOY, body=app_data)
message.set_header(RequestHeader.REQUIRE_AUTHZ, "true")
message.set_header(RequestHeader.ADMIN_COMMAND, AdminCommandNames.SUBMIT_JOB)
message.set_header(RequestHeader.JOB_ID, job.job_id)
message.set_header(RequestHeader.APP_NAME, app_name)
message.set_header(RequestHeader.SUBMITTER_NAME, job.meta.get(JobMetaKey.SUBMITTER_NAME))
message.set_header(RequestHeader.SUBMITTER_ORG, job.meta.get(JobMetaKey.SUBMITTER_ORG))
message.set_header(RequestHeader.SUBMITTER_ROLE, job.meta.get(JobMetaKey.SUBMITTER_ROLE))
message.set_header(RequestHeader.USER_NAME, job.meta.get(JobMetaKey.SUBMITTER_NAME))
message.set_header(RequestHeader.USER_ORG, job.meta.get(JobMetaKey.SUBMITTER_ORG))
message.set_header(RequestHeader.USER_ROLE, job.meta.get(JobMetaKey.SUBMITTER_ROLE))
message.set_header(RequestHeader.JOB_META, json.dumps(job.meta))
return message
def _deploy_job(self, job: Job, sites: dict, fl_ctx: FLContext) -> Tuple[str, list]:
"""Deploy the application to the list of participants
Args:
job: job to be deployed
sites: participating sites
fl_ctx: FLContext
Returns: job id, failed_clients
"""
fl_ctx.remove_prop(FLContextKey.JOB_RUN_NUMBER)
fl_ctx.remove_prop(FLContextKey.JOB_DEPLOY_DETAIL)
engine = fl_ctx.get_engine()
run_number = job.job_id
fl_ctx.set_prop(FLContextKey.JOB_RUN_NUMBER, run_number)
workspace = Workspace(root_dir=self.workspace_root, site_name="server")
client_deploy_requests = {}
client_token_to_name = {}
client_token_to_reply = {}
deploy_detail = []
fl_ctx.set_prop(FLContextKey.JOB_DEPLOY_DETAIL, deploy_detail)
for app_name, participants in job.get_deployment().items():
app_data = job.get_application(app_name, fl_ctx)
if len(participants) == 1 and participants[0].upper() == ALL_SITES:
participants = ["server"]
participants.extend([client.name for client in engine.get_clients()])
client_sites = []
for p in participants:
if p == "server":
app_deployer = AppDeployer()
err = app_deployer.deploy(
app_name=app_name,
workspace=workspace,
job_id=job.job_id,
job_meta=job.meta,
app_data=app_data,
fl_ctx=fl_ctx,
)
if err:
deploy_detail.append(f"server: {err}")
raise RuntimeError(f"Failed to deploy app '{app_name}': {err}")
kv_list = parse_vars(engine.args.set)
secure_train = kv_list.get("secure_train", True)
from_hub_site = job.meta.get(JobMetaKey.FROM_HUB_SITE.value)
if secure_train and not from_hub_site:
app_path = workspace.get_app_dir(job.job_id)
root_ca_path = os.path.join(workspace.get_startup_kit_dir(), "rootCA.pem")
if not verify_folder_signature(app_path, root_ca_path):
err = "job signature verification failed"
deploy_detail.append(f"server: {err}")
raise RuntimeError(f"Failed to verify app '{app_name}': {err}")
self.log_info(
fl_ctx, f"Application {app_name} deployed to the server for job: {run_number}", fire_event=False
)
deploy_detail.append("server: OK")
else:
if p in sites:
client_sites.append(p)
if client_sites:
message = self._make_deploy_message(job, app_data, app_name)
clients, invalid_inputs = engine.validate_targets(client_sites)
if invalid_inputs:
deploy_detail.append("invalid_clients: {}".format(",".join(invalid_inputs)))
raise RuntimeError(f"unknown clients: {invalid_inputs}.")
for c in clients:
assert isinstance(c, Client)
client_token_to_name[c.token] = c.name
client_deploy_requests[c.token] = message
client_token_to_reply[c.token] = None
display_sites = ",".join(client_sites)
self.log_info(
fl_ctx,
f"App {app_name} to be deployed to the clients: {display_sites} for run: {run_number}",
fire_event=False,
)
abort_job = False
failed_clients = []
if client_deploy_requests:
engine = fl_ctx.get_engine()
admin_server = engine.server.admin_server
client_token_to_reply = admin_server.send_requests_and_get_reply_dict(
client_deploy_requests, timeout_secs=admin_server.timeout
)
# check replies and see whether required clients are okay
for client_token, reply in client_token_to_reply.items():
client_name = client_token_to_name[client_token]
if reply:
assert isinstance(reply, Message)
rc = reply.get_header(MsgHeader.RETURN_CODE, ReturnCode.OK)
if rc != ReturnCode.OK:
failed_clients.append(client_name)
deploy_detail.append(f"{client_name}: {reply.body}")
else:
deploy_detail.append(f"{client_name}: OK")
else:
deploy_detail.append(f"{client_name}: unknown")
# see whether any of the failed clients are required
if failed_clients:
num_ok_sites = len(client_deploy_requests) - len(failed_clients)
if job.min_sites and num_ok_sites < job.min_sites:
abort_job = True
deploy_detail.append(f"num_ok_sites {num_ok_sites} < required_min_sites {job.min_sites}")
elif job.required_sites:
for c in failed_clients:
if c in job.required_sites:
abort_job = True
deploy_detail.append(f"failed to deploy to required client {c}")
if abort_job:
raise RuntimeError("deploy failure", deploy_detail)
self.fire_event(EventType.JOB_DEPLOYED, fl_ctx)
return run_number, failed_clients
def _start_run(self, job_id: str, job: Job, client_sites: Dict[str, DispatchInfo], fl_ctx: FLContext):
"""Start the application
Args:
job_id: job_id
client_sites: participating sites
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
job_clients = engine.get_job_clients(client_sites)
err = engine.start_app_on_server(job_id, job=job, job_clients=job_clients)
if err:
raise RuntimeError(f"Could not start the server App for job: {job_id}.")
replies = engine.start_client_job(job_id, client_sites)
client_sites_names = list(client_sites.keys())
check_client_replies(replies=replies, client_sites=client_sites_names, command=f"start job ({job_id})")
display_sites = ",".join(client_sites_names)
self.log_info(fl_ctx, f"Started run: {job_id} for clients: {display_sites}")
self.fire_event(EventType.JOB_STARTED, fl_ctx)
def _stop_run(self, job_id, fl_ctx: FLContext):
"""Stop the application
Args:
job_id: job_id to be stopped
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
run_process = engine.run_processes.get(job_id)
if run_process:
participants: Dict[str, Client] = run_process.get(RunProcessKey.PARTICIPANTS)
active_client_sites_names = _get_active_job_participants(
connected_clients=engine.client_manager.clients, participants=participants
)
self.abort_client_run(job_id, active_client_sites_names, fl_ctx)
err = engine.abort_app_on_server(job_id)
if err:
self.log_error(fl_ctx, f"Failed to abort the server for run: {job_id}: {err}")
def abort_client_run(self, job_id, client_sites: List[str], fl_ctx):
"""Send the abort run command to the clients
Args:
job_id: job_id
client_sites: Clients to be aborted
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
admin_server = engine.server.admin_server
message = Message(topic=TrainingTopic.ABORT, body="")
message.set_header(RequestHeader.JOB_ID, str(job_id))
self.log_debug(fl_ctx, f"Send abort command to the clients for run: {job_id}")
try:
_ = _send_to_clients(admin_server, client_sites, engine, message, timeout=2.0, optional=True)
# There isn't much we can do here if a client didn't get the message or send a reply
# check_client_replies(replies=replies, client_sites=client_sites, command="abort the run")
except RuntimeError as e:
self.log_error(fl_ctx, f"Failed to abort run ({job_id}) on the clients: {secure_format_exception(e)}")
def _delete_run(self, job_id, client_sites: List[str], fl_ctx: FLContext):
"""Deletes the run workspace
Args:
job_id: job_id
client_sites: participating sites
fl_ctx: FLContext
"""
engine = fl_ctx.get_engine()
admin_server = engine.server.admin_server
message = Message(topic=TrainingTopic.DELETE_RUN, body="")
message.set_header(RequestHeader.JOB_ID, str(job_id))
self.log_debug(fl_ctx, f"Send delete_run command to the clients for run: {job_id}")
try:
replies = _send_to_clients(admin_server, client_sites, engine, message)
check_client_replies(replies=replies, client_sites=client_sites, command="send delete_run command")
except RuntimeError as e:
self.log_error(
fl_ctx, f"Failed to execute delete run ({job_id}) on the clients: {secure_format_exception(e)}"
)
err = engine.delete_job_id(job_id)
if err:
self.log_error(fl_ctx, f"Failed to delete_run the server for run: {job_id}")
def _job_complete_process(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
while not self.ask_to_stop:
for job_id in list(self.running_jobs.keys()):
if job_id not in engine.run_processes.keys():
job = self.running_jobs.get(job_id)
if job:
if not job.run_aborted:
self._update_job_status(engine, job, job_manager, fl_ctx)
with self.lock:
del self.running_jobs[job_id]
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job.job_id)
self.fire_event(EventType.JOB_COMPLETED, fl_ctx)
self.log_debug(fl_ctx, f"Finished running job:{job.job_id}")
engine.remove_exception_process(job_id)
time.sleep(1.0)
def _update_job_status(self, engine, job, job_manager, fl_ctx):
exception_run_processes = engine.exception_run_processes
if job.job_id in exception_run_processes:
self.log_info(fl_ctx, f"Try to abort job ({job.job_id}) on clients ...")
run_process = exception_run_processes[job.job_id]
# stop client run
participants: Dict[str, Client] = run_process.get(RunProcessKey.PARTICIPANTS)
active_client_sites_names = _get_active_job_participants(
connected_clients=engine.client_manager.clients, participants=participants
)
self.abort_client_run(job.job_id, active_client_sites_names, fl_ctx)
finished = run_process.get(RunProcessKey.PROCESS_FINISHED, False)
if finished:
# job status is already reported from the Job cell!
exe_err = run_process.get(RunProcessKey.PROCESS_EXE_ERROR, False)
if exe_err:
status = RunStatus.FINISHED_EXECUTION_EXCEPTION
else:
status = RunStatus.FINISHED_COMPLETED
else:
# never got job status report from job cell
process_return_code = run_process.get(RunProcessKey.PROCESS_RETURN_CODE)
if process_return_code == -9:
status = RunStatus.FINISHED_ABNORMAL
else:
status = RunStatus.FINISHED_EXECUTION_EXCEPTION
else:
status = RunStatus.FINISHED_COMPLETED
job_manager.set_status(job.job_id, status, fl_ctx)
def _save_workspace(self, fl_ctx: FLContext):
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID)
workspace = Workspace(root_dir=self.workspace_root)
run_dir = workspace.get_run_dir(job_id)
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
with tempfile.TemporaryDirectory() as td:
output_file = os.path.join(td, "workspace")
zip_directory_to_file(run_dir, "", output_file)
job_manager.save_workspace(job_id, output_file, fl_ctx)
self.log_info(fl_ctx, f"Workspace zipped to {output_file}")
shutil.rmtree(run_dir)
def run(self, fl_ctx: FLContext):
"""Starts job runner."""
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
if job_manager:
thread = threading.Thread(target=self._job_complete_process, args=[fl_ctx])
thread.start()
while not self.ask_to_stop:
if not isinstance(engine.server.server_state, HotState):
time.sleep(1.0)
continue
approved_jobs = job_manager.get_jobs_by_status(RunStatus.SUBMITTED, fl_ctx)
self.log_debug(
fl_ctx, f"{fl_ctx.get_identity_name()} Got approved_jobs: {approved_jobs} from the job_manager"
)
if self.scheduler:
ready_job, sites = self.scheduler.schedule_job(
job_manager=job_manager, job_candidates=approved_jobs, fl_ctx=fl_ctx
)
if ready_job:
if self._check_job_status(job_manager, ready_job.job_id, RunStatus.SUBMITTED, fl_ctx):
self.log_info(fl_ctx, f"Job: {ready_job.job_id} is not in SUBMITTED. It won't be deployed.")
continue
client_sites = {k: v for k, v in sites.items() if k != "server"}
job_id = None
try:
self.log_info(fl_ctx, f"Got the job: {ready_job.job_id} from the scheduler to run")
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, ready_job.job_id)
job_id, failed_clients = self._deploy_job(ready_job, sites, fl_ctx)
job_manager.set_status(ready_job.job_id, RunStatus.DISPATCHED, fl_ctx)
deploy_detail = fl_ctx.get_prop(FLContextKey.JOB_DEPLOY_DETAIL)
if deploy_detail:
job_manager.update_meta(
ready_job.job_id,
{
JobMetaKey.JOB_DEPLOY_DETAIL.value: deploy_detail,
JobMetaKey.SCHEDULE_COUNT.value: ready_job.meta[
JobMetaKey.SCHEDULE_COUNT.value
],
JobMetaKey.LAST_SCHEDULE_TIME.value: ready_job.meta[
JobMetaKey.LAST_SCHEDULE_TIME.value
],
JobMetaKey.SCHEDULE_HISTORY.value: ready_job.meta[
JobMetaKey.SCHEDULE_HISTORY.value
],
},
fl_ctx,
)
if failed_clients:
deployable_clients = {k: v for k, v in client_sites.items() if k not in failed_clients}
else:
deployable_clients = client_sites
if self._check_job_status(job_manager, ready_job.job_id, RunStatus.DISPATCHED, fl_ctx):
self.log_info(
fl_ctx, f"Job: {ready_job.job_id} is not in DISPATCHED. It won't be start to run."
)
continue
self._start_run(
job_id=job_id,
job=ready_job,
client_sites=deployable_clients,
fl_ctx=fl_ctx,
)
with self.lock:
self.running_jobs[job_id] = ready_job
job_manager.set_status(ready_job.job_id, RunStatus.RUNNING, fl_ctx)
except Exception as e:
if job_id:
if job_id in self.running_jobs:
with self.lock:
del self.running_jobs[job_id]
self._stop_run(job_id, fl_ctx)
job_manager.set_status(ready_job.job_id, RunStatus.FAILED_TO_RUN, fl_ctx)
deploy_detail = fl_ctx.get_prop(FLContextKey.JOB_DEPLOY_DETAIL)
if deploy_detail:
job_manager.update_meta(
ready_job.job_id, {JobMetaKey.JOB_DEPLOY_DETAIL.value: deploy_detail}, fl_ctx
)
self.fire_event(EventType.JOB_ABORTED, fl_ctx)
self.log_error(
fl_ctx, f"Failed to run the Job ({ready_job.job_id}): {secure_format_exception(e)}"
)
time.sleep(1.0)
thread.join()
else:
self.log_error(fl_ctx, "There's no Job Manager defined. Won't be able to run the jobs.")
@staticmethod
def _check_job_status(job_manager, job_id, job_run_status, fl_ctx: FLContext):
reload_job = job_manager.get_job(job_id, fl_ctx)
return reload_job.meta.get(JobMetaKey.STATUS) != job_run_status
def stop(self):
self.ask_to_stop = True
def restore_running_job(self, run_number: str, job_id: str, job_clients, snapshot, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
try:
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
job = job_manager.get_job(jid=job_id, fl_ctx=fl_ctx)
err = engine.start_app_on_server(run_number, job=job, job_clients=job_clients, snapshot=snapshot)
if err:
raise RuntimeError(f"Could not restore the server App for job: {job_id}.")
with self.lock:
self.running_jobs[job_id] = job
self.scheduler.restore_scheduled_job(job_id)
except Exception as e:
self.log_error(
fl_ctx, f"Failed to restore the job: {job_id} to the running job table: {secure_format_exception(e)}."
)
def update_abnormal_finished_jobs(self, running_job_ids, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
all_jobs = self._get_all_running_jobs(job_manager, fl_ctx)
for job in all_jobs:
if job.job_id not in running_job_ids:
try:
job_manager.set_status(job.job_id, RunStatus.FINISHED_ABNORMAL, fl_ctx)
self.logger.info(f"Update the previous running job: {job.job_id} to {RunStatus.FINISHED_ABNORMAL}.")
except Exception as e:
self.log_error(
fl_ctx,
f"Failed to update the job: {job.job_id} to {RunStatus.FINISHED_ABNORMAL}: "
f"{secure_format_exception(e)}.",
)
def update_unfinished_jobs(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
all_jobs = self._get_all_running_jobs(job_manager, fl_ctx)
for job in all_jobs:
try:
job_manager.set_status(job.job_id, RunStatus.ABANDONED, fl_ctx)
self.logger.info(f"Update the previous running job: {job.job_id} to {RunStatus.ABANDONED}.")
except Exception as e:
self.log_error(
fl_ctx,
f"Failed to update the job: {job.job_id} to {RunStatus.ABANDONED}: {secure_format_exception(e)}.",
)
@staticmethod
def _get_all_running_jobs(job_manager, fl_ctx):
all_jobs = []
dispatched_jobs = job_manager.get_jobs_by_status(RunStatus.DISPATCHED, fl_ctx)
all_jobs.extend(dispatched_jobs)
running_jobs = job_manager.get_jobs_by_status(RunStatus.RUNNING, fl_ctx)
all_jobs.extend(running_jobs)
return all_jobs
def stop_run(self, job_id: str, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
self._stop_run(job_id, fl_ctx)
job = self.running_jobs.get(job_id)
if job:
self.log_info(fl_ctx, f"Stop the job run: {job_id}")
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job.job_id)
job.run_aborted = True
job_manager.set_status(job.job_id, RunStatus.FINISHED_ABORTED, fl_ctx)
self.fire_event(EventType.JOB_ABORTED, fl_ctx)
return ""
else:
self.log_error(fl_ctx, f"Job {job_id} is not running. It can not be stopped.")
return f"Job {job_id} is not running."
def stop_all_runs(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
for job_id in engine.run_processes.keys():
self.stop_run(job_id, fl_ctx)
self.log_info(fl_ctx, "Stop all the running jobs.")
# also stop the job runner
self.ask_to_stop = True
def remove_running_job(self, job_id: str):
with self.lock:
if job_id in self.running_jobs:
del self.running_jobs[job_id]
self.scheduler.remove_scheduled_job(job_id)
| NVFlare-main | nvflare/private/fed/server/job_runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from abc import ABC, abstractmethod
from typing import Optional
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import MachineStatus
from nvflare.apis.job_def import Job
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec
from nvflare.apis.server_engine_spec import ServerEngineSpec
from .job_runner import JobRunner
from .run_info import RunInfo
from .run_manager import RunManager
from .server_json_config import ServerJsonConfigurator
class EngineInfo(object):
def __init__(self):
"""Engine information."""
self.start_time = time.time()
self.status = MachineStatus.STOPPED
self.app_names = {}
class ServerEngineInternalSpec(ServerEngineSpec, ABC):
@abstractmethod
def get_engine_info(self) -> EngineInfo:
"""Get general info of the engine."""
pass
@abstractmethod
def get_staging_path_of_app(self, app_name: str) -> str:
"""Get the staging path of the app waiting to be deployed.
Args:
app_name (str): application name
Returns:
The app's folder path or empty string if the app doesn't exist
"""
pass
@abstractmethod
def deploy_app_to_server(self, job_id: str, app_name: str, app_staging_path: str) -> str:
"""Deploy the specified app to the server.
Copy the app folder tree from staging area to the server's RUN area
Args:
job_id: job id of the app to be deployed
app_name: name of the app to be deployed
app_staging_path: the full path to the app folder in staging area
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def get_app_data(self, app_name: str) -> (str, object):
"""Get data for deploying the app.
Args:
app_name: name of the app
        Returns:
            A tuple of (error message, app data). The error message is an empty string if successful.
        """
pass
@abstractmethod
def get_app_run_info(self, job_id) -> Optional[RunInfo]:
"""Gets the app RunInfo from the child process."""
pass
@abstractmethod
def delete_job_id(self, job_id: str) -> str:
"""Delete specified RUN.
The Engine must do status check before the run can be deleted.
Args:
job_id: job id
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def start_app_on_server(self, run_number: str, job: Job = None, job_clients=None, snapshot=None) -> str:
"""Start the FL app on Server.
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def check_app_start_readiness(self, job_id: str) -> str:
"""Check whether the app is ready to start.
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def abort_app_on_clients(self, clients: [str]):
"""Abort the application on the specified clients."""
pass
@abstractmethod
def abort_app_on_server(self, job_id: str):
"""Abort the application on the server."""
pass
@abstractmethod
def shutdown_server(self) -> str:
"""Shutdown the server.
The engine should not exit right away.
It should set its status to STOPPING, and set up a timer (in a different thread),
and return from this call right away (if other restart conditions are met).
When the timer fires, it exits.
        This gives the caller a chance to process the feedback or clean up (e.g. send the admin cmd response).
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def remove_clients(self, clients: [str]) -> str:
"""Remove specified clients.
Args:
clients: clients to be removed
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def restart_server(self) -> str:
"""Restart the server.
The engine should not exit right away.
See shutdown_server.
Returns:
An error message. An empty string if successful.
"""
pass
@abstractmethod
def set_run_manager(self, run_manager: RunManager):
"""Set the RunManager for server.
Args:
run_manager: A RunManager object
"""
pass
@abstractmethod
def set_job_runner(self, job_runner: JobRunner, job_manager: JobDefManagerSpec):
"""Set the JobRunner for server.
Args:
job_runner: A JobRunner object
job_manager: A JobDefManagerSpec object
"""
pass
@abstractmethod
def set_configurator(self, conf: ServerJsonConfigurator):
"""Set the configurator for server.
Args:
conf: A ServerJsonConfigurator object
"""
pass
@abstractmethod
def build_component(self, config_dict):
"""Build a component from the config_dict.
Args:
config_dict: configuration.
"""
pass
@abstractmethod
def get_client_from_name(self, client_name: str) -> Client:
"""Get the registered client from client_name.
Args:
client_name: client name
Returns: registered client
"""
pass
@abstractmethod
def get_job_clients(self, client_sites) -> {}:
"""To get the participating clients for the job
Args:
client_sites: clients with the dispatching info
Returns:
"""
pass
@abstractmethod
def ask_to_stop(self):
"""Ask the engine to stop the current run."""
pass
@abstractmethod
def show_stats(self, job_id) -> dict:
"""Show_stats of the server.
Args:
job_id: current job_id
Returns:
Component stats of the server
"""
pass
@abstractmethod
def get_errors(self, job_id) -> dict:
"""Get the errors of the server components.
Args:
job_id: current job_id
Returns:
Server components errors.
"""
pass
@abstractmethod
def reset_errors(self, job_id) -> str:
"""Get the errors of the server components.
Args:
job_id: current job_id
Returns:
Server components errors.
"""
pass
| NVFlare-main | nvflare/private/fed/server/server_engine_internal_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from nvflare.apis.client import Client
from nvflare.apis.engine_spec import EngineSpec
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext, FLContextManager
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.apis.workspace import Workspace
from nvflare.private.aux_runner import AuxRunner
from nvflare.private.event import fire_event
from nvflare.private.fed.utils.fed_utils import create_job_processing_context_properties
from .client_manager import ClientManager
from .run_info import RunInfo
class RunManager(EngineSpec):
def __init__(
self,
server_name,
engine: ServerEngineSpec,
job_id,
workspace: Workspace,
components: {str: FLComponent},
client_manager: Optional[ClientManager] = None,
handlers: Optional[List[FLComponent]] = None,
):
"""Manage run.
Args:
server_name: server name
engine (ServerEngineSpec): server engine
job_id: job id
workspace (Workspace): workspace
components (dict): A dict of extra python objects {id: object}
client_manager (ClientManager, optional): client manager
handlers (List[FLComponent], optional): handlers
"""
super().__init__()
self.server_name = server_name
self.client_manager = client_manager
self.handlers = handlers
self.aux_runner = AuxRunner(self)
self.add_handler(self.aux_runner)
if job_id:
job_ctx_props = self.create_job_processing_context_properties(workspace, job_id)
else:
job_ctx_props = {}
self.fl_ctx_mgr = FLContextManager(
engine=engine, identity_name=server_name, job_id=job_id, public_stickers={}, private_stickers=job_ctx_props
)
self.workspace = workspace
self.run_info = RunInfo(job_id=job_id, app_path=self.workspace.get_app_dir(job_id))
self.components = components
self.cell = None
def get_server_name(self):
return self.server_name
def get_run_info(self) -> RunInfo:
return self.run_info
def get_handlers(self):
return self.handlers
def new_context(self) -> FLContext:
return self.fl_ctx_mgr.new_context()
def get_workspace(self) -> Workspace:
return self.workspace
def get_component(self, component_id: str) -> object:
return self.components.get(component_id)
def add_component(self, component_id: str, component):
self.components[component_id] = component
def fire_event(self, event_type: str, fl_ctx: FLContext):
fire_event(event=event_type, handlers=self.handlers, ctx=fl_ctx)
def add_handler(self, handler: FLComponent):
self.handlers.append(handler)
def get_cell(self):
return self.cell
def validate_targets(self, client_names: List[str]) -> Tuple[List[Client], List[str]]:
return self.client_manager.get_all_clients_from_inputs(client_names)
def create_job_processing_context_properties(self, workspace, job_id):
return create_job_processing_context_properties(workspace, job_id)
| NVFlare-main | nvflare/private/fed/server/run_manager.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from nvflare.apis.fl_context import FLContext
from nvflare.apis.overseer_spec import SP
ACTION = "_action"
MESSAGE = "_message"
NIS = "Not In Service"
ABORT_RUN = "Abort Run"
SERVICE = "In Service"
class ServiceSession:
def __init__(self, host: str = "", port: str = "", ssid: str = "") -> None:
self.host = host
self.service_port = port
self.ssid = ssid
class ServerState(ABC):
NOT_IN_SERVICE = {ACTION: NIS, MESSAGE: "Server not in service"}
ABORT_CURRENT_RUN = {ACTION: ABORT_RUN, MESSAGE: "Abort current run"}
IN_SERVICE = {ACTION: SERVICE, MESSAGE: "Server in service"}
logger = logging.getLogger("ServerState")
def __init__(self, host: str = "", port: str = "", ssid: str = "") -> None:
self.host = host
self.service_port = port
self.ssid = ssid
self.primary = False
@abstractmethod
def register(self, fl_ctx: FLContext) -> dict:
pass
@abstractmethod
def heartbeat(self, fl_ctx: FLContext) -> dict:
pass
@abstractmethod
def get_task(self, fl_ctx: FLContext) -> dict:
pass
@abstractmethod
def submit_result(self, fl_ctx: FLContext) -> dict:
pass
@abstractmethod
def aux_communicate(self, fl_ctx: FLContext) -> dict:
pass
@abstractmethod
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
pass
class ColdState(ServerState):
def register(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def heartbeat(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def get_task(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def submit_result(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def aux_communicate(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
if sp and sp.primary is True:
if sp.name == self.host and sp.fl_port == self.service_port:
self.primary = True
self.ssid = sp.service_session_id
self.logger.info(
f"Got the primary sp: {sp.name} fl_port: {sp.fl_port} SSID: {sp.service_session_id}. "
f"Turning to hot."
)
return Cold2HotState(host=self.host, port=self.service_port, ssid=sp.service_session_id)
else:
self.primary = False
return self
return self
class Cold2HotState(ServerState):
def register(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def heartbeat(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def get_task(self, fl_ctx: FLContext) -> dict:
return ServerState.ABORT_CURRENT_RUN
def submit_result(self, fl_ctx: FLContext) -> dict:
return ServerState.ABORT_CURRENT_RUN
def aux_communicate(self, fl_ctx: FLContext) -> dict:
return ServerState.ABORT_CURRENT_RUN
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
return self
class HotState(ServerState):
def register(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def heartbeat(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def get_task(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def submit_result(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def aux_communicate(self, fl_ctx: FLContext) -> dict:
return ServerState.IN_SERVICE
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
if sp and sp.primary is True:
if sp.name == self.host and sp.fl_port == self.service_port:
self.primary = True
if sp.service_session_id != self.ssid:
self.ssid = sp.service_session_id
self.logger.info(
f"Primary sp changed to: {sp.name} fl_port: {sp.fl_port} SSID: {sp.service_session_id}. "
f"Turning to Cold"
)
return Hot2ColdState(host=self.host, port=self.service_port, ssid=sp.service_session_id)
else:
return self
else:
self.primary = False
self.logger.info(
f"Primary sp changed to: {sp.name} fl_port: {sp.fl_port} SSID: {sp.service_session_id}. "
f"Turning to Cold"
)
return Hot2ColdState(host=self.host, port=self.service_port)
return self
class Hot2ColdState(ServerState):
def register(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def heartbeat(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def get_task(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def submit_result(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def aux_communicate(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
return self
class ShutdownState(ServerState):
def register(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def heartbeat(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def get_task(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def submit_result(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def aux_communicate(self, fl_ctx: FLContext) -> dict:
return ServerState.NOT_IN_SERVICE
def handle_sd_callback(self, sp: SP, fl_ctx: FLContext) -> ServerState:
return self
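# A minimal transition sketch (illustration only, not part of the original module).
# A SimpleNamespace stands in for a real SP; only the attributes that
# handle_sd_callback reads (primary, name, fl_port, service_session_id) are set,
# and the host/port/ssid values are hypothetical.
def _example_cold_to_hot() -> ServerState:
    from types import SimpleNamespace

    sp = SimpleNamespace(primary=True, name="localhost", fl_port="8002", service_session_id="ssid-1")
    cold = ColdState(host="localhost", port="8002")
    return cold.handle_sd_callback(sp, fl_ctx=FLContext())  # returns a Cold2HotState instance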
| NVFlare-main | nvflare/private/fed/server/server_state.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/private/fed/utils/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from nvflare.apis.app_deployer_spec import AppDeployerSpec
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils.zip_utils import unzip_all_from_bytes
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
from .app_authz import AppAuthzService
class AppDeployer(AppDeployerSpec):
def deploy(
self, workspace: Workspace, job_id: str, job_meta: dict, app_name: str, app_data: bytes, fl_ctx: FLContext
) -> str:
"""Deploys the app.
Returns:
error message if any
"""
privacy_scope = job_meta.get(JobMetaKey.SCOPE, "")
# check whether this scope is allowed
if not PrivacyService.is_scope_allowed(privacy_scope):
return f"privacy scope '{privacy_scope}' is not allowed"
try:
run_dir = workspace.get_run_dir(job_id)
app_path = workspace.get_app_dir(job_id)
app_file = os.path.join(run_dir, "fl_app.txt")
job_meta_file = workspace.get_job_meta_path(job_id)
if os.path.exists(run_dir):
shutil.rmtree(run_dir)
if not os.path.exists(app_path):
os.makedirs(app_path)
unzip_all_from_bytes(app_data, app_path)
with open(app_file, "wt") as f:
f.write(f"{app_name}")
with open(job_meta_file, "w") as f:
json.dump(job_meta, f, indent=4)
submitter_name = job_meta.get(JobMetaKey.SUBMITTER_NAME, "")
submitter_org = job_meta.get(JobMetaKey.SUBMITTER_ORG, "")
submitter_role = job_meta.get(JobMetaKey.SUBMITTER_ROLE, "")
authorized, err = AppAuthzService.authorize(
app_path=app_path,
submitter_name=submitter_name,
submitter_org=submitter_org,
submitter_role=submitter_role,
)
if err:
return err
if not authorized:
return "not authorized"
except Exception as e:
raise Exception(f"exception {secure_format_exception(e)} when deploying app {app_name}")
| NVFlare-main | nvflare/private/fed/utils/app_deployer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import logging.config
import os
import sys
from logging.handlers import RotatingFileHandler
from multiprocessing.connection import Listener
from typing import List
from nvflare.apis.app_validation import AppValidator
from nvflare.apis.client import Client
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLContext
from nvflare.apis.fl_constant import FLContextKey, SiteType, WorkspaceConstants
from nvflare.apis.fl_exception import UnsafeComponentError
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.utils.decomposers import flare_decomposers
from nvflare.apis.workspace import Workspace
from nvflare.app_common.decomposers import common_decomposers
from nvflare.fuel.f3.stats_pool import CsvRecordHandler, StatsPoolManager
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.sec.authz import AuthorizationService
from nvflare.fuel.sec.security_content_service import LoadResult, SecurityContentService
from nvflare.private.defs import SSLConstants
from nvflare.private.event import fire_event
from nvflare.private.fed.utils.decomposers import private_decomposers
from nvflare.private.privacy_manager import PrivacyManager, PrivacyService
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from nvflare.security.security import EmptyAuthorizer, FLAuthorizer
from .app_authz import AppAuthzService
def add_logfile_handler(log_file):
root_logger = logging.getLogger()
main_handler = root_logger.handlers[0]
file_handler = RotatingFileHandler(log_file, maxBytes=20 * 1024 * 1024, backupCount=10)
file_handler.setLevel(main_handler.level)
file_handler.setFormatter(main_handler.formatter)
root_logger.addHandler(file_handler)
def listen_command(listen_port, engine, execute_func, logger):
conn = None
listener = None
try:
address = ("localhost", listen_port)
listener = Listener(address, authkey="client process secret password".encode())
conn = listener.accept()
execute_func(conn, engine)
except Exception as e:
logger.exception(
f"Could not create the listener for this process on port: {listen_port}: {secure_format_exception(e)}."
)
secure_log_traceback(logger)
finally:
if conn:
conn.close()
if listener:
listener.close()
def _check_secure_content(site_type: str) -> List[str]:
"""To check the security contents.
Args:
site_type (str): "server" or "client"
Returns:
A list of insecure content.
"""
if site_type == SiteType.SERVER:
config_file_name = WorkspaceConstants.SERVER_STARTUP_CONFIG
else:
config_file_name = WorkspaceConstants.CLIENT_STARTUP_CONFIG
insecure_list = []
data, sig = SecurityContentService.load_json(config_file_name)
if sig != LoadResult.OK:
insecure_list.append(config_file_name)
sites_to_check = data["servers"] if site_type == SiteType.SERVER else [data["client"]]
for site in sites_to_check:
for filename in [SSLConstants.CERT, SSLConstants.PRIVATE_KEY, SSLConstants.ROOT_CERT]:
content, sig = SecurityContentService.load_content(site.get(filename))
if sig != LoadResult.OK:
insecure_list.append(site.get(filename))
if WorkspaceConstants.AUTHORIZATION_CONFIG in SecurityContentService.security_content_manager.signature:
data, sig = SecurityContentService.load_json(WorkspaceConstants.AUTHORIZATION_CONFIG)
if sig != LoadResult.OK:
insecure_list.append(WorkspaceConstants.AUTHORIZATION_CONFIG)
return insecure_list
def security_init(secure_train: bool, site_org: str, workspace: Workspace, app_validator: AppValidator, site_type: str):
"""To check the security content if running in security mode.
Args:
secure_train (bool): if run in secure mode or not.
site_org: organization of the site
workspace: the workspace object.
app_validator: app validator for application validation
site_type (str): server or client. fed_client.json or fed_server.json
"""
# initialize the SecurityContentService.
# must do this before initializing other services since it may be needed by them!
startup_dir = workspace.get_startup_kit_dir()
SecurityContentService.initialize(content_folder=startup_dir)
if secure_train:
insecure_list = _check_secure_content(site_type=site_type)
if len(insecure_list):
print("The following files are not secure content.")
for item in insecure_list:
print(item)
sys.exit(1)
# initialize the AuditService, which is used by command processing.
# The Audit Service can be used in other places as well.
audit_file_name = workspace.get_audit_file_path()
AuditService.initialize(audit_file_name)
if app_validator:
AppAuthzService.initialize(app_validator)
# Initialize the AuthorizationService. It is used by command authorization
# We use FLAuthorizer for policy processing.
# AuthorizationService depends on SecurityContentService to read authorization policy file.
authorizer = None
if secure_train:
policy_file_path = workspace.get_authorization_file_path()
if policy_file_path and os.path.exists(policy_file_path):
policy_config = json.load(open(policy_file_path, "rt"))
authorizer = FLAuthorizer(site_org, policy_config)
if not authorizer:
authorizer = EmptyAuthorizer()
_, err = AuthorizationService.initialize(authorizer)
if err:
print("AuthorizationService error: {}".format(err))
sys.exit(1)
def security_close():
AuditService.close()
def get_job_meta_from_workspace(workspace: Workspace, job_id: str) -> dict:
job_meta_file_path = workspace.get_job_meta_path(job_id)
with open(job_meta_file_path) as file:
return json.load(file)
def create_job_processing_context_properties(workspace: Workspace, job_id: str) -> dict:
job_meta = get_job_meta_from_workspace(workspace, job_id)
assert isinstance(job_meta, dict), f"job_meta must be dict but got {type(job_meta)}"
scope_name = job_meta.get(JobMetaKey.SCOPE, "")
scope_object = PrivacyService.get_scope(scope_name)
scope_props = None
if scope_object:
scope_props = scope_object.props
effective_scope_name = scope_object.name
else:
effective_scope_name = ""
return {
FLContextKey.JOB_META: job_meta,
FLContextKey.JOB_SCOPE_NAME: scope_name,
FLContextKey.EFFECTIVE_JOB_SCOPE_NAME: effective_scope_name,
FLContextKey.SCOPE_PROPERTIES: scope_props,
FLContextKey.SCOPE_OBJECT: scope_object,
}
def find_char_positions(s, ch):
return [i for i, c in enumerate(s) if c == ch]
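# For example, find_char_positions("a,b,,c", ",") returns [1, 3, 4].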
def configure_logging(workspace: Workspace):
log_config_file_path = workspace.get_log_config_file_path()
assert os.path.isfile(log_config_file_path), f"missing log config file {log_config_file_path}"
logging.config.fileConfig(fname=log_config_file_path, disable_existing_loggers=False)
def get_scope_info():
try:
privacy_manager = PrivacyService.get_manager()
scope_names = []
default_scope_name = ""
if privacy_manager:
assert isinstance(privacy_manager, PrivacyManager)
if privacy_manager.name_to_scopes:
scope_names = sorted(privacy_manager.name_to_scopes.keys(), reverse=False)
if privacy_manager.default_scope:
default_scope_name = privacy_manager.default_scope.name
return scope_names, default_scope_name
except:
return [], "processing_error"
def fobs_initialize():
flare_decomposers.register()
common_decomposers.register()
private_decomposers.register()
def set_stats_pool_config_for_job(workspace: Workspace, job_id: str, prefix=None):
job_meta = get_job_meta_from_workspace(workspace, job_id)
config = job_meta.get(JobMetaKey.STATS_POOL_CONFIG)
if config:
StatsPoolManager.set_pool_config(config)
record_file = workspace.get_stats_pool_records_path(job_id, prefix)
record_writer = CsvRecordHandler(record_file)
StatsPoolManager.set_record_writer(record_writer)
def create_stats_pool_files_for_job(workspace: Workspace, job_id: str, prefix=None):
err = ""
summary_file = workspace.get_stats_pool_summary_path(job_id, prefix)
try:
StatsPoolManager.dump_summary(summary_file)
except Exception as e:
err = f"Failed to create stats pool summary file {summary_file}: {secure_format_exception(e)}"
StatsPoolManager.close()
return err
def split_gpus(gpus) -> [str]:
gpus = gpus.replace(" ", "")
lefts = find_char_positions(gpus, "[")
rights = find_char_positions(gpus, "]")
if len(lefts) != len(rights):
raise ValueError("brackets not paired")
for i in range(len(lefts)):
if i > 0 and lefts[i] < rights[i - 1]:
raise ValueError("brackets cannot be nested")
offset = 0
for i in range(len(lefts)):
l: int = lefts[i] - offset
r: int = rights[i] - offset
if l > r:
raise ValueError("brackets not properly paired")
if l > 0 and gpus[l - 1] != ",":
raise ValueError(f"invalid start of a group: {gpus[l - 1]}")
if r < len(gpus) - 1 and gpus[r + 1] != ",":
raise ValueError(f"invalid end of a group: {gpus[r + 1]}")
g = gpus[l : r + 1] # include both left and right brackets
p = g[1:-1].replace(",", "^")
gpus = gpus.replace(g, p, 1) # only replace the first occurrence!
offset += 2 # everything after the replacement is shifted to left by 2 (since the pair of brackets removed)
result = gpus.split(",")
result = [g.replace("^", ",") for g in result]
return result
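# A small self-check sketch for split_gpus (illustration only, not part of the original
# module): bracketed groups are kept together as a single entry.
def _example_split_gpus():
    assert split_gpus("0,1,2") == ["0", "1", "2"]
    assert split_gpus("0,[1,2],3") == ["0", "1,2", "3"]
    assert split_gpus("[0,1],[2,3]") == ["0,1", "2,3"]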
def authorize_build_component(config_dict, config_ctx, node, fl_ctx: FLContext, event_handlers) -> str:
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
if not workspace:
raise RuntimeError("missing workspace object in fl_ctx")
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID)
if not job_id:
raise RuntimeError("missing job id in fl_ctx")
meta = get_job_meta_from_workspace(workspace, job_id)
fl_ctx.set_prop(FLContextKey.JOB_META, meta, sticky=False, private=True)
fl_ctx.set_prop(FLContextKey.COMPONENT_CONFIG, config_dict, sticky=False, private=True)
fl_ctx.set_prop(FLContextKey.CONFIG_CTX, config_ctx, sticky=False, private=True)
fl_ctx.set_prop(FLContextKey.COMPONENT_NODE, node, sticky=False, private=True)
fire_event(EventType.BEFORE_BUILD_COMPONENT, event_handlers, fl_ctx)
err = fl_ctx.get_prop(FLContextKey.COMPONENT_BUILD_ERROR)
if err:
return err
# check exceptions
exceptions = fl_ctx.get_prop(FLContextKey.EXCEPTIONS)
if exceptions and isinstance(exceptions, dict):
for handler_name, ex in exceptions.items():
if isinstance(ex, UnsafeComponentError):
err = str(ex)
if not err:
err = f"Unsafe component detected by {handler_name}"
return err
return ""
def get_target_names(targets):
# validate targets
target_names = []
for t in targets:
if isinstance(t, str):
name = t
elif isinstance(t, Client):
name = t.name
else:
raise ValueError(f"invalid target in list: got {type(t)}")
if not name:
# ignore empty name
continue
if name not in target_names:
            target_names.append(name)
return target_names
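# A minimal usage sketch of get_target_names (illustration only, not part of the original
# module). Client(name, token) construction is assumed; the names and token are hypothetical.
def _example_get_target_names():
    targets = ["site-1", Client("site-2", "tok-2"), "site-1"]
    return get_target_names(targets)  # ["site-1", "site-2"]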
| NVFlare-main | nvflare/private/fed/utils/fed_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.app_validation import AppValidationKey, AppValidator
from nvflare.fuel.sec.authz import AuthorizationService, AuthzContext, Person
_RIGHT_BYOC = "byoc"
class AppAuthzService(object):
app_validator = None
@staticmethod
def initialize(app_validator):
if app_validator and not isinstance(app_validator, AppValidator):
raise TypeError(f"app_validator must be an instance of AppValidator, but get {type(app_validator)}.")
AppAuthzService.app_validator = app_validator
@staticmethod
def authorize(
app_path: str,
submitter_name: str,
submitter_org: str,
submitter_role: str,
) -> (bool, str):
if not AppAuthzService.app_validator:
return True, ""
err, app_info = AppAuthzService.app_validator.validate(app_path)
if err:
return False, err
app_has_custom_code = app_info.get(AppValidationKey.BYOC, False)
if not app_has_custom_code:
return True, ""
ctx = AuthzContext(
user=Person(submitter_name, submitter_org, submitter_role),
submitter=Person(submitter_name, submitter_org, submitter_role),
right=_RIGHT_BYOC,
)
authorized, err = AuthorizationService.authorize(ctx)
if not authorized:
return False, "BYOC not permitted"
return True, ""
| NVFlare-main | nvflare/private/fed/utils/app_authz.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decomposers for objects used by NVFlare platform privately
This module contains all the decomposers used to run NVFlare.
The decomposers are registered at server/client startup.
"""
from nvflare.fuel.utils import fobs
from nvflare.private.admin_defs import Message
from nvflare.private.fed.server.run_info import RunInfo
from nvflare.private.fed.server.server_state import Cold2HotState, ColdState, Hot2ColdState, HotState, ShutdownState
def register():
if register.registered:
return
fobs.register_data_classes(Message, RunInfo, HotState, ColdState, Hot2ColdState, Cold2HotState, ShutdownState)
register.registered = True
register.registered = False
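# Calling register() more than once is safe (illustration only): the flag stored on the
# function object makes the second and later calls return immediately.
#   register()
#   register()  # no-op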
| NVFlare-main | nvflare/private/fed/utils/decomposers/private_decomposers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/private/fed/utils/decomposers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReservedKey, ReservedTopic, ServerCommandKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReturnCode, Shareable, make_reply
from nvflare.fuel.f3.message import Message
from nvflare.private.fed.server.run_manager import RunManager
from nvflare.private.fed.server.server_state import HotState
from ..server.fed_server import FederatedServer
from ..server.server_engine import ServerEngine
class SimulatorServerEngine(ServerEngine):
def persist_components(self, fl_ctx: FLContext, completed: bool):
pass
def sync_clients_from_main_process(self):
pass
def update_job_run_status(self):
pass
def send_aux_request(
self,
        targets: List[str],
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
optional=False,
secure=False,
) -> dict:
if topic != ReservedTopic.END_RUN:
return super().send_aux_request(targets, topic, request, timeout, fl_ctx, optional, secure=secure)
else:
return {}
class SimulatorRunManager(RunManager):
def create_job_processing_context_properties(self, workspace, job_id):
return {}
class SimulatorServer(FederatedServer):
def __init__(
self,
project_name=None,
min_num_clients=2,
max_num_clients=10,
cmd_modules=None,
heart_beat_timeout=600,
handlers: Optional[List[FLComponent]] = None,
args=None,
secure_train=False,
enable_byoc=False,
snapshot_persistor=None,
overseer_agent=None,
):
super().__init__(
project_name,
min_num_clients,
max_num_clients,
cmd_modules,
heart_beat_timeout,
handlers,
args,
secure_train,
# enable_byoc,
snapshot_persistor,
overseer_agent,
)
self.job_cell = None
self.server_state = HotState()
def _process_task_request(self, client, fl_ctx, shared_fl_ctx: FLContext):
fl_ctx.set_peer_context(shared_fl_ctx)
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
taskname, task_id, shareable = server_runner.process_task_request(client, fl_ctx)
return shareable, task_id, taskname
def _submit_update(self, data, shared_fl_context):
with self.engine.new_context() as fl_ctx:
shareable = data.get(ReservedKey.SHAREABLE)
shared_fl_ctx = data.get(ReservedKey.SHARED_FL_CONTEXT)
client = shareable.get_header(ServerCommandKey.FL_CLIENT)
fl_ctx.set_peer_context(shared_fl_ctx)
contribution_task_name = shareable.get_header(ServerCommandKey.TASK_NAME)
task_id = shareable.get_cookie(FLContextKey.TASK_ID)
server_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
server_runner.process_submission(client, contribution_task_name, task_id, shareable, fl_ctx)
def _aux_communicate(self, fl_ctx, shareable, shared_fl_context, topic):
try:
with self.engine.lock:
reply = self.engine.dispatch(topic=topic, request=shareable, fl_ctx=fl_ctx)
except Exception:
self.logger.info("Could not connect to server runner process - asked client to end the run")
reply = make_reply(ReturnCode.COMMUNICATION_ERROR)
return reply
def _create_server_engine(self, args, snapshot_persistor):
return SimulatorServerEngine(
server=self, args=args, client_manager=self.client_manager, snapshot_persistor=snapshot_persistor
)
def deploy(self, args, grpc_args=None, secure_train=False):
super(FederatedServer, self).deploy(args, grpc_args, secure_train)
self._register_cellnet_cbs()
def stop_training(self):
self.engine.run_processes.clear()
super().stop_training()
def create_run_manager(self, workspace, job_id):
return SimulatorRunManager(
server_name=self.project_name,
engine=self.engine,
job_id=job_id,
workspace=workspace,
components=self.runner_config.components,
client_manager=self.client_manager,
handlers=self.runner_config.handlers,
)
def stop_run_engine_cell(self):
self.engine.ask_to_stop()
# self.job_cell.stop()
# super().stop_run_engine_cell()
def authentication_check(self, request: Message, state_check):
return None
def client_cleanup(self):
pass
| NVFlare-main | nvflare/private/fed/simulator/simulator_server.py |
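One behavior worth noting above is SimulatorServerEngine.send_aux_request, which forwards every aux topic to the parent engine except END_RUN, because the simulator manages run termination itself. The sketch below is plain Python (not the NVFlare API) that isolates just that override pattern; the class and constant names are illustrative stand-ins.

END_RUN = "__end_run"  # stand-in for ReservedTopic.END_RUN


class BaseEngine:
    def send_aux_request(self, targets, topic, request):
        # Pretend to deliver the request to each target client.
        return {t: f"delivered '{topic}'" for t in targets}


class SimEngine(BaseEngine):
    def send_aux_request(self, targets, topic, request):
        if topic == END_RUN:
            return {}  # drop END_RUN, as the simulator engine does
        return super().send_aux_request(targets, topic, request)


engine = SimEngine()
print(engine.send_aux_request(["site-1", "site-2"], "report_status", None))  # forwarded
print(engine.send_aux_request(["site-1", "site-2"], END_RUN, None))          # -> {}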
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.sec.audit import Auditor
class SimulatorAuditor(Auditor):
def __init__(self):
pass
def add_event(self, user: str, action: str, ref: str = "", msg: str = "") -> str:
pass
def add_job_event(
self, job_id: str, scope_name: str = "", task_name: str = "", task_id: str = "", ref: str = "", msg: str = ""
) -> str:
pass
def close(self):
pass
| NVFlare-main | nvflare/private/fed/simulator/simulator_audit.py |
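SimulatorAuditor above is a null-object Auditor: simulation code can call the audit API freely while nothing is recorded. A small usage sketch, assuming an environment where the nvflare package is installed; the user and job names are hypothetical.

from nvflare.private.fed.simulator.simulator_audit import SimulatorAuditor

auditor = SimulatorAuditor()
event_id = auditor.add_event(user="sim_user", action="start_run")
auditor.add_job_event(job_id="simulate_job", task_name="train")
auditor.close()
print(event_id)  # None - no audit record is created during simulation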