# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.he_model_reader_writer",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.he_model_reader_writer import HEPTModelReaderWriter
# ==== NVFlare-main: nvflare/app_common/homomorphic_encryption/he_pt_model_reader_writer.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .full_model_shareable_generator import FullModelShareableGenerator
__all__ = ["FullModelShareableGenerator"]
# ==== NVFlare-main: nvflare/app_common/shareablegenerators/__init__.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
class FullModelShareableGenerator(ShareableGenerator):
def learnable_to_shareable(self, model_learnable: ModelLearnable, fl_ctx: FLContext) -> Shareable:
"""Convert ModelLearnable to Shareable.
Args:
model_learnable (ModelLearnable): model to be converted
fl_ctx (FLContext): FL context
Returns:
Shareable: a shareable containing a DXO object.
"""
dxo = model_learnable_to_dxo(model_learnable)
return dxo.to_shareable()
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
"""Convert Shareable to ModelLearnable.
Supports shareables with data_kind DataKind.WEIGHT_DIFF or DataKind.WEIGHTS.
Args:
shareable (Shareable): Shareable that contains a DXO object
fl_ctx (FLContext): FL context
Returns:
A ModelLearnable object
Raises:
TypeError: if shareable is not of type Shareable
ValueError: if data_kind is not `DataKind.WEIGHTS` and is not `DataKind.WEIGHT_DIFF`
"""
if not isinstance(shareable, Shareable):
raise TypeError("shareable must be Shareable, but got {}.".format(type(shareable)))
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
dxo = from_shareable(shareable)
if dxo.data_kind == DataKind.WEIGHT_DIFF:
if not base_model:
self.system_panic(reason="No global base model needed for processing WEIGHT_DIFF!", fl_ctx=fl_ctx)
return base_model
weights = base_model[ModelLearnableKey.WEIGHTS]
if dxo.data is not None:
model_diff = dxo.data
for v_name, v_value in model_diff.items():
weights[v_name] = weights[v_name] + v_value
elif dxo.data_kind == DataKind.WEIGHTS:
if not base_model:
base_model = ModelLearnable()
weights = dxo.data
if not weights:
self.log_info(fl_ctx, "No model weights found. Model will not be updated.")
else:
base_model[ModelLearnableKey.WEIGHTS] = weights
else:
raise ValueError(
"data_kind should be either DataKind.WEIGHTS or DataKind.WEIGHT_DIFF, but got {}".format(dxo.data_kind)
)
base_model[ModelLearnableKey.META] = dxo.get_meta_props()
return base_model
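# Illustrative sketch, not NVFlare source: the WEIGHT_DIFF branch above adds each
# received diff tensor to the matching base weight by variable name. A minimal
# stand-alone version of that rule, using plain numpy arrays:
import numpy as np

def apply_weight_diff(weights: dict, diff: dict) -> dict:
    """Apply an additive model update, mirroring the WEIGHT_DIFF branch above."""
    for v_name, v_value in diff.items():
        weights[v_name] = weights[v_name] + v_value
    return weights

_base = {"fc.weight": np.zeros((2, 2)), "fc.bias": np.zeros(2)}
_diff = {"fc.weight": np.ones((2, 2)), "fc.bias": np.ones(2)}
assert apply_weight_diff(_base, _diff)["fc.bias"].tolist() == [1.0, 1.0]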
# ==== NVFlare-main: nvflare/app_common/shareablegenerators/full_model_shareable_generator.py ====
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from nvflare.apis.app_deployer_spec import AppDeployerSpec, FLContext
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import SystemComponents
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec
from nvflare.apis.utils.job_utils import load_job_def_bytes
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils.dict_utils import update_components
class HubAppDeployer(AppDeployerSpec, FLComponent):
HUB_CLIENT_CONFIG_TEMPLATE_NAME = "hub_client.json"
OLD_HUB_CLIENT_CONFIG_TEMPLATE_NAME = "t1_config_fed_client.json"
HUB_SERVER_CONFIG_TEMPLATE_NAME = "hub_server.json"
OLD_HUB_SERVER_CONFIG_TEMPLATE_NAME = "t2_server_components.json"
HUB_CLIENT_CONFIG_TEMPLATES = [HUB_CLIENT_CONFIG_TEMPLATE_NAME, OLD_HUB_CLIENT_CONFIG_TEMPLATE_NAME]
HUB_SERVER_CONFIG_TEMPLATES = [HUB_SERVER_CONFIG_TEMPLATE_NAME, OLD_HUB_SERVER_CONFIG_TEMPLATE_NAME]
def __init__(self):
FLComponent.__init__(self)
def prepare(
self, fl_ctx: FLContext, workspace: Workspace, job_id: str, remove_tmp_t2_dir: bool = True
) -> (str, dict, bytes):
"""
Prepare T2 job
Args:
fl_ctx:
workspace:
job_id:
remove_tmp_t2_dir:
Returns: error str if any, meta dict, and job bytes to be submitted to T2 store
"""
server_app_config_path = workspace.get_server_app_config_file_path(job_id)
if not os.path.exists(server_app_config_path):
return f"missing {server_app_config_path}", None, None
# step 2: make a copy of the app for T2
t1_run_dir = workspace.get_run_dir(job_id)
t2_job_id = job_id + "_t2" # temporary ID for creating T2 job
t2_run_dir = workspace.get_run_dir(t2_job_id)
shutil.copytree(t1_run_dir, t2_run_dir)
# step 3: modify the T1 client's config_fed_client.json to use HubExecutor
# simply use t1_config_fed_client.json in the site folder
site_config_dir = workspace.get_site_config_dir()
t1_client_app_config_path = workspace.get_file_path_in_site_config(self.HUB_CLIENT_CONFIG_TEMPLATES)
if not t1_client_app_config_path:
return (
f"no HUB client config template '{self.HUB_CLIENT_CONFIG_TEMPLATES}' in {site_config_dir}",
None,
None,
)
shutil.copyfile(t1_client_app_config_path, workspace.get_client_app_config_file_path(job_id))
# step 4: modify T2 server's config_fed_server.json to use HubController
t2_server_app_config_path = workspace.get_server_app_config_file_path(t2_job_id)
if not os.path.exists(t2_server_app_config_path):
return f"missing {t2_server_app_config_path}", None, None
t2_server_component_file = workspace.get_file_path_in_site_config(self.HUB_SERVER_CONFIG_TEMPLATES)
if not t2_server_component_file:
return (
f"no HUB server config template '{self.HUB_SERVER_CONFIG_TEMPLATES}' in {site_config_dir}",
None,
None,
)
with open(t2_server_app_config_path) as file:
t2_server_app_config_dict = json.load(file)
with open(t2_server_component_file) as file:
t2_server_component_dict = json.load(file)
# update components in the server's config with changed components
# This will replace shareable_generator with the one defined in t2_server_components.json
err = update_components(target_dict=t2_server_app_config_dict, from_dict=t2_server_component_dict)
if err:
return err, None, None
# change to use HubController as the workflow for T2
t2_wf = t2_server_component_dict.get("workflows", None)
if not t2_wf:
return f"missing workflows in {t2_server_component_file}", None, None
t2_server_app_config_dict["workflows"] = t2_wf
# recreate T2's server app config file
with open(t2_server_app_config_path, "w") as f:
json.dump(t2_server_app_config_dict, f, indent=4)
# create job meta for T2
t1_meta_path = workspace.get_job_meta_path(job_id)
if not os.path.exists(t1_meta_path):
return f"missing {t1_meta_path}", None, None
with open(t1_meta_path) as file:
t1_meta = json.load(file)
submitter_name = t1_meta.get(JobMetaKey.SUBMITTER_NAME.value, "")
submitter_org = t1_meta.get(JobMetaKey.SUBMITTER_ORG.value, "")
submitter_role = t1_meta.get(JobMetaKey.SUBMITTER_ROLE.value, "")
scope = t1_meta.get(JobMetaKey.SCOPE.value, "")
# Note: the app_name is already created like "app_"+site_name, which is also the directory that contains
# app config files (config_fed_server.json and config_fed_client.json).
# We need to make sure that the deploy-map uses this app name!
# We also add the FROM_HUB_SITE into the T2's job meta to indicate that this job comes from a HUB site.
t2_app_name = "app_" + workspace.site_name
t2_meta = {
"name": t2_app_name,
"deploy_map": {t2_app_name: ["@ALL"]},
"min_clients": 1,
"job_id": job_id,
JobMetaKey.SUBMITTER_NAME.value: submitter_name,
JobMetaKey.SUBMITTER_ORG.value: submitter_org,
JobMetaKey.SUBMITTER_ROLE.value: submitter_role,
JobMetaKey.SCOPE.value: scope,
JobMetaKey.FROM_HUB_SITE.value: workspace.site_name,
}
t2_meta_path = workspace.get_job_meta_path(t2_job_id)
with open(t2_meta_path, "w") as f:
json.dump(t2_meta, f, indent=4)
# step 5: submit T2 app (as a job) to T1's job store
t2_job_def = load_job_def_bytes(from_path=workspace.root_dir, def_name=t2_job_id)
job_validator = fl_ctx.get_prop(SystemComponents.JOB_META_VALIDATOR)
valid, error, meta = job_validator.validate(t2_job_id, t2_job_def)
if not valid:
return f"invalid T2 job definition: {error}", None, None
# make sure meta contains the right job ID
t2_jid = meta.get(JobMetaKey.JOB_ID.value, None)
if not t2_jid:
return "missing Job ID from T2 meta!", None, None
if job_id != t2_jid:
return f"T2 Job ID {t2_jid} != T1 Job ID {job_id}", None, None
# step 6: remove the temporary job def for T2
if remove_tmp_t2_dir:
shutil.rmtree(t2_run_dir)
return "", meta, t2_job_def
def deploy(
self, workspace: Workspace, job_id: str, job_meta: dict, app_name: str, app_data: bytes, fl_ctx: FLContext
) -> str:
# step 1: deploy the T1 app into the workspace
deployer = fl_ctx.get_prop(SystemComponents.DEFAULT_APP_DEPLOYER)
err = deployer.deploy(workspace, job_id, job_meta, app_name, app_data, fl_ctx)
if err:
self.log_error(fl_ctx, f"Failed to deploy job {job_id}: {err}")
return err
err, meta, t2_job_def = self.prepare(fl_ctx, workspace, job_id)
if err:
self.log_error(fl_ctx, f"Failed to deploy job {job_id}: {err}")
return err
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
if not isinstance(job_manager, JobDefManagerSpec):
return "Job Manager for T2 not configured!"
job_manager.create(meta, t2_job_def, fl_ctx)
return ""
# ==== NVFlare-main: nvflare/app_common/hub/hub_app_deployer.py ====
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.pipe.pipe_handler import PipeHandler, Topic
from nvflare.fuel.utils.validation_utils import check_positive_number, check_str
class HubExecutor(Executor):
"""
This executor is to be used by Tier-1 (T1) clients.
It exchanges task data/results with the Hub Controller of the Tier-2 (T2) server.
"""
def __init__(
self, pipe_id: str, task_wait_time=None, result_poll_interval: float = 0.1, task_read_wait_time: float = 10.0
):
"""
Args:
pipe_id: component ID of the Pipe used to exchange messages with the T2 system
task_wait_time: how long to wait for result from T2
result_poll_interval: polling interval for T2 result
task_read_wait_time: how long to wait for T2 to read a task assignment
"""
Executor.__init__(self)
check_str("pipe_id", pipe_id)
if task_wait_time is not None:
check_positive_number("task_wait_time", task_wait_time)
check_positive_number("result_poll_interval", result_poll_interval)
check_positive_number("task_read_wait_time", task_read_wait_time)
self.pipe_id = pipe_id
self.task_wait_time = task_wait_time
self.result_poll_interval = result_poll_interval
self.task_read_wait_time = task_read_wait_time
self.task_seq_num = 0
self.t2_ended = False
self.pipe_handler = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
if event_type == EventType.START_RUN:
job_id = fl_ctx.get_job_id()
pipe: Pipe = engine.get_component(self.pipe_id)
if not isinstance(pipe, Pipe):
raise TypeError(f"pipe must be Pipe type. Got: {type(pipe)}")
pipe.open(name=job_id)
self.pipe_handler = PipeHandler(pipe)
self.pipe_handler.start()
elif event_type == EventType.END_RUN:
# tell T2 system to end run
self.log_info(fl_ctx, "END_RUN received - telling T2 to stop")
self.pipe_handler.notify_end("END_RUN received")
self.pipe_handler.stop()
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
contrib_round = shareable.get_cookie(AppConstants.CONTRIBUTION_ROUND)
if contrib_round is None:
self.log_warning(fl_ctx, "CONTRIBUTION_ROUND Not Set in task data!")
# send the task to T2
task_id = shareable.get_header(ReservedHeaderKey.TASK_ID)
self.log_info(fl_ctx, f"sending task data to T2 for task {task_name}")
req = Message.new_request(topic=task_name, data=shareable)
task_received_by_t2 = self.pipe_handler.send_to_peer(req, timeout=self.task_read_wait_time)
if not task_received_by_t2:
self.log_error(
fl_ctx, f"T2 failed to read task '{task_name}' in {self.task_read_wait_time} secs - aborting task!"
)
return make_reply(ReturnCode.SERVICE_UNAVAILABLE)
# wait for result from T2
start = time.time()
while True:
if abort_signal.triggered:
# notify T2 that the task is aborted
self.pipe_handler.notify_abort(task_id)
return make_reply(ReturnCode.TASK_ABORTED)
reply = self.pipe_handler.get_next()
if not reply:
if self.task_wait_time and time.time() - start > self.task_wait_time:
# timed out
self.log_error(fl_ctx, f"task '{task_name}' timeout after {self.task_wait_time} secs")
# also tell T2 to abort the task
self.pipe_handler.notify_abort(task_id)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
elif reply.topic == Topic.ABORT:
# T2 told us to abort the task!
return make_reply(ReturnCode.TASK_ABORTED)
elif reply.topic in [Topic.END, Topic.PEER_GONE]:
# T2 told us it has ended the run
self.log_error(fl_ctx, f"received {reply.topic} from T2 while waiting for result for {task_name}")
return make_reply(ReturnCode.SERVICE_UNAVAILABLE)
elif reply.msg_type != Message.REPLY:
self.log_warning(
fl_ctx, f"ignored msg '{reply.topic}.{reply.req_id}' when waiting for '{req.topic}.{req.msg_id}'"
)
elif req.topic != reply.topic:
# ignore wrong task name
self.log_warning(fl_ctx, f"ignored '{reply.topic}' when waiting for '{req.topic}'")
elif req.msg_id != reply.req_id:
self.log_warning(fl_ctx, f"ignored '{reply.req_id}' when waiting for '{req.msg_id}'")
else:
self.log_info(fl_ctx, f"got result for request '{task_name}' from T2")
if not isinstance(reply.data, Shareable):
self.log_error(fl_ctx, f"bad result data from T2 - must be Shareable but got {type(reply.data)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# add important meta information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
if current_round is not None:
reply.data.set_header(AppConstants.CURRENT_ROUND, current_round)
return reply.data
time.sleep(self.result_poll_interval)
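# Illustrative sketch, not NVFlare source: the reply-matching rule in the wait
# loop above. A message is accepted as the result only when it is a REPLY whose
# topic and request id line up with the outstanding request. The _Msg class is a
# stub standing in for nvflare.fuel.utils.pipe.pipe.Message.
from dataclasses import dataclass

@dataclass
class _Msg:
    msg_type: str
    topic: str
    msg_id: str = ""
    req_id: str = ""

def _is_reply_for(req: _Msg, reply: _Msg) -> bool:
    # same acceptance condition as the execute() loop: type, topic, and id all match
    return reply.msg_type == "REPLY" and reply.topic == req.topic and reply.req_id == req.msg_id

_req = _Msg(msg_type="REQ", topic="train", msg_id="42")
assert _is_reply_for(_req, _Msg(msg_type="REPLY", topic="train", req_id="42"))
assert not _is_reply_for(_req, _Msg(msg_type="REPLY", topic="eval", req_id="42"))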
# ==== NVFlare-main: nvflare/app_common/hub/hub_executor.py ====
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==== NVFlare-main: nvflare/app_common/hub/__init__.py ====
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from typing import Union
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import (
ClientTask,
ControllerSpec,
OperatorConfigKey,
OperatorMethod,
Task,
TaskOperatorKey,
)
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import Controller
from nvflare.apis.operator_spec import OperatorSpec
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.pipe.pipe_handler import PipeHandler, Topic
from nvflare.fuel.utils.validation_utils import check_object_type, check_positive_number, check_str
class BcastOperator(OperatorSpec, FLComponent):
_PROP_AGGR = "aggr"
def __init__(self):
OperatorSpec.__init__(self)
FLComponent.__init__(self)
self.current_aggregator = None
@staticmethod
def _get_aggregator(op_description: dict, fl_ctx: FLContext):
aggr_id = op_description.get(TaskOperatorKey.AGGREGATOR, "")
if not aggr_id:
raise RuntimeError("missing aggregator component id")
engine = fl_ctx.get_engine()
aggr = engine.get_component(aggr_id)
if not aggr:
raise RuntimeError(f"no aggregator defined for component id {aggr_id}")
if not isinstance(aggr, Aggregator):
raise RuntimeError(f"component {aggr_id} must be Aggregator but got {type(aggr)}")
return aggr
def operate(
self,
op_description: dict,
controller: ControllerSpec,
task_name: str,
task_data: Shareable,
abort_signal: Signal,
fl_ctx: FLContext,
) -> Union[Shareable, None]:
aggr = self._get_aggregator(op_description, fl_ctx)
# reset the internal state of the aggregator for next round of aggregation
self.current_aggregator = aggr
aggr.reset(fl_ctx)
engine = fl_ctx.get_engine()
total_num_clients = len(engine.get_clients())
timeout = op_description.get(TaskOperatorKey.TIMEOUT, 0)
wait_time_after_min_resps = op_description.get(TaskOperatorKey.WAIT_TIME_AFTER_MIN_RESPS, 5)
min_clients = op_description.get(TaskOperatorKey.MIN_TARGETS, 0)
if min_clients > total_num_clients:
min_clients = total_num_clients
wait_time_after_min_resps = 0
targets = op_description.get(TaskOperatorKey.TARGETS, None)
# data is from T1
train_task = Task(
name=task_name,
data=task_data,
props={self._PROP_AGGR: aggr},
timeout=timeout,
result_received_cb=self._process_bcast_result,
)
controller.broadcast_and_wait(
task=train_task,
targets=targets,
min_responses=min_clients,
wait_time_after_min_received=wait_time_after_min_resps,
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
aggr_result = aggr.aggregate(fl_ctx)
self.current_aggregator = None
return aggr_result
def _process_bcast_result(self, client_task: ClientTask, fl_ctx: FLContext) -> None:
result = client_task.result
aggr = client_task.task.get_prop(self._PROP_AGGR)
aggr.accept(result, fl_ctx)
# Cleanup task result
client_task.result = None
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
aggr = self.current_aggregator
if aggr:
aggr.accept(result, fl_ctx)
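# Illustrative sketch, not NVFlare source: the clamp in operate() above. If the
# requested minimum exceeds the number of connected clients, wait for all of
# them and drop the extra grace period after the minimum is reached.
def clamp_min_clients(min_clients: int, total_clients: int, grace: float):
    if min_clients > total_clients:
        return total_clients, 0
    return min_clients, grace

assert clamp_min_clients(8, 3, 5) == (3, 0)
assert clamp_min_clients(2, 3, 5) == (2, 5)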
class RelayOperator(OperatorSpec, FLComponent):
_PROP_LAST_RESULT = "last_result"
_PROP_SHAREABLE_GEN = "shareable_generator"
def __init__(self):
OperatorSpec.__init__(self)
FLComponent.__init__(self)
@staticmethod
def _get_shareable_generator(op_description: dict, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
comp_id = op_description.get(TaskOperatorKey.SHAREABLE_GENERATOR, "")
if not comp_id:
return None
shareable_generator = engine.get_component(comp_id)
if not shareable_generator:
raise RuntimeError(f"no shareable generator defined for component id {comp_id}")
if not isinstance(shareable_generator, ShareableGenerator):
raise RuntimeError(f"component {comp_id} must be ShareableGenerator but got {type(shareable_generator)}")
return shareable_generator
@staticmethod
def _get_persistor(op_description: dict, fl_ctx: FLContext):
persistor_id = op_description.get(TaskOperatorKey.PERSISTOR, "")
if not persistor_id:
return None
engine = fl_ctx.get_engine()
persistor = engine.get_component(persistor_id)
if not persistor:
raise RuntimeError(f"no persistor defined for component id {persistor_id}")
if not isinstance(persistor, LearnablePersistor):
raise RuntimeError(f"component {persistor_id} must be LearnablePersistor but got {type(persistor)}")
return persistor
def operate(
self,
op_description: dict,
controller: ControllerSpec,
task_name: str,
task_data: Shareable,
abort_signal: Signal,
fl_ctx: FLContext,
) -> Union[None, Shareable]:
current_round = task_data.get_header(AppConstants.CURRENT_ROUND, None)
shareable_generator = self._get_shareable_generator(op_description, fl_ctx)
persistor = self._get_persistor(op_description, fl_ctx)
if persistor:
# The persistor should convert the TASK_DATA in the fl_ctx into a learnable
# This learnable is the base for the relay
learnable_base = persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, learnable_base, private=True, sticky=False)
task = Task(
name=task_name,
data=task_data,
props={
AppConstants.CURRENT_ROUND: current_round,
self._PROP_LAST_RESULT: None,
self._PROP_SHAREABLE_GEN: shareable_generator,
},
result_received_cb=self._process_relay_result,
)
targets = op_description.get(TaskOperatorKey.TARGETS, None)
task_assignment_timeout = op_description.get(TaskOperatorKey.TASK_ASSIGNMENT_TIMEOUT, 0)
controller.relay_and_wait(
task=task,
targets=targets,
task_assignment_timeout=task_assignment_timeout,
fl_ctx=fl_ctx,
dynamic_targets=True,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return None
return task.get_prop(self._PROP_LAST_RESULT)
def _process_relay_result(self, client_task: ClientTask, fl_ctx: FLContext):
# submitted shareable is stored in client_task.result
# we need to update task.data with that shareable so the next target
# will get the updated shareable
task = client_task.task
current_round = task.get_prop(AppConstants.CURRENT_ROUND)
task.set_prop(self._PROP_LAST_RESULT, client_task.result)
task_data = client_task.result
shareable_generator = task.get_prop(self._PROP_SHAREABLE_GEN)
if shareable_generator:
# turn received result (a Shareable) to learnable (i.e. weight diff => weight)
learnable = shareable_generator.shareable_to_learnable(client_task.result, fl_ctx)
# turn the learnable to task data for the next leg (i.e. weight Learnable to weight Shareable)
task_data = shareable_generator.learnable_to_shareable(learnable, fl_ctx)
if current_round is not None:
task_data.set_header(AppConstants.CURRENT_ROUND, current_round)
task.data = task_data
client_task.result = None
class HubController(Controller):
def __init__(
self,
pipe_id: str,
task_wait_time=None,
task_data_poll_interval: float = 0.1,
):
Controller.__init__(self)
check_positive_number("task_data_poll_interval", task_data_poll_interval)
check_str("pipe_id", pipe_id)
if task_wait_time is not None:
check_positive_number("task_wait_time", task_wait_time)
self.pipe_id = pipe_id
self.operator_descs = None
self.task_wait_time = task_wait_time
self.task_data_poll_interval = task_data_poll_interval
self.pipe = None
self.pipe_handler = None
self.run_ended = False
self.task_abort_signal = None
self.current_task_name = None
self.current_task_id = None
self.current_operator = None
self.builtin_operators = {OperatorMethod.BROADCAST: BcastOperator(), OperatorMethod.RELAY: RelayOperator()}
self.project_name = ""
def start_controller(self, fl_ctx: FLContext) -> None:
self.project_name = fl_ctx.get_identity_name()
# get operators
engine = fl_ctx.get_engine()
job_id = fl_ctx.get_job_id()
workspace = engine.get_workspace()
app_config_file = workspace.get_server_app_config_file_path(job_id)
with open(app_config_file) as file:
app_config = json.load(file)
self.operator_descs = app_config.get(OperatorConfigKey.OPERATORS, {})
self.log_debug(fl_ctx, f"Got operator descriptions: {self.operator_descs}")
def handle_event(self, event_type: str, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
if event_type == EventType.START_RUN:
job_id = fl_ctx.get_job_id()
pipe = engine.get_component(self.pipe_id)
check_object_type("pipe", pipe, Pipe)
pipe.open(name=job_id)
self.pipe_handler = PipeHandler(pipe)
elif event_type == EventType.END_RUN:
self.run_ended = True
def _abort(self, reason: str, abort_signal: Signal, fl_ctx: FLContext):
self.pipe_handler.notify_abort(reason)
if reason:
self.log_error(fl_ctx, reason)
if abort_signal:
abort_signal.trigger(True)
def _get_operator(self, task_name: str, op_desc: dict, fl_ctx: FLContext):
method_name = op_desc.get(TaskOperatorKey.METHOD)
if not method_name:
return None, f"bad operator in task '{task_name}' from T1 - missing method name"
# see whether an Operator is defined for the method
engine = fl_ctx.get_engine()
operator = engine.get_component(method_name)
if not operator:
operator = self.builtin_operators.get(method_name, None)
if not operator:
return None, f"bad task '{task_name}' from T1 - no operator for '{method_name}'"
if not isinstance(operator, OperatorSpec):
return None, f"operator for '{method_name}' must be OperatorSpec but got {type(operator)}"
return operator, ""
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
try:
self.pipe_handler.start()
self._control_flow(abort_signal, fl_ctx)
self.pipe_handler.stop()
except Exception as ex:
self.log_exception(fl_ctx, "control flow exception")
self._abort(f"control_flow exception {ex}", abort_signal, fl_ctx)
def _control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
control_flow_start = time.time()
task_start = control_flow_start
while True:
if self.run_ended:
# tell T1 to end the run
self._abort(reason="", abort_signal=abort_signal, fl_ctx=fl_ctx)
return
if abort_signal.triggered:
# tell T1 to end the run
self._abort(reason="", abort_signal=abort_signal, fl_ctx=fl_ctx)
return
msg = self.pipe_handler.get_next()
if not msg:
if self.task_wait_time and time.time() - task_start > self.task_wait_time:
# timed out - tell T1 to end the RUN
self._abort(
reason=f"task data timeout after {self.task_wait_time} secs",
abort_signal=abort_signal,
fl_ctx=fl_ctx,
)
return
else:
if msg.topic in [Topic.ABORT, Topic.END, Topic.PEER_GONE]:
# the T1 peer is gone
self.log_info(fl_ctx, f"T1 stopped: '{msg.topic}'")
return
if msg.msg_type != Message.REQUEST:
self.log_info(fl_ctx, f"ignored '{msg.topic}' from T1 - not a request!")
continue
self.log_info(fl_ctx, f"got data for task '{msg.topic}' from T1")
if not isinstance(msg.data, Shareable):
self._abort(
reason=f"bad data for task '{msg.topic}' from T1 - must be Shareable but got {type(msg.data)}",
abort_signal=abort_signal,
fl_ctx=fl_ctx,
)
return
task_data = msg.data
task_name = task_data.get_header(ReservedHeaderKey.TASK_NAME)
if not task_name:
self._abort(
reason=f"bad data for task '{msg.topic}' from T1 - missing task name",
abort_signal=abort_signal,
fl_ctx=fl_ctx,
)
return
task_id = task_data.get_header(ReservedHeaderKey.TASK_ID)
if not task_id:
self._abort(
reason=f"bad data for task '{msg.topic}' from T1 - missing task id",
abort_signal=abort_signal,
fl_ctx=fl_ctx,
)
return
op_desc = task_data.get_header(ReservedHeaderKey.TASK_OPERATOR, {})
op_id = op_desc.get(TaskOperatorKey.OP_ID)
if not op_id:
# use task_name as the operation id
op_desc[TaskOperatorKey.OP_ID] = task_name
self._resolve_op_desc(op_desc, fl_ctx)
operator, err = self._get_operator(task_name, op_desc, fl_ctx)
if not operator:
self._abort(reason=err, abort_signal=abort_signal, fl_ctx=fl_ctx)
return
operator_name = operator.__class__.__name__
self.log_info(fl_ctx, f"Invoking Operator {operator_name} for task {task_name}")
try:
current_round = task_data.get_header(AppConstants.CURRENT_ROUND, 0)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, current_round, private=True, sticky=True)
contrib_round = task_data.get_cookie(AppConstants.CONTRIBUTION_ROUND)
if contrib_round is None:
self.log_warning(fl_ctx, "CONTRIBUTION_ROUND Not Set!")
self.fire_event(AppEventType.ROUND_STARTED, fl_ctx)
fl_ctx.set_prop(key=FLContextKey.TASK_DATA, value=task_data, private=True, sticky=False)
self.current_task_name = task_name
self.current_task_id = task_id
self.task_abort_signal = abort_signal
self.current_operator = operator
result = operator.operate(
task_name=task_name,
task_data=task_data,
op_description=op_desc,
controller=self,
abort_signal=abort_signal,
fl_ctx=fl_ctx,
)
except Exception:
self.log_exception(fl_ctx, f"exception processing '{task_name}' from operator '{operator_name}'")
result = None
finally:
self.task_abort_signal = None
self.current_task_id = None
self.current_operator = None
self.fire_event(AppEventType.ROUND_DONE, fl_ctx)
if not result:
self.log_error(fl_ctx, f"no result from operator '{operator_name}'")
result = make_reply(ReturnCode.EXECUTION_EXCEPTION)
elif not isinstance(result, Shareable):
self.log_error(
fl_ctx, f"bad result from operator '{operator_name}': expect Shareable but got {type(result)}"
)
result = make_reply(ReturnCode.EXECUTION_EXCEPTION)
reply = Message.new_reply(topic=msg.topic, data=result, req_msg_id=msg.msg_id)
self.pipe_handler.send_to_peer(reply)
task_start = time.time()
time.sleep(self.task_data_poll_interval)
def _resolve_op_desc(self, op_desc: dict, fl_ctx: FLContext):
"""
Determine the correct operation description.
There may be "operators" in job's config_fed_server.json.
If present, it describes the operations for tasks, and its descriptions override op_desc that comes from task!
It may specify a different method than the one in op_desc!
For example, the op_desc may specify the method 'bcast', but the config could specify 'relay'.
In this case, the 'relay' method will be used.
Args:
op_desc: the op description that comes from the task data
Returns: None
"""
op_id = op_desc.get(TaskOperatorKey.OP_ID, None)
if op_id:
# see whether config is set up for this op
# if so, the info in the config overrides op_desc!
# first try to find project-specific definition
op_config = self.operator_descs.get(f"{self.project_name}.{op_id}", None)
if op_config:
self.log_debug(fl_ctx, f"Use CONFIGURED OPERATORS for {self.project_name}.{op_id}")
else:
# try to find general definition
op_config = self.operator_descs.get(op_id, None)
if op_config:
self.log_debug(fl_ctx, f"Use CONFIGURED OPERATORS for {op_id}")
if op_config:
op_desc.update(op_config)
else:
self.log_debug(fl_ctx, "OPERATORS NOT CONFIGURED")
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
# A late reply is received from client.
# We'll include the late reply into the aggregation only if it's for the same type of tasks (i.e.
# same task name). Note that the same task name could be used many times (rounds).
self.log_info(fl_ctx, f"Late response received from client {client.name} for task '{task_name}'")
operator = self.current_operator
if task_name == self.current_task_name and operator:
operator.process_result_of_unknown_task(
client=client, task_name=task_name, client_task_id=client_task_id, result=result, fl_ctx=fl_ctx
)
else:
self.log_warning(fl_ctx, f"Dropped late response received from client {client.name} for task '{task_name}'")
def stop_controller(self, fl_ctx: FLContext):
pass
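# Illustrative sketch, not NVFlare source: the lookup order in _resolve_op_desc()
# above. A project-qualified key ("<project>.<op_id>") wins over the plain op id,
# and a matched config overrides the fields that arrived in the task's op_desc.
# The plain "op_id"/"method" strings stand in for TaskOperatorKey constants.
def resolve_op_desc(op_desc: dict, operator_descs: dict, project: str) -> dict:
    op_id = op_desc.get("op_id")
    if op_id:
        config = operator_descs.get(f"{project}.{op_id}") or operator_descs.get(op_id)
        if config:
            op_desc.update(config)  # configured values override task-supplied ones
    return op_desc

_descs = {"projA.train": {"method": "relay"}, "train": {"method": "bcast"}}
assert resolve_op_desc({"op_id": "train", "method": "bcast"}, _descs, "projA")["method"] == "relay"
assert resolve_op_desc({"op_id": "train", "method": "bcast"}, _descs, "projB")["method"] == "bcast"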
# ==== NVFlare-main: nvflare/app_common/hub/hub_controller.py ====
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Dict, List, Optional
from nvflare.apis.client_engine_spec import ClientEngineSpec
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.apis.shareable import Shareable
from nvflare.widgets.widget import Widget
class _CtxPropReq(object):
"""Requirements of a prop in the FLContext.
Arguments:
dtype: data type of the prop.
is_private: if this prop is private.
is_sticky: if this prop is sticky.
allow_none: if this prop can be None
"""
def __init__(self, dtype, is_private, is_sticky, allow_none: bool = False):
self.dtype = dtype
self.is_private = is_private
self.is_sticky = is_sticky
self.allow_none = allow_none
class _EventReq(object):
"""Requirements for FL and peer context when an event is fired.
Arguments:
ctx_reqs: A dictionary that describes the requirements for fl_ctx. It maps property names to _CtxPropReq
peer_ctx_reqs: A dictionary that describes the requirements for peer_ctx. It maps property names to _CtxPropReq
"""
def __init__(
self,
ctx_reqs: Dict[str, _CtxPropReq],
peer_ctx_reqs: Dict[str, _CtxPropReq],
ctx_block_list: Optional[List[str]] = None,
peer_ctx_block_list: Optional[List[str]] = None,
):
self.ctx_reqs = ctx_reqs # prop name => _CtxPropReq
self.peer_ctx_reqs = peer_ctx_reqs
if ctx_block_list is None:
ctx_block_list = []
if peer_ctx_block_list is None:
peer_ctx_block_list = []
self.ctx_block_list = ctx_block_list
self.peer_ctx_block_list = peer_ctx_block_list
class _EventStats(object):
"""Stats of each event."""
def __init__(self):
self.call_count = 0
self.prop_missing = 0
self.prop_none_value = 0
self.prop_dtype_mismatch = 0
self.prop_attr_mismatch = 0
self.prop_block_list_violation = 0
self.peer_ctx_missing = 0
class EventRecorder(Widget):
_KEY_CTX_TYPE = "ctx_type"
_KEY_EVENT_TYPE = "event_type"
_KEY_EVENT_STATS = "event_stats"
_KEY_EVENT_REQ = "event_req"
def __init__(self, log_file_name=None):
"""A component to record all system-wide events.
Args:
log_file_name (str, optional): the log filename to save recorded events. Defaults to None.
"""
super().__init__()
all_ctx_reqs = {
"__run_num__": _CtxPropReq(dtype=str, is_private=False, is_sticky=True),
"__identity_name__": _CtxPropReq(dtype=str, is_private=False, is_sticky=True),
}
run_req = _EventReq(ctx_reqs=all_ctx_reqs, peer_ctx_reqs={})
self.event_reqs = {EventType.START_RUN: run_req, EventType.END_RUN: run_req} # event type => _EventReq
self.event_stats = {} # event_type => _EventStats
self._log_handler_added = False
self.log_file_name = log_file_name if log_file_name else "event_recorded.txt"
def event_tag(self, fl_ctx: FLContext):
event_type = fl_ctx.get_prop(self._KEY_EVENT_TYPE, "?")
event_id = fl_ctx.get_prop(FLContextKey.EVENT_ID, None)
if event_id:
return "[type={}, id={}]".format(event_type, event_id)
else:
return "[{}]".format(event_type)
def event_error_tag(self, fl_ctx: FLContext):
ctx_type = fl_ctx.get_prop(self._KEY_CTX_TYPE, "?")
return "Event {}: in {},".format(self.event_tag(fl_ctx), ctx_type)
def validate_prop(self, prop_name: str, req: _CtxPropReq, fl_ctx: FLContext):
stats = fl_ctx.get_prop(self._KEY_EVENT_STATS, None)
detail = fl_ctx.get_prop_detail(prop_name)
if not isinstance(detail, dict):
stats.prop_missing += 1
self.logger.error("{} required prop '{}' doesn't exist".format(self.event_error_tag(fl_ctx), prop_name))
return
value = detail["value"]
if value is None and not req.allow_none:
stats.prop_none_value += 1
self.logger.error(
"{} prop '{}' is None, but None is not allowed".format(self.event_error_tag(fl_ctx), prop_name)
)
if req.dtype is not None and value is not None:
if not isinstance(value, req.dtype):
stats.prop_dtype_mismatch += 1
self.logger.error(
"{} prop '{}' should be {}, but got {}".format(
self.event_error_tag(fl_ctx), prop_name, req.dtype, type(value)
)
)
if req.is_private and not detail["private"]:
stats.prop_attr_mismatch += 1
self.logger.error(
"{} prop '{}' should be private but is public".format(self.event_error_tag(fl_ctx), prop_name)
)
if req.is_private is not None and not req.is_private and detail["private"]:
stats.prop_attr_mismatch += 1
self.logger.error(
"{} prop '{}' should be public but is private".format(self.event_error_tag(fl_ctx), prop_name)
)
if req.is_sticky and not detail["sticky"]:
stats.prop_attr_mismatch += 1
self.logger.error(
"{} prop '{}' should be sticky but is non-sticky".format(self.event_error_tag(fl_ctx), prop_name)
)
if req.is_sticky is not None and not req.is_sticky and detail["sticky"]:
stats.prop_attr_mismatch += 1
self.logger.error(
"{} prop '{}' should be non-sticky but is sticky".format(self.event_error_tag(fl_ctx), prop_name)
)
def check_block_list(self, block_list, fl_ctx: FLContext):
stats = fl_ctx.get_prop(self._KEY_EVENT_STATS, None)
for prop_name in block_list:
detail = fl_ctx.get_prop_detail(prop_name)
if detail:
stats.prop_block_list_violation += 1
self.logger.error("{} prop {} is not expected".format(self.event_error_tag(fl_ctx), prop_name))
def check_props(self, fl_ctx: FLContext):
event_req = fl_ctx.get_prop(self._KEY_EVENT_REQ)
stats = fl_ctx.get_prop(self._KEY_EVENT_STATS)
for prop_name, req in event_req.ctx_reqs.items():
self.validate_prop(prop_name, req, fl_ctx)
self.check_block_list(event_req.ctx_block_list, fl_ctx)
if event_req.peer_ctx_reqs:
peer_ctx = fl_ctx.get_peer_context()
if not peer_ctx:
stats.peer_ctx_missing += 1
self.logger.error("{} expected peer_ctx not present".format(self.event_error_tag(fl_ctx)))
else:
for prop_name, req in event_req.peer_ctx_reqs.items():
self.validate_prop(prop_name, req, peer_ctx)
self.check_block_list(event_req.peer_ctx_block_list, peer_ctx)
def handle_event(self, event_type: str, fl_ctx: FLContext):
if not self._log_handler_added:
workspace = fl_ctx.get_engine().get_workspace()
app_dir = workspace.get_app_dir(fl_ctx.get_job_id())
output_file_handler = logging.FileHandler(os.path.join(app_dir, self.log_file_name))
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
output_file_handler.setFormatter(formatter)
self.logger.addHandler(output_file_handler)
self._log_handler_added = True
event_stats = self.event_stats.get(event_type, None)
if not event_stats:
event_stats = _EventStats()
self.event_stats[event_type] = event_stats
fl_ctx.set_prop(key=self._KEY_EVENT_STATS, value=event_stats, private=True, sticky=False)
fl_ctx.set_prop(key=self._KEY_EVENT_TYPE, value=event_type, private=True, sticky=False)
fl_ctx.set_prop(key=self._KEY_CTX_TYPE, value="fl_ctx", private=True, sticky=False)
self.log_info(fl_ctx, "Got event {}".format(self.event_tag(fl_ctx)), fire_event=False)
event_stats.call_count += 1
peer_ctx = fl_ctx.get_peer_context()
if peer_ctx:
event_id = fl_ctx.get_prop(FLContextKey.EVENT_ID)
peer_ctx.set_prop(key=FLContextKey.EVENT_ID, value=event_id, private=True, sticky=False)
peer_ctx.set_prop(key=self._KEY_EVENT_STATS, value=event_stats, private=True, sticky=False)
peer_ctx.set_prop(key=self._KEY_EVENT_TYPE, value=event_type, private=True, sticky=False)
peer_ctx.set_prop(key=self._KEY_CTX_TYPE, value="peer_ctx", private=True, sticky=False)
self.log_info(
fl_ctx, "Peer Context for event {}: {}".format(self.event_tag(fl_ctx), peer_ctx), fire_event=False
)
event_req = self.event_reqs.get(event_type, None)
fl_ctx.set_prop(key=self._KEY_EVENT_REQ, value=event_req, private=True, sticky=False)
if event_req:
self.check_props(fl_ctx)
if event_type == EventType.END_RUN:
# print stats
for e, s in self.event_stats.items():
self.log_info(fl_ctx, "Stats of {}: {}".format(e, vars(s)), fire_event=False)
class ServerEventRecorder(EventRecorder):
def __init__(self):
"""Server-specific event recorder."""
super().__init__()
task_data_filter_reqs = _EventReq(
ctx_reqs={
"__engine__": _CtxPropReq(dtype=ServerEngineSpec, is_private=True, is_sticky=True),
FLContextKey.TASK_ID: _CtxPropReq(dtype=str, is_private=True, is_sticky=False),
FLContextKey.TASK_NAME: _CtxPropReq(dtype=str, is_private=True, is_sticky=False),
FLContextKey.TASK_DATA: _CtxPropReq(dtype=Shareable, is_private=True, is_sticky=False, allow_none=True),
"testPrivateServerSticky": _CtxPropReq(dtype=str, is_private=True, is_sticky=True),
"testPublicServerSticky": _CtxPropReq(dtype=str, is_private=False, is_sticky=True),
},
ctx_block_list=[
"testPrivateServerNonSticky",
"testPublicServerNonSticky",
"testPrivateClientNonSticky",
"testPublicClientNonSticky",
"testPrivateClientSticky",
"testPublicClientSticky",
],
peer_ctx_reqs={
"__run_num__": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
"__identity_name__": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
"testPublicClientSticky": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
},
peer_ctx_block_list=[
"__engine__",
"testPrivateClientSticky",
"testPrivateClientNonSticky",
"testPublicClientNonSticky",
],
)
self.event_reqs.update(
{
EventType.BEFORE_TASK_DATA_FILTER: task_data_filter_reqs,
EventType.AFTER_TASK_DATA_FILTER: task_data_filter_reqs,
}
)
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
fl_ctx.set_prop(
key="testPrivateServerSticky", value="this is a server private sticky", private=True, sticky=True
)
fl_ctx.set_prop(
key="testPublicServerSticky", value="this is a server public sticky", private=False, sticky=True
)
fl_ctx.set_prop(
key="testPrivateServerNonSticky",
value="this is a server private non-sticky",
private=True,
sticky=False,
)
fl_ctx.set_prop(
key="testPublicServerNonSticky", value="this is a server public non-sticky", private=False, sticky=False
)
super().handle_event(event_type, fl_ctx)
class ClientEventRecorder(EventRecorder):
def __init__(self):
"""Client-specific event recorder."""
super().__init__()
task_data_filter_reqs = _EventReq(
ctx_reqs={
"__engine__": _CtxPropReq(dtype=ClientEngineSpec, is_private=True, is_sticky=True),
FLContextKey.TASK_ID: _CtxPropReq(dtype=str, is_private=True, is_sticky=False),
FLContextKey.TASK_NAME: _CtxPropReq(dtype=str, is_private=True, is_sticky=False),
FLContextKey.TASK_DATA: _CtxPropReq(dtype=Shareable, is_private=True, is_sticky=False, allow_none=True),
"testPrivateClientSticky": _CtxPropReq(dtype=str, is_private=True, is_sticky=True),
"testPublicClientSticky": _CtxPropReq(dtype=str, is_private=False, is_sticky=True),
},
ctx_block_list=[
"testPrivateServerNonSticky",
"testPublicServerNonSticky",
"testPrivateClientNonSticky",
"testPublicClientNonSticky",
"testPrivateServerSticky",
"testPublicServerSticky",
],
peer_ctx_reqs={
"__run_num__": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
"__identity_name__": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
"testPublicServerSticky": _CtxPropReq(dtype=str, is_private=None, is_sticky=None),
},
peer_ctx_block_list=[
"__engine__",
"testPrivateServerSticky",
"testPrivateServerNonSticky",
"testPublicServerNonSticky",
],
)
self.event_reqs.update(
{
EventType.BEFORE_TASK_DATA_FILTER: task_data_filter_reqs,
EventType.AFTER_TASK_DATA_FILTER: task_data_filter_reqs,
}
)
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
fl_ctx.set_prop(
key="testPrivateClientSticky", value="this is a client private sticky", private=True, sticky=True
)
fl_ctx.set_prop(
key="testPublicClientSticky", value="this is a client public sticky", private=False, sticky=True
)
fl_ctx.set_prop(
key="testPrivateClientNonSticky",
value="this is a client private non-sticky",
private=True,
sticky=False,
)
fl_ctx.set_prop(
key="testPublicClientNonSticky", value="this is a client public non-sticky", private=False, sticky=False
)
super().handle_event(event_type, fl_ctx)
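# Illustrative sketch, not NVFlare source: the per-prop checks validate_prop()
# performs above, reduced to plain dicts. The detail dict mirrors the shape
# returned by FLContext.get_prop_detail() ("value", "private", "sticky"):
def check_prop(detail, dtype, is_private, is_sticky, allow_none=False):
    errors = []
    if not isinstance(detail, dict):
        return ["missing"]
    value = detail["value"]
    if value is None and not allow_none:
        errors.append("none value")
    if dtype is not None and value is not None and not isinstance(value, dtype):
        errors.append("dtype mismatch")
    if is_private is not None and detail["private"] != is_private:
        errors.append("privacy mismatch")
    if is_sticky is not None and detail["sticky"] != is_sticky:
        errors.append("stickiness mismatch")
    return errors

assert check_prop({"value": "run_1", "private": False, "sticky": True}, str, False, True) == []
assert check_prop({"value": 7, "private": True, "sticky": True}, str, False, True) == [
    "dtype mismatch", "privacy mismatch"]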
# ==== NVFlare-main: nvflare/app_common/widgets/event_recorder.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.apis.fl_constant import EventScope, FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.widgets.widget import Widget
FED_EVENT_PREFIX = "fed."
class ConvertToFedEvent(Widget):
def __init__(self, events_to_convert: List[str], fed_event_prefix=FED_EVENT_PREFIX):
"""Converts local event to federated events.
Args:
events_to_convert (List[str]): A list of event names to be converted.
fed_event_prefix (str): The prefix that will be added to the converted event's name.
"""
super().__init__()
self.events_to_convert = events_to_convert
self.fed_event_prefix = fed_event_prefix
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type in self.events_to_convert:
event_scope = fl_ctx.get_prop(key=FLContextKey.EVENT_SCOPE, default=EventScope.LOCAL)
if event_scope == EventScope.FEDERATION:
# already a fed event
return
data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if data is None:
self.log_error(fl_ctx, "Missing event data.")
return
if not isinstance(data, Shareable):
self.log_error(fl_ctx, f"Expect data to be shareable but got {type(data)}")
return
self.fire_fed_event(self.fed_event_prefix + event_type, data, fl_ctx)
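# Usage sketch: this widget is typically registered as a component so that the
# listed local events are re-fired federation-wide with the "fed." prefix.
# Shown as a Python dict mirroring a config_fed_client.json component entry
# (the id and event names below are illustrative):
_component = {
    "id": "event_to_fed",
    "path": "nvflare.app_common.widgets.convert_to_fed_event.ConvertToFedEvent",
    "args": {"events_to_convert": ["analytix_log_stats"], "fed_event_prefix": "fed."},
}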
# ==== NVFlare-main: nvflare/app_common/widgets/convert_to_fed_event.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==== NVFlare-main: nvflare/app_common/widgets/__init__.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from nvflare.apis.dxo import DataKind, from_shareable, get_leaf_dxos
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.widgets.widget import Widget
class ValidationJsonGenerator(Widget):
def __init__(self, results_dir=AppConstants.CROSS_VAL_DIR, json_file_name="cross_val_results.json"):
"""Catches VALIDATION_RESULT_RECEIVED event and generates a results.json containing accuracy of each
validated model.
Args:
results_dir (str, optional): Name of the results directory. Defaults to cross_site_val
json_file_name (str, optional): Name of the json file. Defaults to cross_val_results.json
"""
super(ValidationJsonGenerator, self).__init__()
self._results_dir = results_dir
self._val_results = {}
self._json_file_name = json_file_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._val_results.clear()
elif event_type == AppEventType.VALIDATION_RESULT_RECEIVED:
model_owner = fl_ctx.get_prop(AppConstants.MODEL_OWNER, None)
data_client = fl_ctx.get_prop(AppConstants.DATA_CLIENT, None)
val_results = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT, None)
if not model_owner:
self.log_error(
fl_ctx, "model_owner unknown. Validation result will not be saved to json", fire_event=False
)
if not data_client:
self.log_error(
fl_ctx, "data_client unknown. Validation result will not be saved to json", fire_event=False
)
if val_results:
try:
dxo = from_shareable(val_results)
dxo.validate()
if dxo.data_kind == DataKind.METRICS:
if data_client not in self._val_results:
self._val_results[data_client] = {}
self._val_results[data_client][model_owner] = dxo.data
elif dxo.data_kind == DataKind.COLLECTION:
# The DXO could contain multiple sub-DXOs (e.g. received from a T2 system)
leaf_dxos, errors = get_leaf_dxos(dxo, data_client)
if errors:
for err in errors:
self.log_error(fl_ctx, f"Bad result from {data_client}: {err}")
for _sub_data_client, _dxo in leaf_dxos.items():
_dxo.validate()
if _sub_data_client not in self._val_results:
self._val_results[_sub_data_client] = {}
self._val_results[_sub_data_client][model_owner] = _dxo.data
else:
self.log_error(
fl_ctx,
f"Expected dxo of kind METRICS or COLLECTION but got {dxo.data_kind} instead.",
fire_event=False,
)
except Exception:
self.log_exception(fl_ctx, "Exception in handling validation result.", fire_event=False)
else:
self.log_error(fl_ctx, "Validation result not found.", fire_event=False)
elif event_type == EventType.END_RUN:
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_job_id())
cross_val_res_dir = os.path.join(run_dir, self._results_dir)
if not os.path.exists(cross_val_res_dir):
os.makedirs(cross_val_res_dir)
res_file_path = os.path.join(cross_val_res_dir, self._json_file_name)
with open(res_file_path, "w") as f:
json.dump(self._val_results, f)
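# Shape sketch of the file this widget writes at END_RUN: a two-level map of
# data client -> model owner -> metrics dict. Site names and metric values
# below are illustrative only:
_example_cross_val_results = {
    "site-1": {"site-1": {"accuracy": 0.92}, "site-2": {"accuracy": 0.89}},
    "site-2": {"site-1": {"accuracy": 0.90}, "site-2": {"accuracy": 0.93}},
}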
# ==== NVFlare-main: nvflare/app_common/widgets/validation_json_generator.py ====
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from threading import Lock
from typing import List, Optional
from nvflare.apis.analytix import AnalyticsData, AnalyticsDataType
from nvflare.apis.dxo import DXO
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import EventScope, FLContextKey, ReservedKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.tracking.tracker_types import LogWriterName, TrackConst
from nvflare.fuel.utils.deprecated import deprecated
from nvflare.widgets.widget import Widget
ANALYTIC_EVENT_TYPE = "analytix_log_stats"
def send_analytic_dxo(comp: FLComponent, dxo: DXO, fl_ctx: FLContext, event_type: str = ANALYTIC_EVENT_TYPE):
"""Sends analytic dxo.
Sends analytic dxo by firing an event (of type "analytix_log_stats" by default unless otherwise specified)
with the dxo in the fl_ctx.
Args:
comp (FLComponent): An FLComponent.
dxo (DXO): analytic data in dxo.
fl_ctx (FLContext): fl context info.
event_type (str): Event type.
"""
if not isinstance(comp, FLComponent):
raise TypeError(f"expect comp to be an instance of FLComponent, but got {type(comp)}")
if not isinstance(dxo, DXO):
raise TypeError(f"expect dxo to be an instance of DXO, but got {type(dxo)}")
if not isinstance(fl_ctx, FLContext):
raise TypeError(f"expect fl_ctx to be an instance of FLContext, but got {type(fl_ctx)}")
fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=dxo.to_shareable(), private=True, sticky=False)
comp.fire_event(event_type=event_type, fl_ctx=fl_ctx)
def create_analytic_dxo(
tag: str,
value,
data_type: AnalyticsDataType,
writer: LogWriterName = LogWriterName.TORCH_TB,
**kwargs,
) -> DXO:
"""Creates the analytic DXO.
Args:
tag (str): the tag associated with this value.
value: the analytic data.
data_type: (AnalyticsDataType): analytic data type.
writer (LogWriterName): syntax of the sender, such as TensorBoard or MLflow.
kwargs: additional arguments to be passed into the receiver side's function.
Returns:
A DXO object that contains the analytic data.
"""
data = AnalyticsData(key=tag, value=value, data_type=data_type, sender=writer, **kwargs)
dxo = data.to_dxo()
return dxo
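# Usage sketch (a hypothetical helper; the tag and value are illustrative): the
# two functions above are meant to be used together from inside an FLComponent.
def _log_train_loss(comp: FLComponent, fl_ctx: FLContext, loss: float):
    # build the DXO once, then fire it as the default "analytix_log_stats" event
    dxo = create_analytic_dxo(tag="train_loss", value=loss, data_type=AnalyticsDataType.SCALAR)
    send_analytic_dxo(comp, dxo=dxo, fl_ctx=fl_ctx)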
class AnalyticsSender(Widget):
def __init__(self, event_type=ANALYTIC_EVENT_TYPE, writer_name=LogWriterName.TORCH_TB):
"""Sender for analytics data.
This class has some legacy methods that implement some common methods following signatures from
PyTorch SummaryWriter. New code should use :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead,
which contains an AnalyticsSender.
Args:
event_type (str): event type to fire (defaults to "analytix_log_stats").
writer_name: the log writer for syntax information (defaults to LogWriterName.TORCH_TB)
"""
super().__init__()
self.engine = None
self.event_type = event_type
self.writer = writer_name
def get_writer_name(self) -> LogWriterName:
return self.writer
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.ABOUT_TO_START_RUN:
self.engine = fl_ctx.get_engine()
def add(self, tag: str, value, data_type: AnalyticsDataType, global_step: Optional[int] = None, **kwargs):
"""Create and send a DXO by firing an event.
Args:
tag (str): Tag name
value (_type_): Value to send
data_type (AnalyticsDataType): Data type of the value being sent
global_step (optional, int): Global step value.
Raises:
TypeError: global_step must be an int
"""
kwargs = kwargs if kwargs else {}
if global_step is not None:
if not isinstance(global_step, int):
raise TypeError(f"Expect global step to be an instance of int, but got {type(global_step)}")
kwargs[TrackConst.GLOBAL_STEP_KEY] = global_step
dxo = create_analytic_dxo(tag=tag, value=value, data_type=data_type, writer=self.get_writer_name(), **kwargs)
with self.engine.new_context() as fl_ctx:
send_analytic_dxo(self, dxo=dxo, fl_ctx=fl_ctx, event_type=self.event_type)
@deprecated(
"This method is deprecated, please use :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead."
)
def add_scalar(self, tag: str, scalar: float, global_step: Optional[int] = None, **kwargs):
"""Legacy method to send a scalar.
        This follows the signature from PyTorch SummaryWriter and is kept for backward
        compatibility with existing code. If you are writing new code, use
        :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead.
Args:
tag (str): Data identifier.
scalar (float): Value to send.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, global_step=global_step, **kwargs)
@deprecated(
"This method is deprecated, please use :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead."
)
def add_scalars(self, tag: str, scalars: dict, global_step: Optional[int] = None, **kwargs):
"""Legacy method to send scalars.
        This follows the signature from PyTorch SummaryWriter and is kept for backward
        compatibility with existing code. If you are writing new code, use
        :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead.
Args:
tag (str): The parent name for the tags.
scalars (dict): Key-value pair storing the tag and corresponding values.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, global_step=global_step, **kwargs)
@deprecated(
"This method is deprecated, please use :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead."
)
def flush(self):
"""Legacy method to flush out the message.
        This follows the signature from PyTorch SummaryWriter and is kept for backward
        compatibility with existing code. If you are writing new code, use
        :py:class:`TBWriter <nvflare.app_opt.tracking.tb.tb_writer.TBWriter>` instead.
        This method does nothing; it exists only to mimic the PyTorch SummaryWriter interface.
"""
pass
def close(self):
"""Close resources."""
if self.engine:
self.engine = None
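# A usage sketch (assumptions noted): AnalyticsSender is normally instantiated by the
# FL runtime, which fires EventType.ABOUT_TO_START_RUN so that `self.engine` is set.
# `sender` below stands for such an already-configured instance.
#
#     sender.add(tag="val_accuracy", value=0.91, data_type=AnalyticsDataType.SCALAR, global_step=10)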
class AnalyticsReceiver(Widget, ABC):
def __init__(self, events: Optional[List[str]] = None):
"""Receives analytic data.
Args:
            events (optional, List[str]): A list of event types that this receiver will handle.
"""
super().__init__()
if events is None:
events = [ANALYTIC_EVENT_TYPE, f"fed.{ANALYTIC_EVENT_TYPE}"]
self.events = events
self._save_lock = Lock()
self._end = False
@abstractmethod
def initialize(self, fl_ctx: FLContext):
"""Initializes the receiver.
Called after EventType.START_RUN.
Args:
fl_ctx (FLContext): fl context.
"""
pass
@abstractmethod
def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):
"""Saves the received data.
Specific implementations of AnalyticsReceiver will implement save in their own way.
Args:
fl_ctx (FLContext): fl context.
shareable (Shareable): the received message.
record_origin (str): the sender of this message / record.
"""
pass
@abstractmethod
def finalize(self, fl_ctx: FLContext):
"""Finalizes the receiver.
Called after EventType.END_RUN.
Args:
fl_ctx (FLContext): fl context.
"""
pass
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type in self.events:
if self._end:
self.log_debug(fl_ctx, f"Already received end run event, drop event {event_type}.", fire_event=False)
return
data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if data is None:
self.log_error(fl_ctx, "Missing event data.", fire_event=False)
return
if not isinstance(data, Shareable):
self.log_error(
fl_ctx, f"Expect data to be an instance of Shareable but got {type(data)}", fire_event=False
)
return
# if fed event use peer name to save
if fl_ctx.get_prop(FLContextKey.EVENT_SCOPE) == EventScope.FEDERATION:
record_origin = data.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
else:
record_origin = fl_ctx.get_identity_name()
if record_origin is None:
self.log_error(fl_ctx, "record_origin can't be None.", fire_event=False)
return
with self._save_lock:
self.save(shareable=data, fl_ctx=fl_ctx, record_origin=record_origin)
elif event_type == EventType.END_RUN:
self._end = True
self.finalize(fl_ctx)
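# A minimal subclass sketch (hypothetical, for illustration only): a receiver that
# prints every incoming record. A real implementation would persist the data, for
# example to TensorBoard event files or an MLflow tracking server.
#
# class PrintingAnalyticsReceiver(AnalyticsReceiver):
#     def initialize(self, fl_ctx: FLContext):
#         pass
#
#     def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):
#         print(f"record from {record_origin}: {shareable}")
#
#     def finalize(self, fl_ctx: FLContext):
#         pass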
| NVFlare-main | nvflare/app_common/widgets/streaming.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.widget import Widget
class IntimeModelSelector(Widget):
def __init__(
self,
weigh_by_local_iter=False,
aggregation_weights=None,
validation_metric_name=MetaKey.INITIAL_METRICS,
key_metric: str = "val_accuracy",
negate_key_metric: bool = False,
):
"""Handler to determine if the model is globally best.
Args:
            weigh_by_local_iter (bool, optional): whether the metrics should be weighted by the trainer's number of iterations. Defaults to False.
aggregation_weights (dict, optional): a mapping of client name to float for aggregation. Defaults to None.
validation_metric_name (str, optional): key used to save initial validation metric in the
DXO meta properties (defaults to MetaKey.INITIAL_METRICS).
key_metric: if metrics are a `dict`, `key_metric` can select the metric used for global model selection.
Defaults to "val_accuracy".
negate_key_metric: Whether to invert the key metric. Should be used if key metric is a loss. Defaults to `False`.
"""
super().__init__()
self.val_metric = self.best_val_metric = -np.inf
self.weigh_by_local_iter = weigh_by_local_iter
self.validation_metric_name = validation_metric_name
self.aggregation_weights = aggregation_weights or {}
self.key_metric = key_metric
self.negate_key_metric = negate_key_metric
self.logger.info(f"model selection weights control: {aggregation_weights}")
self._reset_stats()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._startup()
elif event_type == AppEventType.ROUND_STARTED:
self._reset_stats()
elif event_type == AppEventType.BEFORE_CONTRIBUTION_ACCEPT:
self._before_accept(fl_ctx)
elif event_type == AppEventType.BEFORE_AGGREGATION:
self._before_aggregate(fl_ctx)
def _startup(self):
self._reset_stats()
def _reset_stats(self):
self.validation_metric_weighted_sum = 0
self.validation_metric_sum_of_weights = 0
def _before_accept(self, fl_ctx: FLContext):
peer_ctx = fl_ctx.get_peer_context()
shareable: Shareable = peer_ctx.get_prop(FLContextKey.SHAREABLE)
try:
dxo = from_shareable(shareable)
except Exception as e:
self.log_exception(
fl_ctx, f"shareable data is not a valid DXO. Received Exception: {secure_format_exception(e)}"
)
return False
if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS, DataKind.COLLECTION):
self.log_debug(fl_ctx, "cannot handle {}".format(dxo.data_kind))
return False
if dxo.data is None:
self.log_debug(fl_ctx, "no data to filter")
return False
contribution_round = shareable.get_cookie(AppConstants.CONTRIBUTION_ROUND)
client_name = peer_ctx.get_identity_name(default="?")
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
if current_round == 0:
self.log_debug(fl_ctx, "skipping round 0")
return False # There is no aggregated model at round 0
if contribution_round != current_round:
self.log_warning(
fl_ctx,
f"discarding shareable from {client_name} for round: {contribution_round}. Current round is: {current_round}",
)
return False
validation_metric = dxo.get_meta_prop(self.validation_metric_name)
if validation_metric is None:
self.log_warning(fl_ctx, f"validation metric not existing in {client_name}")
return False
# select key metric if dictionary of metrics is provided
if isinstance(validation_metric, dict):
if self.key_metric in validation_metric:
validation_metric = validation_metric[self.key_metric]
else:
self.log_warning(
fl_ctx,
f"validation metric `{self.key_metric}` not in metrics from {client_name}: {list(validation_metric.keys())}",
)
return False
if self.negate_key_metric:
validation_metric = -1.0 * validation_metric
self.log_info(fl_ctx, f"validation metric {validation_metric} from client {client_name}")
if self.weigh_by_local_iter:
n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1.0)
else:
n_iter = 1.0
aggregation_weights = self.aggregation_weights.get(client_name, 1.0)
self.log_debug(fl_ctx, f"aggregation weight: {aggregation_weights}")
weight = n_iter * aggregation_weights
self.validation_metric_weighted_sum += validation_metric * weight
self.validation_metric_sum_of_weights += weight
return True
def _before_aggregate(self, fl_ctx):
if self.validation_metric_sum_of_weights == 0:
self.log_debug(fl_ctx, "nothing accumulated")
return False
self.val_metric = self.validation_metric_weighted_sum / self.validation_metric_sum_of_weights
self.logger.debug(f"weighted validation metric {self.val_metric}")
if self.val_metric > self.best_val_metric:
self.best_val_metric = self.val_metric
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
self.log_info(fl_ctx, f"new best validation metric at round {current_round}: {self.best_val_metric}")
# Fire event to notify that the current global model is a new best
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, self.best_val_metric, private=True, sticky=False)
self.fire_event(AppEventType.GLOBAL_BEST_MODEL_AVAILABLE, fl_ctx)
self._reset_stats()
return True
class IntimeModelSelectionHandler(IntimeModelSelector):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger.warning("'IntimeModelSelectionHandler' was renamed to 'IntimeModelSelector'")
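# A configuration sketch (hypothetical values): IntimeModelSelector is typically added
# as a component in the server config (config_fed_server.json), for example:
#
#     {
#         "id": "model_selector",
#         "path": "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector",
#         "args": {"key_metric": "val_accuracy"}
#     }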
| NVFlare-main | nvflare/app_common/widgets/intime_model_selector.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/lighter/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import traceback
from abc import ABC
from typing import List
from nvflare.apis.utils.format_check import name_check
class Participant(object):
def __init__(self, type: str, name: str, org: str, enable_byoc: bool = False, *args, **kwargs):
"""Class to represent a participant.
        Each participant communicates with other participants. Therefore, each participant has its
        own name, type, organization it belongs to, rules and other information.
Args:
type (str): server, client, admin or other string that builders can handle
name (str): system-wide unique name
org (str): system-wide unique organization
            enable_byoc (bool, optional): whether this participant allows BYOC code to be loaded. Defaults to False.
Raises:
ValueError: if name or org is not compliant with characters or format specification.
"""
err, reason = name_check(name, type)
if err:
raise ValueError(reason)
err, reason = name_check(org, "org")
if err:
raise ValueError(reason)
self.type = type
self.name = name
self.org = org
self.subject = name
self.enable_byoc = enable_byoc
self.props = kwargs
class Project(object):
def __init__(self, name: str, description: str, participants: List[Participant]):
"""A container class to hold information about this FL project.
This class only holds information. It does not drive the workflow.
Args:
name (str): the project name
            description (str): brief description of this project
participants (List[Participant]): All the participants that will join this project
Raises:
ValueError: when duplicate name found in participants list
"""
self.name = name
all_names = list()
for p in participants:
if p.name in all_names:
raise ValueError(f"Unable to add a duplicate name {p.name} into this project.")
else:
all_names.append(p.name)
self.description = description
self.participants = participants
def get_participants_by_type(self, type, first_only=True):
found = list()
for p in self.participants:
if p.type == type:
if first_only:
return p
else:
found.append(p)
return found
class Builder(ABC):
def initialize(self, ctx: dict):
pass
def build(self, project: Project, ctx: dict):
pass
def finalize(self, ctx: dict):
pass
def get_wip_dir(self, ctx: dict):
return ctx.get("wip_dir")
    def get_ws_dir(self, participant: Participant, ctx: dict):
        return os.path.join(self.get_wip_dir(ctx), participant.name)
def get_kit_dir(self, participant: Participant, ctx: dict):
return os.path.join(self.get_ws_dir(participant, ctx), "startup")
def get_transfer_dir(self, participant: Participant, ctx: dict):
return os.path.join(self.get_ws_dir(participant, ctx), "transfer")
def get_local_dir(self, participant: Participant, ctx: dict):
return os.path.join(self.get_ws_dir(participant, ctx), "local")
def get_state_dir(self, ctx: dict):
return ctx.get("state_dir")
def get_resources_dir(self, ctx: dict):
return ctx.get("resources_dir")
class Provisioner(object):
def __init__(self, root_dir: str, builders: List[Builder]):
"""Workflow class that drive the provision process.
Provisioner's tasks:
- Maintain the provision workspace folder structure;
- Invoke Builders to generate the content of each startup kit
ROOT_WORKSPACE Folder Structure::
root_workspace_dir_name: this is the root of the workspace
project_dir_name: the root dir of the project, could be named after the project
resources: stores resource files (templates, configs, etc.) of the Provisioner and Builders
prod: stores the current set of startup kits (production)
                        participant_dir: stores content files generated by builders
wip: stores the set of startup kits to be created (WIP)
                        participant_dir: stores content files generated by builders
state: stores the persistent state of the Builders
Args:
root_dir (str): the directory path to hold all generated or intermediate folders
builders (List[Builder]): all builders that will be called to build the content
"""
self.root_dir = root_dir
self.builders = builders
self.ctx = None
def _make_dir(self, dirs):
for dir in dirs:
if not os.path.exists(dir):
os.makedirs(dir)
def _prepare_workspace(self, ctx):
workspace = ctx.get("workspace")
wip_dir = os.path.join(workspace, "wip")
state_dir = os.path.join(workspace, "state")
resources_dir = os.path.join(workspace, "resources")
ctx.update(dict(wip_dir=wip_dir, state_dir=state_dir, resources_dir=resources_dir))
dirs = [workspace, resources_dir, wip_dir, state_dir]
self._make_dir(dirs)
def provision(self, project: Project):
# ctx = {"workspace": os.path.join(self.root_dir, project.name), "project": project}
workspace = os.path.join(self.root_dir, project.name)
ctx = {"workspace": workspace} # project is more static information while ctx is dynamic
self._prepare_workspace(ctx)
try:
for b in self.builders:
b.initialize(ctx)
# call builders!
for b in self.builders:
b.build(project, ctx)
for b in self.builders[::-1]:
b.finalize(ctx)
        except Exception:
prod_dir = ctx.get("current_prod_dir")
if prod_dir:
shutil.rmtree(prod_dir)
print("Exception raised during provision. Incomplete prod_n folder removed.")
traceback.print_exc()
finally:
wip_dir = ctx.get("wip_dir")
if wip_dir:
shutil.rmtree(wip_dir)
return ctx
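# A minimal usage sketch (hypothetical participants; the builder list is elided and
# would typically include builders such as WorkspaceBuilder and CertBuilder from
# nvflare.lighter.impl): driving a provision run programmatically.
#
#     server = Participant(type="server", name="server1", org="example")
#     client = Participant(type="client", name="site-1", org="example")
#     project = Project(name="demo", description="demo project", participants=[server, client])
#     provisioner = Provisioner(root_dir="/tmp/provision_root", builders=[...])
#     ctx = provisioner.provision(project)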
| NVFlare-main | nvflare/lighter/spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def main():
print("*****************************************************************")
print("** poc command is deprecated, please use 'nvflare poc' instead **")
print("*****************************************************************")
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/lighter/poc.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import secrets
import shutil
from base64 import b64decode, b64encode
import yaml
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from nvflare.lighter.impl.cert import load_crt
def generate_password(passlen=16):
    # use a cryptographically secure source of randomness for password generation
    s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return "".join(secrets.choice(s) for _ in range(passlen))
def sign_one(content, signing_pri_key):
signature = signing_pri_key.sign(
data=content,
padding=padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
return b64encode(signature).decode("utf-8")
def load_private_key_file(file_path):
with open(file_path, "rt") as f:
pri_key = serialization.load_pem_private_key(f.read().encode("ascii"), password=None, backend=default_backend())
return pri_key
def sign_folders(folder, signing_pri_key, crt_path, max_depth=9999):
depth = 0
for root, folders, files in os.walk(folder):
depth = depth + 1
signatures = dict()
for file in files:
if file == ".__nvfl_sig.json" or file == ".__nvfl_submitter.crt":
continue
signature = signing_pri_key.sign(
data=open(os.path.join(root, file), "rb").read(),
padding=padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
signatures[file] = b64encode(signature).decode("utf-8")
for folder in folders:
signature = signing_pri_key.sign(
data=folder.encode("utf-8"),
padding=padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
signatures[folder] = b64encode(signature).decode("utf-8")
json.dump(signatures, open(os.path.join(root, ".__nvfl_sig.json"), "wt"))
shutil.copyfile(crt_path, os.path.join(root, ".__nvfl_submitter.crt"))
if depth >= max_depth:
break
def verify_folder_signature(src_folder, root_ca_path):
try:
root_ca_cert = load_crt(root_ca_path)
root_ca_public_key = root_ca_cert.public_key()
for root, folders, files in os.walk(src_folder):
try:
signatures = json.load(open(os.path.join(root, ".__nvfl_sig.json"), "rt"))
cert = load_crt(os.path.join(root, ".__nvfl_submitter.crt"))
public_key = cert.public_key()
            except Exception:
                continue  # TODO: shall return False
root_ca_public_key.verify(
cert.signature, cert.tbs_certificate_bytes, padding.PKCS1v15(), cert.signature_hash_algorithm
)
for k in signatures:
signatures[k] = b64decode(signatures[k].encode("utf-8"))
for file in files:
if file == ".__nvfl_sig.json" or file == ".__nvfl_submitter.crt":
continue
signature = signatures.get(file)
if signature:
public_key.verify(
signature=signature,
data=open(os.path.join(root, file), "rb").read(),
padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),
algorithm=hashes.SHA256(),
)
for folder in folders:
signature = signatures.get(folder)
if signature:
public_key.verify(
signature=signature,
data=folder.encode("utf-8"),
padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),
algorithm=hashes.SHA256(),
)
return True
    except Exception:
        return False
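# A sign/verify roundtrip sketch (hypothetical paths): sign every file and sub-folder
# under a folder with a submitter's private key, then verify the result against the
# root CA certificate that signed the submitter's certificate.
#
#     pri_key = load_private_key_file("client.key")
#     sign_folders("/tmp/my_job", pri_key, "client.crt")
#     assert verify_folder_signature("/tmp/my_job", "rootCA.pem")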
def sign_all(content_folder, signing_pri_key):
signatures = dict()
for f in os.listdir(content_folder):
path = os.path.join(content_folder, f)
if os.path.isfile(path):
signature = signing_pri_key.sign(
data=open(path, "rb").read(),
padding=padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
signatures[f] = b64encode(signature).decode("utf-8")
return signatures
def load_yaml(file):
if isinstance(file, str):
return yaml.safe_load(open(file, "r"))
elif isinstance(file, bytes):
return yaml.safe_load(file)
else:
return None
def sh_replace(src, mapping_dict):
result = src
for k, v in mapping_dict.items():
result = result.replace("{~~" + k + "~~}", str(v))
return result
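# Illustration: sh_replace substitutes "{~~key~~}" placeholders, so
# sh_replace("port={~~admin_port~~}", {"admin_port": 8003}) returns "port=8003".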
def update_project_server_name_config(project_config: dict, old_server_name, server_name) -> dict:
update_participant_server_name(project_config, old_server_name, server_name)
update_overseer_server_name(project_config, old_server_name, server_name)
return project_config
def update_overseer_server_name(project_config, old_server_name, server_name):
# update overseer_agent builder
builders = project_config.get("builders", [])
for b in builders:
if "args" in b:
if "overseer_agent" in b["args"]:
end_point = b["args"]["overseer_agent"]["args"]["sp_end_point"]
new_end_point = end_point.replace(old_server_name, server_name)
b["args"]["overseer_agent"]["args"]["sp_end_point"] = new_end_point
def update_participant_server_name(project_config, old_server_name, new_server_name):
participants = project_config["participants"]
for p in participants:
if p["type"] == "server" and p["name"] == old_server_name:
p["name"] = new_server_name
return
def update_project_server_name(project_file: str, old_server_name, server_name):
with open(project_file, "r") as file:
project_config = yaml.safe_load(file)
if not project_config:
raise RuntimeError("project_config is empty")
update_project_server_name_config(project_config, old_server_name, server_name)
with open(project_file, "w") as file:
yaml.dump(project_config, file)
def update_storage_locations(
local_dir: str,
workspace: str,
default_resource_name: str = "resources.json.default",
job_storage_name: str = "jobs-storage",
snapshot_storage_name: str = "snapshot-storage",
):
"""Creates resources.json with snapshot-storage and jobs-storage set as folders directly under the workspace
for the provided local_dir."""
default_resource = f"{local_dir}/{default_resource_name}"
target_resource = f"{local_dir}/resources.json"
job_storage = f"{workspace}/{job_storage_name}"
snapshot_storage = f"{workspace}/{snapshot_storage_name}"
# load resources.json
with open(default_resource, "r") as f:
resources = json.load(f)
# update resources
resources["snapshot_persistor"]["args"]["storage"]["args"]["root_dir"] = snapshot_storage
components = resources["components"]
job_mgr_comp = [comp for comp in components if comp["id"] == "job_manager"][0]
job_mgr_comp["args"]["uri_root"] = job_storage
# Serializing json, Writing to resources.json
json_object = json.dumps(resources, indent=4)
with open(target_resource, "w") as outfile:
outfile.write(json_object)
| NVFlare-main | nvflare/lighter/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import os
import pathlib
import shutil
import sys
from typing import Optional
from nvflare.fuel.utils.class_utils import instantiate_class
from nvflare.lighter.spec import Participant, Project, Provisioner
from nvflare.lighter.utils import load_yaml
adding_client_error_msg = """
name: $SITE-NAME
org: $ORGANIZATION_NAME
components:
resource_manager: # This id is reserved by system. Do not change it.
path: nvflare.app_common.resource_managers.gpu_resource_manager.GPUResourceManager
args:
      num_of_gpus: 4
mem_per_gpu_in_GiB: 16
resource_consumer: # This id is reserved by system. Do not change it.
path: nvflare.app_common.resource_consumers.gpu_resource_consumer.GPUResourceConsumer
args:
"""
adding_user_error_msg = """
name: $USER_EMAIL_ADDRESS
org: $ORGANIZATION_NAME
role: $ROLE
"""
def define_provision_parser(parser):
parser.add_argument("-p", "--project_file", type=str, default="project.yml", help="file to describe FL project")
parser.add_argument("-w", "--workspace", type=str, default="workspace", help="directory used by provision")
parser.add_argument("-c", "--custom_folder", type=str, default=".", help="additional folder to load python codes")
parser.add_argument("--add_user", type=str, default="", help="yaml file for added user")
parser.add_argument("--add_client", type=str, default="", help="yaml file for added client")
def has_no_arguments() -> bool:
last_item = sys.argv[-1]
return last_item.endswith("provision") or last_item.endswith("provision.py")
def handle_provision(args):
file_path = pathlib.Path(__file__).parent.absolute()
current_path = os.getcwd()
custom_folder_path = os.path.join(current_path, args.custom_folder)
sys.path.append(custom_folder_path)
# main project file
project_file = args.project_file
current_project_yml = os.path.join(current_path, "project.yml")
if has_no_arguments() and not os.path.exists(current_project_yml):
files = {"1": "ha_project.yml", "2": "dummy_project.yml", "3": None}
print("No project.yml found in current folder.\nThere are two types of templates for project.yml.")
print(
"1) project.yml for HA mode\n2) project.yml for non-HA mode\n3) Don't generate project.yml. Exit this program."
)
answer = input(f"Which type of project.yml should be generated at {current_project_yml} for you? (1/2/3) ")
answer = answer.strip()
src_project = files.get(answer, None)
if src_project:
shutil.copyfile(os.path.join(file_path, src_project), current_project_yml)
print(
f"{current_project_yml} was created. Please edit it to fit your FL configuration. "
+ "Once done please run nvflare provision command again with newly edited project.yml file"
)
else:
print(f"{answer} was selected. No project.yml was created.")
exit(0)
workspace = args.workspace
workspace_full_path = os.path.join(current_path, workspace)
project_full_path = os.path.join(current_path, project_file)
print(f"Project yaml file: {project_full_path}.")
add_user_full_path = os.path.join(current_path, args.add_user) if args.add_user else None
add_client_full_path = os.path.join(current_path, args.add_client) if args.add_client else None
provision(project_full_path, workspace_full_path, add_user_full_path, add_client_full_path)
def gen_default_project_config(src_project_name, dest_project_file):
file_path = pathlib.Path(__file__).parent.absolute()
shutil.copyfile(os.path.join(file_path, src_project_name), dest_project_file)
def provision(
project_full_path: str,
workspace_full_path: str,
add_user_full_path: Optional[str] = None,
add_client_full_path: Optional[str] = None,
):
project_dict = load_yaml(project_full_path)
project = prepare_project(project_dict, add_user_full_path, add_client_full_path)
builders = prepare_builders(project_dict)
provisioner = Provisioner(workspace_full_path, builders)
provisioner.provision(project)
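# A usage sketch (hypothetical paths): running a provision pass programmatically
# instead of through the CLI entry point.
#
#     provision(
#         project_full_path="/path/to/project.yml",
#         workspace_full_path="/path/to/workspace",
#     )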
def prepare_builders(project_dict):
builders = list()
for b in project_dict.get("builders"):
path = b.get("path")
args = b.get("args")
builders.append(instantiate_class(path, args))
return builders
def prepare_project(project_dict, add_user_file_path=None, add_client_file_path=None):
api_version = project_dict.get("api_version")
if api_version not in [3]:
raise ValueError(f"API version expected 3 but found {api_version}")
project_name = project_dict.get("name")
project_description = project_dict.get("description", "")
participants = list()
for p in project_dict.get("participants"):
participants.append(Participant(**p))
if add_user_file_path:
add_extra_users(add_user_file_path, participants)
if add_client_file_path:
add_extra_clients(add_client_file_path, participants)
project = Project(name=project_name, description=project_description, participants=participants)
n_servers = len(project.get_participants_by_type("server", first_only=False))
if n_servers > 2:
        raise ValueError(
            f"Configuration error: expected 1 or 2 servers to be provisioned, but project contains {n_servers} servers."
        )
return project
def add_extra_clients(add_client_file_path, participants):
try:
extra = load_yaml(add_client_file_path)
extra.update({"type": "client"})
participants.append(Participant(**extra))
except Exception as e:
print("** Error during adding client **")
print("The yaml file format is")
print(adding_client_error_msg)
exit(0)
def add_extra_users(add_user_file_path, participants):
try:
extra = load_yaml(add_user_file_path)
extra.update({"type": "admin"})
participants.append(Participant(**extra))
except Exception:
print("** Error during adding user **")
print("The yaml file format is")
print(adding_user_error_msg)
exit(0)
def main():
print("*****************************************************************************")
print("** provision command is deprecated, please use 'nvflare provision' instead **")
print("*****************************************************************************")
parser = argparse.ArgumentParser()
define_provision_parser(parser)
args = parser.parse_args()
handle_provision(args)
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/lighter/provision.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FlareServiceConstants(object):
FLARE_PROJ_ADMIN = "admin@nvidia.com"
FLARE_SERVER = "server"
FLARE_CLIENTS = "clients"
FLARE_OVERSEER = "overseer"
STARTUP = "startup"
CMD_START = "start"
CMD_STOP = "stop"
EXAMPLES = "examples"
TRANSFER = "transfer"
IS_DOCKER_RUN = "is_docker_run"
| NVFlare-main | nvflare/lighter/service_constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Template:
def __init__(self, template):
self.template = template
def get_cloud_script_header(self):
return self.template.get("cloud_script_header")
| NVFlare-main | nvflare/lighter/tplt_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tenseal as ts
from nvflare.lighter.spec import Builder
class HEBuilder(Builder):
def __init__(
self,
poly_modulus_degree=8192,
        coeff_mod_bit_sizes=None,
scale_bits=40,
scheme="CKKS",
):
"""Build Homomorphic related contents.
Generates Tenseal homomorphic encryption context for server and client and writes them to server and client
participant folders.
Args:
poly_modulus_degree: defaults to 8192.
coeff_mod_bit_sizes: defaults to [60, 40, 40].
scale_bits: defaults to 40.
scheme: defaults to "CKKS".
"""
        # avoid sharing a mutable default argument across instances
        if coeff_mod_bit_sizes is None:
            coeff_mod_bit_sizes = [60, 40, 40]
        self._context = None
self.scheme_type_mapping = {
"CKKS": ts.SCHEME_TYPE.CKKS,
"BFV": ts.SCHEME_TYPE.BFV,
}
self.poly_modulus_degree = poly_modulus_degree
self.coeff_mod_bit_sizes = coeff_mod_bit_sizes
self.scale_bits = scale_bits
_scheme = scheme
# Setup TenSEAL context
self.scheme_type = self.scheme_type_mapping[_scheme]
self.serialized = None
def initialize(self, ctx):
self._context = ts.context(
self.scheme_type,
poly_modulus_degree=self.poly_modulus_degree,
coeff_mod_bit_sizes=self.coeff_mod_bit_sizes,
encryption_type=ts.ENCRYPTION_TYPE.SYMMETRIC,
)
# dynamically call different generate keys method
# getattr(self._context, f'generate_{self.key_type}_keys')()
self._context.generate_relin_keys()
self._context.global_scale = 2**self.scale_bits
def build(self, project, ctx):
servers = project.get_participants_by_type("server", first_only=False)
for server in servers:
dest_dir = self.get_kit_dir(server, ctx)
with open(os.path.join(dest_dir, "server_context.tenseal"), "wb") as f:
f.write(self.get_serialized_context())
for client in project.get_participants_by_type("client", first_only=False):
dest_dir = self.get_kit_dir(client, ctx)
with open(os.path.join(dest_dir, "client_context.tenseal"), "wb") as f:
f.write(self.get_serialized_context(is_client=True))
def get_serialized_context(self, is_client=False):
_serialized_context = self._context.serialize(
save_public_key=is_client,
save_secret_key=is_client,
save_galois_keys=False,
save_relin_keys=True,
)
return _serialized_context
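# A usage sketch (assuming the TenSEAL deserialization API): a participant can restore
# its encryption context from the file this builder writes into the startup kit.
#
#     with open("client_context.tenseal", "rb") as f:
#         context = ts.context_from(f.read())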
| NVFlare-main | nvflare/lighter/impl/he.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/lighter/impl/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import shutil
import yaml
from nvflare.lighter.spec import Builder
class DockerBuilder(Builder):
def __init__(self, base_image="python:3.8", requirements_file="requirements.txt"):
"""Build docker compose file."""
self.base_image = base_image
self.requirements_file = requirements_file
def _build_overseer(self, overseer, ctx):
protocol = overseer.props.get("protocol", "http")
default_port = "443" if protocol == "https" else "80"
port = overseer.props.get("port", default_port)
info_dict = copy.deepcopy(self.services["__overseer__"])
info_dict["volumes"] = [f"./{overseer.name}:" + "${WORKSPACE}"]
info_dict["ports"] = [f"{port}:{port}"]
info_dict["build"] = "nvflare_compose"
info_dict["container_name"] = overseer.name
self.services[overseer.name] = info_dict
def _build_server(self, server, ctx):
fed_learn_port = server.props.get("fed_learn_port", 8002)
admin_port = server.props.get("admin_port", 8003)
info_dict = copy.deepcopy(self.services["__flserver__"])
info_dict["volumes"][0] = f"./{server.name}:" + "${WORKSPACE}"
info_dict["ports"] = [f"{fed_learn_port}:{fed_learn_port}", f"{admin_port}:{admin_port}"]
for i in range(len(info_dict["command"])):
if info_dict["command"][i] == "flserver":
info_dict["command"][i] = server.name
if info_dict["command"][i] == "org=__org_name__":
info_dict["command"][i] = f"org={server.org}"
info_dict["container_name"] = server.name
self.services[server.name] = info_dict
def _build_client(self, client, ctx):
info_dict = copy.deepcopy(self.services["__flclient__"])
info_dict["volumes"] = [f"./{client.name}:" + "${WORKSPACE}"]
for i in range(len(info_dict["command"])):
if info_dict["command"][i] == "flclient":
info_dict["command"][i] = client.name
if info_dict["command"][i] == "uid=__flclient__":
info_dict["command"][i] = f"uid={client.name}"
if info_dict["command"][i] == "org=__org_name__":
info_dict["command"][i] = f"org={client.org}"
info_dict["container_name"] = client.name
self.services[client.name] = info_dict
def build(self, project, ctx):
self.template = ctx.get("template")
self.compose = yaml.safe_load(self.template.get("compose_yaml"))
self.services = self.compose.get("services")
self.compose_file_path = os.path.join(self.get_wip_dir(ctx), "compose.yaml")
overseer = project.get_participants_by_type("overseer")
if overseer:
self._build_overseer(overseer, ctx)
servers = project.get_participants_by_type("server", first_only=False)
for server in servers:
self._build_server(server, ctx)
for client in project.get_participants_by_type("client", first_only=False):
self._build_client(client, ctx)
self.services.pop("__overseer__", None)
self.services.pop("__flserver__", None)
self.services.pop("__flclient__", None)
self.compose["services"] = self.services
with open(self.compose_file_path, "wt") as f:
yaml.dump(self.compose, f)
env_file_path = os.path.join(self.get_wip_dir(ctx), ".env")
with open(env_file_path, "wt") as f:
f.write("WORKSPACE=/workspace\n")
f.write("PYTHON_EXECUTABLE=/usr/local/bin/python3\n")
f.write("IMAGE_NAME=nvflare-service\n")
compose_build_dir = os.path.join(self.get_wip_dir(ctx), "nvflare_compose")
os.mkdir(compose_build_dir)
with open(os.path.join(compose_build_dir, "Dockerfile"), "wt") as f:
f.write(f"FROM {self.base_image}\n")
f.write(self.template.get("dockerfile"))
try:
shutil.copyfile(self.requirements_file, os.path.join(compose_build_dir, "requirements.txt"))
except Exception:
f = open(os.path.join(compose_build_dir, "requirements.txt"), "wt")
f.close()
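# Typical use of the generated artifacts (a sketch; the exact path depends on the
# provision run): from the produced prod_NN folder, build and start the services:
#
#     cd workspace/<project>/prod_NN && docker compose up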
| NVFlare-main | nvflare/lighter/impl/docker.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from nvflare.lighter.spec import Builder
def serialize_pri_key(pri_key):
return pri_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
def serialize_cert(cert):
return cert.public_bytes(serialization.Encoding.PEM)
def load_crt(path):
serialized_cert = open(path, "rb").read()
return x509.load_pem_x509_certificate(serialized_cert, default_backend())
class CertBuilder(Builder):
def __init__(self):
"""Build certificate chain for every participant.
Handles building (creating and self-signing) the root CA certificates, creating server, client and
admin certificates, and having them signed by the root CA for secure communication. If the state folder has
information about previously generated certs, it loads them back and reuses them.
"""
self.root_cert = None
self.persistent_state = dict()
def initialize(self, ctx):
state_dir = self.get_state_dir(ctx)
cert_file = os.path.join(state_dir, "cert.json")
if os.path.exists(cert_file):
self.persistent_state = json.load(open(cert_file, "rt"))
self.serialized_cert = self.persistent_state["root_cert"].encode("ascii")
self.root_cert = x509.load_pem_x509_certificate(self.serialized_cert, default_backend())
self.pri_key = serialization.load_pem_private_key(
self.persistent_state["root_pri_key"].encode("ascii"), password=None, backend=default_backend()
)
self.pub_key = self.pri_key.public_key()
self.subject = self.root_cert.subject
self.issuer = self.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
def _build_root(self, subject, subject_org):
if not self.persistent_state:
pri_key, pub_key = self._generate_keys()
self.issuer = subject
self.root_cert = self._generate_cert(subject, subject_org, self.issuer, pri_key, pub_key, ca=True)
self.pri_key = pri_key
self.pub_key = pub_key
self.serialized_cert = serialize_cert(self.root_cert)
self.persistent_state["root_cert"] = self.serialized_cert.decode("ascii")
self.persistent_state["root_pri_key"] = serialize_pri_key(self.pri_key).decode("ascii")
def _build_write_cert_pair(self, participant, base_name, ctx):
subject = self.get_subject(participant)
if self.persistent_state and subject in self.persistent_state:
cert = x509.load_pem_x509_certificate(
self.persistent_state[subject]["cert"].encode("ascii"), default_backend()
)
pri_key = serialization.load_pem_private_key(
self.persistent_state[subject]["pri_key"].encode("ascii"), password=None, backend=default_backend()
)
else:
pri_key, cert = self.get_pri_key_cert(participant)
self.persistent_state[subject] = dict(
cert=serialize_cert(cert).decode("ascii"), pri_key=serialize_pri_key(pri_key).decode("ascii")
)
dest_dir = self.get_kit_dir(participant, ctx)
with open(os.path.join(dest_dir, f"{base_name}.crt"), "wb") as f:
f.write(serialize_cert(cert))
with open(os.path.join(dest_dir, f"{base_name}.key"), "wb") as f:
f.write(serialize_pri_key(pri_key))
pkcs12 = serialization.pkcs12.serialize_key_and_certificates(
subject.encode("ascii"), pri_key, cert, None, serialization.BestAvailableEncryption(subject.encode("ascii"))
)
with open(os.path.join(dest_dir, f"{base_name}.pfx"), "wb") as f:
f.write(pkcs12)
with open(os.path.join(dest_dir, "rootCA.pem"), "wb") as f:
f.write(self.serialized_cert)
def build(self, project, ctx):
self._build_root(project.name, subject_org=None)
ctx["root_cert"] = self.root_cert
ctx["root_pri_key"] = self.pri_key
overseer = project.get_participants_by_type("overseer")
if overseer:
self._build_write_cert_pair(overseer, "overseer", ctx)
servers = project.get_participants_by_type("server", first_only=False)
for server in servers:
self._build_write_cert_pair(server, "server", ctx)
for client in project.get_participants_by_type("client", first_only=False):
self._build_write_cert_pair(client, "client", ctx)
for admin in project.get_participants_by_type("admin", first_only=False):
self._build_write_cert_pair(admin, "client", ctx)
def get_pri_key_cert(self, participant):
pri_key, pub_key = self._generate_keys()
subject = self.get_subject(participant)
subject_org = participant.org
if participant.type == "admin":
role = participant.props.get("role")
else:
role = None
cert = self._generate_cert(subject, subject_org, self.issuer, self.pri_key, pub_key, role=role)
return pri_key, cert
def get_subject(self, participant):
return participant.subject
def _generate_keys(self):
pri_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pub_key = pri_key.public_key()
return pri_key, pub_key
def _generate_cert(
self, subject, subject_org, issuer, signing_pri_key, subject_pub_key, valid_days=360, ca=False, role=None
):
x509_subject = self._x509_name(subject, subject_org, role)
x509_issuer = self._x509_name(issuer)
builder = (
x509.CertificateBuilder()
.subject_name(x509_subject)
.issuer_name(x509_issuer)
.public_key(subject_pub_key)
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(
                # the certificate is valid for `valid_days` days (360 by default)
                datetime.datetime.utcnow()
                + datetime.timedelta(days=valid_days)
            )
.add_extension(x509.SubjectAlternativeName([x509.DNSName(subject)]), critical=False)
)
if ca:
builder = (
builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(subject_pub_key),
critical=False,
)
.add_extension(
x509.AuthorityKeyIdentifier.from_issuer_public_key(subject_pub_key),
critical=False,
)
.add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=False)
)
        # sign the certificate with the signing (root CA) private key
        return builder.sign(signing_pri_key, hashes.SHA256(), default_backend())
def _x509_name(self, cn_name, org_name=None, role=None):
name = [x509.NameAttribute(NameOID.COMMON_NAME, cn_name)]
if org_name is not None:
name.append(x509.NameAttribute(NameOID.ORGANIZATION_NAME, org_name))
if role:
name.append(x509.NameAttribute(NameOID.UNSTRUCTURED_NAME, role))
return x509.Name(name)
def finalize(self, ctx):
state_dir = self.get_state_dir(ctx)
cert_file = os.path.join(state_dir, "cert.json")
json.dump(self.persistent_state, open(cert_file, "wt"))
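# A verification sketch (hypothetical paths; standard `cryptography` calls): confirm
# that a generated server certificate chains to the root CA produced by this builder.
#
#     from cryptography.hazmat.primitives.asymmetric import padding
#     root = load_crt("rootCA.pem")
#     server_cert = load_crt("server.crt")
#     root.public_key().verify(
#         server_cert.signature,
#         server_cert.tbs_certificate_bytes,
#         padding.PKCS1v15(),
#         server_cert.signature_hash_algorithm,
#     )  # raises InvalidSignature if the certificate was tampered with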
| NVFlare-main | nvflare/lighter/impl/cert.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from nvflare.lighter.spec import Builder
from nvflare.lighter.utils import load_yaml
class TemplateBuilder(Builder):
"""Load template file.
Loads the content of the template_file and the authz_def (section of template file with fixed authorization
definitions) into two key-value pairs in the build context.
"""
def initialize(self, ctx):
resource_dir = self.get_resources_dir(ctx)
template_file = ctx.get("template_file")
template = load_yaml(os.path.join(resource_dir, template_file))
authz_def = json.loads(template.get("authz_def"))
ctx["template"] = template
ctx["authz_def"] = authz_def
| NVFlare-main | nvflare/lighter/impl/template.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from nvflare.lighter.spec import Builder
class HelmChartBuilder(Builder):
def __init__(self, docker_image):
"""Build Helm Chart."""
self.docker_image = docker_image
def initialize(self, ctx):
self.helm_chart_directory = os.path.join(self.get_wip_dir(ctx), "nvflare_hc")
os.mkdir(self.helm_chart_directory)
def _build_overseer(self, overseer, ctx):
protocol = overseer.props.get("protocol", "http")
default_port = "443" if protocol == "https" else "80"
port = overseer.props.get("port", default_port)
self.deployment_overseer["spec"]["template"]["spec"]["volumes"][0]["hostPath"][
"path"
] = "{{ .Values.workspace }}"
self.deployment_overseer["spec"]["template"]["spec"]["containers"][0]["ports"][0]["containerPort"] = port
self.deployment_overseer["spec"]["template"]["spec"]["containers"][0]["image"] = self.docker_image
self.deployment_overseer["spec"]["template"]["spec"]["containers"][0]["command"][
0
] = f"/workspace/{overseer.name}/startup/start.sh"
with open(os.path.join(self.helm_chart_templates_directory, "deployment_overseer.yaml"), "wt") as f:
yaml.dump(self.deployment_overseer, f)
self.service_overseer["spec"]["ports"][0]["port"] = port
self.service_overseer["spec"]["ports"][0]["targetPort"] = port
with open(os.path.join(self.helm_chart_templates_directory, "service_overseer.yaml"), "wt") as f:
yaml.dump(self.service_overseer, f)
def _build_server(self, server, ctx):
fed_learn_port = server.props.get("fed_learn_port", 30002)
admin_port = server.props.get("admin_port", 30003)
idx = ctx["index"]
self.deployment_server["metadata"]["name"] = f"{server.name}"
self.deployment_server["metadata"]["labels"]["system"] = f"{server.name}"
self.deployment_server["spec"]["selector"]["matchLabels"]["system"] = f"{server.name}"
self.deployment_server["spec"]["template"]["metadata"]["labels"]["system"] = f"{server.name}"
self.deployment_server["spec"]["template"]["spec"]["volumes"][0]["hostPath"]["path"] = "{{ .Values.workspace }}"
self.deployment_server["spec"]["template"]["spec"]["volumes"][1]["hostPath"]["path"] = "{{ .Values.persist }}"
self.deployment_server["spec"]["template"]["spec"]["containers"][0]["name"] = f"{server.name}"
self.deployment_server["spec"]["template"]["spec"]["containers"][0]["image"] = self.docker_image
self.deployment_server["spec"]["template"]["spec"]["containers"][0]["ports"][0][
"containerPort"
] = fed_learn_port
self.deployment_server["spec"]["template"]["spec"]["containers"][0]["ports"][1]["containerPort"] = admin_port
cmd_args = self.deployment_server["spec"]["template"]["spec"]["containers"][0]["args"]
for i, item in enumerate(cmd_args):
if "/workspace/server" in item:
cmd_args[i] = f"/workspace/{server.name}"
if "__org_name__" in item:
cmd_args[i] = f"org={server.org}"
self.deployment_server["spec"]["template"]["spec"]["containers"][0]["args"] = cmd_args
with open(os.path.join(self.helm_chart_templates_directory, f"deployment_server{idx}.yaml"), "wt") as f:
yaml.dump(self.deployment_server, f)
self.service_server["metadata"]["name"] = f"{server.name}"
self.service_server["metadata"]["labels"]["system"] = f"{server.name}"
self.service_server["spec"]["selector"]["system"] = f"{server.name}"
self.service_server["spec"]["ports"][0]["name"] = "fl-port"
self.service_server["spec"]["ports"][0]["port"] = fed_learn_port
self.service_server["spec"]["ports"][0]["targetPort"] = fed_learn_port
self.service_server["spec"]["ports"][1]["name"] = "admin-port"
self.service_server["spec"]["ports"][1]["port"] = admin_port
self.service_server["spec"]["ports"][1]["targetPort"] = admin_port
with open(os.path.join(self.helm_chart_templates_directory, f"service_server{idx}.yaml"), "wt") as f:
yaml.dump(self.service_server, f)
def build(self, project, ctx):
self.template = ctx.get("template")
with open(os.path.join(self.helm_chart_directory, "Chart.yaml"), "wt") as f:
yaml.dump(yaml.safe_load(self.template.get("helm_chart_chart")), f)
with open(os.path.join(self.helm_chart_directory, "values.yaml"), "wt") as f:
yaml.dump(yaml.safe_load(self.template.get("helm_chart_values")), f)
self.service_overseer = yaml.safe_load(self.template.get("helm_chart_service_overseer"))
self.service_server = yaml.safe_load(self.template.get("helm_chart_service_server"))
self.deployment_overseer = yaml.safe_load(self.template.get("helm_chart_deployment_overseer"))
self.deployment_server = yaml.safe_load(self.template.get("helm_chart_deployment_server"))
self.helm_chart_templates_directory = os.path.join(self.helm_chart_directory, "templates")
os.mkdir(self.helm_chart_templates_directory)
overseer = project.get_participants_by_type("overseer")
self._build_overseer(overseer, ctx)
servers = project.get_participants_by_type("server", first_only=False)
for index, server in enumerate(servers):
ctx["index"] = index
self._build_server(server, ctx)
| NVFlare-main | nvflare/lighter/impl/helm_chart.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
from nvflare.lighter.spec import Builder, Project
class WorkspaceBuilder(Builder):
def __init__(self, template_file):
"""Manages the folder structure for provisioned projects.
Sets the template_file containing scripts and configs to put into startup folders, creates directories for the
participants, and moves the provisioned project to the final location at the end
($WORKSPACE/$PROJECT_NAME/prod_XX). WorkspaceBuilder manages and sets the number in prod_XX by incrementing from
the last time provision was run for this project in this workspace, starting with 00 to a max of 99.
Each time the provisioning tool runs, it requires a workspace folder in the local file system. The workspace
will have the following folder structure:
.. code-block:: text
$WORKSPACE/ <--- this is assigned by -w option of provision command (default is workspace)
$PROJECT_NAME/ <--- this is the name value in the project.yml file
prod_00/ <--- a new prod_NN folder is created if provision does not have any errors.
prod_01/
...
resources/ <--- this folder stores resources for other builders to load
state/ <--- this folder stores persistent information (such as certificates) so subsequent runs of the provision command can load the state back.
wip/ <--- this is only used during runtime, and will be removed when the provision command exits
Args:
template_file: name of template file containing scripts and configs to put into startup folders
"""
self.template_file = template_file
def _make_dir(self, dirs):
        for d in dirs:
            if not os.path.exists(d):
                os.makedirs(d)
def initialize(self, ctx):
workspace_dir = ctx["workspace"]
        prod_dirs = [d for d in os.listdir(workspace_dir) if d.startswith("prod_")]
        last = -1
        for d in prod_dirs:
            stage = int(d.split("_")[-1])
if stage > last:
last = stage
ctx["last_prod_stage"] = last
template_file_full_path = os.path.join(self.get_resources_dir(ctx), self.template_file)
file_path = pathlib.Path(__file__).parent.absolute()
shutil.copyfile(os.path.join(file_path, self.template_file), template_file_full_path)
ctx["template_file"] = self.template_file
def build(self, project: Project, ctx: dict):
dirs = [self.get_kit_dir(p, ctx) for p in project.participants]
self._make_dir(dirs)
dirs = [self.get_transfer_dir(p, ctx) for p in project.participants]
self._make_dir(dirs)
dirs = [self.get_local_dir(p, ctx) for p in project.participants]
self._make_dir(dirs)
def finalize(self, ctx: dict):
if ctx["last_prod_stage"] >= 99:
print(f"Please clean up {ctx['workspace']} by removing prod_N folders")
print("After clean-up, rerun the provision command.")
else:
current_prod_stage = str(ctx["last_prod_stage"] + 1).zfill(2)
current_prod_dir = os.path.join(ctx["workspace"], f"prod_{current_prod_stage}")
shutil.move(self.get_wip_dir(ctx), current_prod_dir)
ctx.pop("wip_dir", None)
print(f"Generated results can be found under {current_prod_dir}. ")
ctx["current_prod_dir"] = current_prod_dir
| NVFlare-main | nvflare/lighter/impl/workspace.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from nvflare.lighter.spec import Builder, Project
from nvflare.lighter.utils import sign_all
class SignatureBuilder(Builder):
"""Sign files with rootCA's private key.
Creates signatures for all the files signed with the root CA for the startup kits so that they
can be cryptographically verified to ensure any tampering is detected. This builder writes the signature.json file.
"""
def _do_sign(self, root_pri_key, dest_dir):
signatures = sign_all(dest_dir, root_pri_key)
        with open(os.path.join(dest_dir, "signature.json"), "wt") as f:
            json.dump(signatures, f)
def build(self, project: Project, ctx: dict):
root_pri_key = ctx.get("root_pri_key")
overseer = project.get_participants_by_type("overseer")
if overseer:
dest_dir = self.get_kit_dir(overseer, ctx)
self._do_sign(root_pri_key, dest_dir)
servers = project.get_participants_by_type("server", first_only=False)
for server in servers:
dest_dir = self.get_kit_dir(server, ctx)
self._do_sign(root_pri_key, dest_dir)
for p in project.get_participants_by_type("client", first_only=False):
dest_dir = self.get_kit_dir(p, ctx)
self._do_sign(root_pri_key, dest_dir)
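# --- Illustrative sketch (hedged; not a definitive usage) ---
# sign_all() walks a folder and returns {file_name: signature} computed with the
# root CA private key; this builder then serializes that mapping as signature.json.
# A standalone outline, assuming `root_pri_key` is the private-key object that the
# cert builder stores under "root_pri_key" in the provisioning context:
#
#   signatures = sign_all("/path/to/startup_kit", root_pri_key)
#   with open("/path/to/startup_kit/signature.json", "wt") as f:
#       json.dump(signatures, f)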
| NVFlare-main | nvflare/lighter/impl/signature.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.lighter.impl.cert import CertBuilder
class LocalCertBuilder(CertBuilder):
def get_subject(self, participant):
if participant.type == "server":
return "localhost"
else:
return participant.name
| NVFlare-main | nvflare/lighter/impl/local_cert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.lighter.impl.static_file import StaticFileBuilder
class LocalStaticFileBuilder(StaticFileBuilder):
def __init__(
self,
enable_byoc=False,
config_folder="",
scheme="grpc",
app_validator="",
download_job_url="",
docker_image="",
snapshot_persistor="",
overseer_agent="",
components="",
username="",
):
"""Build all static files from template.
Uses the information from project.yml through project to go through the participants and write the contents of
each file with the template, and replacing with the appropriate values from project.yml.
Usually, two main categories of files are created in all FL participants, static and dynamic. Static files
have similar contents among different participants, with small differences. For example, the differences in
sub_start.sh are client name and python module. Those are basically static files. This builder uses template
file and string replacement to generate those static files for each participant.
Args:
enable_byoc: for each participant, true to enable loading of code in the custom folder of applications
config_folder: usually "config"
app_validator: optional path to an app validator to verify that uploaded app has the expected structure
            docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin
            username: the admin username to set in the generated local admin configuration
        """
super().__init__(
enable_byoc,
config_folder,
scheme,
app_validator,
download_job_url,
docker_image,
snapshot_persistor,
overseer_agent,
components,
)
self.username = username
def get_server_name(self, server):
return "localhost"
def get_overseer_name(self, overseer):
return "localhost"
def prepare_admin_config(self, admin, ctx):
config = super().prepare_admin_config(admin, ctx)
config["admin"]["username"] = self.username
config["admin"]["cred_type"] = "local_cert"
return config
| NVFlare-main | nvflare/lighter/impl/local_static_file.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import yaml
from nvflare.lighter.spec import Builder
from nvflare.lighter.utils import sh_replace
class StaticFileBuilder(Builder):
def __init__(
self,
enable_byoc=False,
config_folder="",
scheme="grpc",
app_validator="",
download_job_url="",
docker_image="",
snapshot_persistor="",
overseer_agent="",
components="",
):
"""Build all static files from template.
Uses the information from project.yml through project to go through the participants and write the contents of
each file with the template, and replacing with the appropriate values from project.yml.
Usually, two main categories of files are created in all FL participants, static and dynamic. Static files
have similar contents among different participants, with small differences. For example, the differences in
sub_start.sh are client name and python module. Those are basically static files. This builder uses template
file and string replacement to generate those static files for each participant.
Args:
enable_byoc: for each participant, true to enable loading of code in the custom folder of applications
config_folder: usually "config"
app_validator: optional path to an app validator to verify that uploaded app has the expected structure
            scheme: communication scheme (e.g. "grpc") written into the generated server/client configs
            docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin
            overseer_agent: component configuration of the overseer agent to embed in the generated configs
        """
self.enable_byoc = enable_byoc
self.config_folder = config_folder
self.scheme = scheme
self.docker_image = docker_image
self.download_job_url = download_job_url
self.app_validator = app_validator
self.overseer_agent = overseer_agent
self.snapshot_persistor = snapshot_persistor
self.components = components
def _write(self, file_full_path, content, mode, exe=False):
mode = mode + "w"
with open(file_full_path, mode) as f:
f.write(content)
if exe:
os.chmod(file_full_path, 0o755)
def get_server_name(self, server):
return server.name
def get_overseer_name(self, overseer):
return overseer.name
def _build_overseer(self, overseer, ctx):
dest_dir = self.get_kit_dir(overseer, ctx)
self._write(
os.path.join(dest_dir, "start.sh"),
self.template["start_svr_sh"],
"t",
exe=True,
)
protocol = overseer.props.get("protocol", "http")
api_root = overseer.props.get("api_root", "/api/v1/")
default_port = "443" if protocol == "https" else "80"
port = overseer.props.get("port", default_port)
replacement_dict = {"port": port, "hostname": self.get_overseer_name(overseer)}
admins = self.project.get_participants_by_type("admin", first_only=False)
privilege_dict = dict()
for admin in admins:
role = admin.props.get("role")
if role in privilege_dict:
privilege_dict[role].append(admin.subject)
else:
privilege_dict[role] = [admin.subject]
self._write(
os.path.join(dest_dir, "privilege.yml"),
yaml.dump(privilege_dict, Dumper=yaml.Dumper),
"t",
exe=False,
)
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
sh_replace(self.template["docker_svr_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "gunicorn.conf.py"),
sh_replace(self.template["gunicorn_conf_py"], replacement_dict),
"t",
exe=False,
)
self._write(
os.path.join(dest_dir, "start.sh"),
self.template["start_ovsr_sh"],
"t",
exe=True,
)
if port:
ctx["overseer_end_point"] = f"{protocol}://{self.get_overseer_name(overseer)}:{port}{api_root}"
else:
ctx["overseer_end_point"] = f"{protocol}://{self.get_overseer_name(overseer)}{api_root}"
def _build_server(self, server, ctx):
config = json.loads(self.template["fed_server"])
dest_dir = self.get_kit_dir(server, ctx)
server_0 = config["servers"][0]
server_0["name"] = self.project_name
admin_port = server.props.get("admin_port", 8003)
ctx["admin_port"] = admin_port
fed_learn_port = server.props.get("fed_learn_port", 8002)
ctx["fed_learn_port"] = fed_learn_port
ctx["server_name"] = self.get_server_name(server)
server_0["service"]["target"] = f"{self.get_server_name(server)}:{fed_learn_port}"
server_0["service"]["scheme"] = self.scheme
server_0["admin_host"] = self.get_server_name(server)
server_0["admin_port"] = admin_port
# if self.download_job_url:
# server_0["download_job_url"] = self.download_job_url
# config["enable_byoc"] = server.enable_byoc
# if self.app_validator:
# config["app_validator"] = {"path": self.app_validator}
if self.overseer_agent:
overseer_agent = copy.deepcopy(self.overseer_agent)
if overseer_agent.get("overseer_exists", True):
overseer_agent["args"] = {
"role": "server",
"overseer_end_point": ctx.get("overseer_end_point", ""),
"project": self.project_name,
"name": self.get_server_name(server),
"fl_port": str(fed_learn_port),
"admin_port": str(admin_port),
}
overseer_agent.pop("overseer_exists", None)
config["overseer_agent"] = overseer_agent
# if self.snapshot_persistor:
# config["snapshot_persistor"] = self.snapshot_persistor
# components = server.props.get("components", [])
# config["components"] = list()
# for comp in components:
# temp_dict = {"id": comp}
# temp_dict.update(components[comp])
# config["components"].append(temp_dict)
# provisioned_client_list = list()
# for client in self.project.get_participants_by_type("client", first_only=False):
# provisioned_client_list.append(client.name)
# config["provisioned_client_list"] = provisioned_client_list
self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config, indent=2), "t")
replacement_dict = {
"admin_port": admin_port,
"fed_learn_port": fed_learn_port,
"config_folder": self.config_folder,
"docker_image": self.docker_image,
"org_name": server.org,
}
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
sh_replace(self.template["docker_svr_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "start.sh"),
self.template["start_svr_sh"],
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "sub_start.sh"),
sh_replace(self.template["sub_start_svr_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "stop_fl.sh"),
self.template["stop_fl_sh"],
"t",
exe=True,
)
# local folder creation
dest_dir = self.get_local_dir(server, ctx)
self._write(
os.path.join(dest_dir, "log.config.default"),
self.template["log_config"],
"t",
)
self._write(
os.path.join(dest_dir, "resources.json.default"),
self.template["local_server_resources"],
"t",
)
self._write(
os.path.join(dest_dir, "privacy.json.sample"),
self.template["sample_privacy"],
"t",
)
self._write(
os.path.join(dest_dir, "authorization.json.default"),
self.template["default_authz"],
"t",
)
# workspace folder file
self._write(
os.path.join(self.get_ws_dir(server, ctx), "readme.txt"),
self.template["readme_fs"],
"t",
)
def _build_client(self, client, ctx):
config = json.loads(self.template["fed_client"])
dest_dir = self.get_kit_dir(client, ctx)
fed_learn_port = ctx.get("fed_learn_port")
server_name = ctx.get("server_name")
# config["servers"][0]["service"]["target"] = f"{server_name}:{fed_learn_port}"
config["servers"][0]["service"]["scheme"] = self.scheme
config["servers"][0]["name"] = self.project_name
# config["enable_byoc"] = client.enable_byoc
replacement_dict = {
"client_name": f"{client.subject}",
"config_folder": self.config_folder,
"docker_image": self.docker_image,
"org_name": client.org,
}
if self.overseer_agent:
overseer_agent = copy.deepcopy(self.overseer_agent)
if overseer_agent.get("overseer_exists", True):
overseer_agent["args"] = {
"role": "client",
"overseer_end_point": ctx.get("overseer_end_point", ""),
"project": self.project_name,
"name": client.subject,
}
overseer_agent.pop("overseer_exists", None)
config["overseer_agent"] = overseer_agent
# components = client.props.get("components", [])
# config["components"] = list()
# for comp in components:
# temp_dict = {"id": comp}
# temp_dict.update(components[comp])
# config["components"].append(temp_dict)
self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
sh_replace(self.template["docker_cln_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "start.sh"),
self.template["start_cln_sh"],
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "sub_start.sh"),
sh_replace(self.template["sub_start_cln_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "stop_fl.sh"),
self.template["stop_fl_sh"],
"t",
exe=True,
)
# local folder creation
dest_dir = self.get_local_dir(client, ctx)
self._write(
os.path.join(dest_dir, "log.config.default"),
self.template["log_config"],
"t",
)
self._write(
os.path.join(dest_dir, "resources.json.default"),
self.template["local_client_resources"],
"t",
)
self._write(
os.path.join(dest_dir, "privacy.json.sample"),
self.template["sample_privacy"],
"t",
)
self._write(
os.path.join(dest_dir, "authorization.json.default"),
self.template["default_authz"],
"t",
)
# workspace folder file
self._write(
os.path.join(self.get_ws_dir(client, ctx), "readme.txt"),
self.template["readme_fc"],
"t",
)
def _build_admin(self, admin, ctx):
dest_dir = self.get_kit_dir(admin, ctx)
admin_port = ctx.get("admin_port")
server_name = ctx.get("server_name")
replacement_dict = {
"cn": f"{server_name}",
"admin_port": f"{admin_port}",
"docker_image": self.docker_image,
}
config = self.prepare_admin_config(admin, ctx)
self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
sh_replace(self.template["docker_adm_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "fl_admin.sh"),
sh_replace(self.template["fl_admin_sh"], replacement_dict),
"t",
exe=True,
)
self._write(
os.path.join(dest_dir, "readme.txt"),
self.template["readme_am"],
"t",
)
def prepare_admin_config(self, admin, ctx):
config = json.loads(self.template["fed_admin"])
agent_config = dict()
if self.overseer_agent:
overseer_agent = copy.deepcopy(self.overseer_agent)
if overseer_agent.get("overseer_exists", True):
overseer_agent["args"] = {
"role": "admin",
"overseer_end_point": ctx.get("overseer_end_point", ""),
"project": self.project_name,
"name": admin.subject,
}
overseer_agent.pop("overseer_exists", None)
agent_config["overseer_agent"] = overseer_agent
config["admin"].update(agent_config)
return config
def build(self, project, ctx):
self.template = ctx.get("template")
self.project_name = project.name
self.project = project
overseer = project.get_participants_by_type("overseer")
if overseer:
self._build_overseer(overseer, ctx)
servers = project.get_participants_by_type("server", first_only=False)
for server in servers:
self._build_server(server, ctx)
for client in project.get_participants_by_type("client", first_only=False):
self._build_client(client, ctx)
for admin in project.get_participants_by_type("admin", first_only=False):
self._build_admin(admin, ctx)
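# --- Illustrative sketch (assumptions noted; not part of the original module) ---
# Every _build_* method above follows the same pattern: take a template string,
# substitute participant-specific values (via sh_replace and a replacement_dict),
# and write the result into the participant's kit folder. A minimal imitation of
# that pattern with str.format and a made-up template (the real placeholder syntax
# lives in the template files shipped with nvflare.lighter):
if __name__ == "__main__":
    template = "docker run --name {client_name} {docker_image}"  # hypothetical template
    replacement_dict = {"client_name": "site-1", "docker_image": "nvflare/nvflare:latest"}
    print(template.format(**replacement_dict))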
| NVFlare-main | nvflare/lighter/impl/static_file.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import Enum
from typing import Dict, Optional
from nvflare.app_common.model_exchange.constants import ModelExchangeFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
class TransferType(str, Enum):
FULL = "FULL"
DIFF = "DIFF"
class ConfigKey:
EXCHANGE_PATH = "exchange_path"
EXCHANGE_FORMAT = "exchange_format"
TRANSFER_TYPE = "transfer_type"
GLOBAL_EVAL = "global_eval"
TRAINING = "training"
class ClientConfig:
"""Config class used in nvflare.client module.
Example:
{
"exchange_path": "./",
"exchange_format": "pytorch",
"transfer_type": "FULL"
}
"""
def __init__(self, config: Optional[Dict] = None):
if config is None:
config = {}
self.config = config
if ConfigKey.EXCHANGE_FORMAT in self.config:
self.config[ConfigKey.EXCHANGE_FORMAT] = ModelExchangeFormat(self.config[ConfigKey.EXCHANGE_FORMAT])
def get_config(self):
return self.config
def get_exchange_path(self):
return self.config[ConfigKey.EXCHANGE_PATH]
def get_exchange_format(self) -> ModelExchangeFormat:
return self.config[ConfigKey.EXCHANGE_FORMAT]
def get_transfer_type(self):
return self.config.get(ConfigKey.TRANSFER_TYPE, "FULL")
def to_json(self, config_file: str):
with open(config_file, "w") as f:
json.dump(self.config, f)
def from_file(config_file: str):
config = ConfigFactory.load_config(config_file)
if config is None:
raise RuntimeError(f"Load config file {config} failed.")
return ClientConfig(config=config.to_dict())
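# --- Illustrative usage sketch (assumptions noted; not part of the original module) ---
# Constructing a ClientConfig from a plain dict; "pytorch" follows the example in
# the ClientConfig docstring above.
if __name__ == "__main__":
    cfg = ClientConfig(
        {ConfigKey.EXCHANGE_PATH: "./", ConfigKey.EXCHANGE_FORMAT: "pytorch", ConfigKey.TRANSFER_TYPE: "DIFF"}
    )
    print(cfg.get_exchange_path(), cfg.get_exchange_format(), cfg.get_transfer_type())
    # cfg.to_json("config_exchange.json") would persist it; from_file() loads it back.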
| NVFlare-main | nvflare/client/config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from inspect import signature
from nvflare.app_common.abstract.fl_model import FLModel
from .api import PROCESS_MODEL_REGISTRY
def _replace_func_args(func, kwargs, model: FLModel):
# Replace only the first argument
first_params = next(iter(signature(func).parameters.values()))
kwargs[first_params.name] = model
def train(
_func=None,
**root_kwargs,
):
def decorator(train_fn):
@functools.wraps(train_fn)
def wrapper(*args, **kwargs):
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
cache = PROCESS_MODEL_REGISTRY[pid]
input_model = cache.get_model()
# Replace func arguments
_replace_func_args(train_fn, kwargs, input_model)
return_value = train_fn(**kwargs)
if return_value is None:
raise RuntimeError("return value is None!")
elif not isinstance(return_value, FLModel):
raise RuntimeError("return value needs to be an FLModel.")
if cache.metrics is not None:
return_value.metrics = cache.metrics
cache.send(model=return_value)
cache.model_exchanger.finalize(close_pipe=False)
PROCESS_MODEL_REGISTRY.pop(pid)
return return_value
return wrapper
if _func is None:
return decorator
else:
return decorator(_func)
def evaluate(
_func=None,
**root_kwargs,
):
def decorator(eval_fn):
@functools.wraps(eval_fn)
def wrapper(*args, **kwargs):
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
cache = PROCESS_MODEL_REGISTRY[pid]
input_model = cache.get_model()
_replace_func_args(eval_fn, kwargs, input_model)
return_value = eval_fn(**kwargs)
if return_value is None:
raise RuntimeError("return value is None!")
cache.metrics = return_value
return return_value
return wrapper
if _func is None:
return decorator
else:
return decorator(_func)
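# --- Illustrative usage sketch (hedged; runs only inside an NVFlare job) ---
# How a training script is expected to use these decorators. The wrapper replaces
# the first parameter of the decorated function with the received FLModel, and the
# train wrapper sends the returned FLModel back. Names below (input_model, epochs,
# the local training details) are hypothetical.
#
#   import nvflare.client as flare
#
#   flare.init()
#
#   @flare.train
#   def train(input_model=None, epochs=1):
#       new_params = ...                         # local training on input_model.params
#       return flare.FLModel(params=new_params)  # must return an FLModel
#
#   @flare.evaluate
#   def evaluate(input_model=None):
#       return 0.9                               # stored as metrics for the next send
#
#   evaluate()
#   train(epochs=2)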
| NVFlare-main | nvflare/client/decorator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SYS_ATTRS = ("job_id", "site_name", "total_rounds")
CONST_ATTRS = ("total_rounds",)
CONFIG_EXCHANGE = "config_exchange.json"
| NVFlare-main | nvflare/client/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/microsoft/pylance-release/issues/856
from nvflare.app_common.abstract.fl_model import FLModel as FLModel
from nvflare.app_common.abstract.fl_model import ParamsType as ParamsType
from .api import init as init
from .api import params_diff as params_diff
from .api import receive as receive
from .api import send as send
from .api import system_info as system_info
from .decorator import evaluate as evaluate
from .decorator import train as train
| NVFlare-main | nvflare/client/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Union
from nvflare.app_common.abstract.fl_model import FLModel, MetaKey
from nvflare.app_common.model_exchange.constants import ModelExchangeFormat
from nvflare.app_common.model_exchange.file_pipe_model_exchanger import FilePipeModelExchanger
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.import_utils import optional_import
from .config import ClientConfig, from_file
from .constants import CONFIG_EXCHANGE
from .model_registry import ModelRegistry
from .utils import DIFF_FUNCS
PROCESS_MODEL_REGISTRY: Dict[int, ModelRegistry] = {}
def init(config: Union[str, Dict] = f"config/{CONFIG_EXCHANGE}"):
"""Initializes NVFlare Client API environment.
Args:
config (str or dict): configuration file or config dictionary.
"""
pid = os.getpid()
if pid in PROCESS_MODEL_REGISTRY:
raise RuntimeError("Can't call init twice.")
if isinstance(config, str):
client_config = from_file(config_file=config)
elif isinstance(config, dict):
client_config = ClientConfig(config=config)
else:
raise ValueError("config should be either a string or dictionary.")
if client_config.get_exchange_format() == ModelExchangeFormat.PYTORCH:
tensor_decomposer, ok = optional_import(module="nvflare.app_opt.pt.decomposers", name="TensorDecomposer")
if ok:
fobs.register(tensor_decomposer)
else:
raise RuntimeError(f"Can't import TensorDecomposer for format: {ModelExchangeFormat.PYTORCH}")
# TODO: make things configurable in config_exchange
mdx = FilePipeModelExchanger(data_exchange_path=client_config.get_exchange_path())
PROCESS_MODEL_REGISTRY[pid] = ModelRegistry(mdx, client_config)
def receive() -> FLModel:
"""Receives model from NVFlare side.
Returns:
        An FLModel object received from the NVFlare side.
"""
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
return model_registry.get_model()
def send(fl_model: FLModel, clear_registry: bool = True) -> None:
"""Sends the model to NVFlare side.
Args:
        fl_model (FLModel): The FLModel object to send.
        clear_registry (bool): Whether to clear the registry after sending.
"""
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
model_registry.send(model=fl_model)
if clear_registry:
clear()
def clear():
"""Clears the model registry."""
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
model_registry.clear()
def system_info() -> Dict:
"""Gets NVFlare system information.
System information will be available after a valid FLModel is received.
It does not retrieve information actively.
Returns:
A dict of system information.
"""
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
return model_registry.get_sys_info()
def params_diff(original: Dict, new: Dict) -> Dict:
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
diff_func = DIFF_FUNCS.get(model_registry.config.get_exchange_format(), None)
if diff_func is None:
raise RuntimeError("no default params diff function")
return diff_func(original, new)
def get_config() -> Dict:
pid = os.getpid()
if pid not in PROCESS_MODEL_REGISTRY:
raise RuntimeError("needs to call init method first")
model_registry = PROCESS_MODEL_REGISTRY[pid]
return model_registry.config.config
def get_job_id() -> str:
sys_info = system_info()
return sys_info.get(MetaKey.JOB_ID, "")
def get_total_rounds() -> int:
sys_info = system_info()
return sys_info.get(MetaKey.TOTAL_ROUNDS, 0)
def get_site_name() -> str:
sys_info = system_info()
return sys_info.get(MetaKey.SITE_NAME, "")
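# --- Illustrative usage sketch (hedged; runs only inside an NVFlare job) ---
# The intended calling sequence from a client training script, assuming the job
# provides config/config_exchange.json and `local_train` is a user-defined function:
#
#   import nvflare.client as flare
#
#   flare.init()
#   input_model = flare.receive()           # pulls the global FLModel
#   print(flare.system_info())              # job_id / site_name / total_rounds
#   new_params = local_train(input_model.params)
#   flare.send(flare.FLModel(params=new_params))  # submits the update, then clears the registry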
| NVFlare-main | nvflare/client/api.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable
from nvflare.app_common.abstract.fl_model import FLModel
from nvflare.app_common.model_exchange.constants import ModelExchangeFormat
from .constants import CONST_ATTRS
def get_meta_from_fl_model(fl_model: FLModel, attrs: Iterable[str]) -> Dict:
"""Get metadata from an FLModel object.
Args:
fl_model: an FLModel object.
attrs: attributes to get from FLModel.
Returns:
A dictionary with attribute name as key and FLModel's attribute as value.
"""
meta = {}
for attr in attrs:
if hasattr(fl_model, attr):
meta[attr] = getattr(fl_model, attr)
elif attr in fl_model.meta:
meta[attr] = fl_model.meta[attr]
else:
raise RuntimeError(f"can't find attribute {attr} in fl_model.")
return meta
def set_fl_model_with_meta(fl_model: FLModel, meta: Dict, attrs):
"""Sets FLModel attributes.
Args:
fl_model: an FLModel object.
meta: a dict contains attributes.
attrs: attributes to set.
"""
for attr in attrs:
setattr(fl_model, attr, meta[attr])
meta.pop(attr)
def copy_fl_model_attributes(src: FLModel, dst: FLModel, attrs=CONST_ATTRS):
"""Copies FLModel attributes from source to destination.
Args:
src: source FLModel object.
dst: destination FLModel object.
attrs: attributes to copy.
"""
for attr in attrs:
setattr(dst, attr, getattr(src, attr))
def numerical_params_diff(original: Dict, new: Dict) -> Dict:
"""Calculates the numerical parameter difference.
Args:
original: A dict of numerical values.
new: A dict of numerical values.
Returns:
A dict with same key as original dict,
value are the difference between original and new.
"""
diff_dict = {}
for k in original:
if k not in new:
continue
if isinstance(new[k], list) and isinstance(original[k], list):
diff = [new[k][i] - original[k][i] for i in range(len(new[k]))]
else:
diff = new[k] - original[k]
diff_dict[k] = diff
return diff_dict
DIFF_FUNCS = {ModelExchangeFormat.PYTORCH: numerical_params_diff, ModelExchangeFormat.NUMPY: numerical_params_diff}
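# --- Illustrative sketch (self-contained; not part of the original module) ---
# numerical_params_diff() keeps only the keys present in both dicts and returns
# new - original per key (element-wise for lists):
if __name__ == "__main__":
    original = {"w": [1.0, 2.0], "b": 0.5, "dropped": 3.0}
    new = {"w": [1.5, 1.0], "b": 0.75}
    print(numerical_params_diff(original, new))  # {'w': [0.5, -1.0], 'b': 0.25}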
| NVFlare-main | nvflare/client/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Optional
from nvflare.app_common.abstract.fl_model import FLModel, ParamsType
from nvflare.app_common.model_exchange.model_exchanger import ModelExchanger
from .config import ClientConfig
from .constants import SYS_ATTRS
from .utils import DIFF_FUNCS, get_meta_from_fl_model
class ModelRegistry:
"""This class is used to remember attributes that need to share for a user code.
For example, after "global_evaluate" we should remember the "metrics" value.
And set that into the model that we want to submit after "train".
For each user file:
- we only need 1 model exchanger.
- we only need to pull global model once
"""
def __init__(self, model_exchanger: ModelExchanger, config: ClientConfig):
self.model_exchanger = model_exchanger
self.config = config
self.cached_model: Optional[FLModel] = None
self.cache_loaded = False
self.metrics = None
self.sys_info = None
self.output_meta = {}
def receive(self):
self.cached_model = self.model_exchanger.receive_model()
self.sys_info = get_meta_from_fl_model(self.cached_model, SYS_ATTRS)
self.cache_loaded = True
def get_model(self):
if not self.cache_loaded:
self.receive()
return copy.deepcopy(self.cached_model)
def get_sys_info(self):
if not self.cache_loaded:
self.receive()
return self.sys_info
def send(self, model: FLModel) -> None:
if self.config.get_transfer_type() == "DIFF":
exchange_format = self.config.get_exchange_format()
diff_func = DIFF_FUNCS.get(exchange_format, None)
if diff_func is None:
raise RuntimeError(f"no default params diff function for {exchange_format}")
elif self.cached_model is None:
raise RuntimeError("no received model")
elif model.params is not None:
try:
model.params = diff_func(original=self.cached_model.params, new=model.params)
model.params_type = ParamsType.DIFF
except Exception as e:
raise RuntimeError(f"params diff function failed: {e}")
elif model.metrics is None:
raise RuntimeError("the model to send does not have either params or metrics")
self.model_exchanger.submit_model(model=model)
def clear(self):
self.cached_model = None
self.cache_loaded = False
self.sys_info = None
self.metrics = None
self.model_exchanger.finalize(close_pipe=False)
def __str__(self):
return f"{self.__class__.__name__}(config: {self.config.get_config()})"
| NVFlare-main | nvflare/client/model_registry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.utils.import_utils import optional_import
pytorch_lightning, ok = optional_import(module="pytorch_lightning")
if ok:
from nvflare.app_common.abstract.fl_model import FLModel as FLModel
from nvflare.app_common.abstract.fl_model import ParamsType as ParamsType
from nvflare.app_opt.lightning import FLCallback as FLCallback
from nvflare.app_opt.lightning import patch as patch
from nvflare.client import params_diff as params_diff
from nvflare.client import send as send
from nvflare.client import system_info as system_info
| NVFlare-main | nvflare/client/lightning/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from nvflare.apis.fl_component import FLComponent
class Widget(FLComponent):
"""Pre-defined components that address specific needs.
Some examples of such needs:
- report current status
- dynamically change its tunable parameters
- record processing errors
- stats recording
Each widget is a singleton object that is registered with the Engine with a
unique ID.
All built-in widget IDs are documented in the WidgetID class.
"""
def __init__(self):
"""Init the Widget."""
FLComponent.__init__(self)
class WidgetID(str, Enum):
INFO_COLLECTOR = "info_collector"
COMPONENT_CALLER = "component_caller"
FED_EVENT_RUNNER = "fed_event_runner"
| NVFlare-main | nvflare/widgets/widget.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from nvflare.apis.client_engine_spec import ClientEngineSpec
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import EventScope, FedEventHeader, FLContextKey, ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.server_engine_spec import ServerEngineSpec
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.widgets.widget import Widget
FED_EVENT_TOPIC = "fed.event"
class FedEventRunner(Widget):
def __init__(self, topic=FED_EVENT_TOPIC, regular_interval=0.01, grace_period=2.0):
"""Init FedEventRunner.
The FedEventRunner handles posting and receiving of fed events.
The system will do its best to fire off all events in the queue before shutdown
using the ABOUT_TO_END_RUN event and a grace period during END_RUN.
Args:
            topic: the fed event topic to be handled. Defaults to 'fed.event'.
            regular_interval: interval (in seconds) between attempts to post queued incoming events.
            grace_period: extra time (in seconds) to wait for remaining events to be posted during END_RUN.
        """
Widget.__init__(self)
self.topic = topic
self.abort_signal = None
self.asked_to_stop = False
self.regular_interval = regular_interval
self.grace_period = grace_period
self.engine = None
self.last_timestamps = {} # client name => last_timestamp
self.in_events = []
self.in_lock = threading.Lock()
self.poster = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.engine = fl_ctx.get_engine()
self.engine.register_aux_message_handler(topic=self.topic, message_handle_func=self._receive)
self.abort_signal = fl_ctx.get_run_abort_signal()
self.asked_to_stop = False
elif event_type == EventType.END_RUN:
self.asked_to_stop = True
if self.poster is not None and self.poster.is_alive():
self.poster.join()
else:
# handle outgoing fed events
event_scope = fl_ctx.get_prop(key=FLContextKey.EVENT_SCOPE, default=EventScope.LOCAL)
if event_scope != EventScope.FEDERATION:
return
event_data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if not isinstance(event_data, Shareable):
self.log_error(fl_ctx, "bad fed event: expect data to be Shareable but got {}".format(type(event_data)))
return
direction = event_data.get_header(FedEventHeader.DIRECTION, "out")
if direction != "out":
# ignore incoming events
return
event_data.set_header(FedEventHeader.EVENT_TYPE, event_type)
event_data.set_header(FedEventHeader.ORIGIN, fl_ctx.get_identity_name())
event_data.set_header(FedEventHeader.TIMESTAMP, time.time())
targets = event_data.get_header(FedEventHeader.TARGETS, None)
self.fire_and_forget_request(request=event_data, fl_ctx=fl_ctx, targets=targets, secure=False)
def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None, secure=False):
pass
def _receive(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
peer_name = request.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
if not peer_name:
self.log_error(fl_ctx, "missing identity name of the data sender")
return make_reply(ReturnCode.MISSING_PEER_CONTEXT)
timestamp = request.get_header(FedEventHeader.TIMESTAMP, None)
if timestamp is None:
self.log_error(fl_ctx, "missing timestamp in incoming fed event")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
event_type = request.get_header(FedEventHeader.EVENT_TYPE, None)
if event_type is None:
self.log_error(fl_ctx, "missing event_type in incoming fed event")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
with self.in_lock:
if self.poster is None:
# create the poster thread now
self.poster = threading.Thread(target=self._post, name="fed_event_poster")
self.poster.start()
last_timestamp = self.last_timestamps.get(peer_name, None)
if last_timestamp is None or timestamp > last_timestamp:
# we only keep new items, in case the peer somehow sent old items
request.set_header(FedEventHeader.DIRECTION, "in")
self.in_events.append(request)
self.last_timestamps[peer_name] = timestamp
# NOTE: we do not fire event here since event process could take time.
# Instead, we simply add the package to the queue and return quickly.
# The posting of events will be handled in the poster thread
return make_reply(ReturnCode.OK)
def _post(self):
"""Post an event.
        During ABOUT_TO_END_RUN, sleep_time is 0 and the system will flush
        in_events by firing events without delay.
        During END_RUN, the system will wait for self.grace_period, even if the queue is empty,
        so that any new items can still be processed.
        However, since the system does not guarantee that the receiving side of _post is still
        alive, we catch the exception and show warning messages to users if events cannot
        be handled by the receiving side.
"""
sleep_time = self.regular_interval
while True:
time.sleep(sleep_time)
if self.abort_signal.triggered:
break
n = len(self.in_events)
if n > 0:
sleep_time = 0.0
with self.in_lock:
event_to_post = self.in_events.pop(0)
elif self.asked_to_stop:
time.sleep(self.grace_period)
if len(self.in_events) > 0:
continue
else:
break
else:
sleep_time = self.regular_interval
continue
with self.engine.new_context() as fl_ctx:
if self.asked_to_stop:
self.log_warning(fl_ctx, f"{n} items remained in in_events. Will stop when it reaches 0.")
fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=event_to_post, private=True, sticky=False)
fl_ctx.set_prop(key=FLContextKey.EVENT_SCOPE, value=EventScope.FEDERATION, private=True, sticky=False)
event_type = event_to_post.get_header(FedEventHeader.EVENT_TYPE)
try:
self.engine.fire_event(event_type=event_type, fl_ctx=fl_ctx)
except Exception as e:
if self.asked_to_stop:
self.log_warning(fl_ctx, f"event {event_to_post} fired unsuccessfully during END_RUN")
else:
raise e
class ServerFedEventRunner(FedEventRunner):
def __init__(self, topic=FED_EVENT_TOPIC, regular_interval=0.01, grace_period=2.0):
"""Init ServerFedEventRunner."""
FedEventRunner.__init__(self, topic, regular_interval, grace_period)
def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None, secure=False):
if not isinstance(self.engine, ServerEngineSpec):
raise TypeError("self.engine must be ServerEngineSpec but got {}".format(type(self.engine)))
self.engine.fire_and_forget_aux_request(
topic=self.topic, targets=targets, request=request, fl_ctx=fl_ctx, secure=secure
)
class ClientFedEventRunner(FedEventRunner):
def __init__(self, topic=FED_EVENT_TOPIC):
"""Init ClientFedEventRunner."""
FedEventRunner.__init__(self, topic)
self.ready = False
def handle_event(self, event_type: str, fl_ctx: FLContext):
super().handle_event(event_type, fl_ctx)
if event_type == EventType.START_RUN:
self.ready = True
def fire_and_forget_request(self, request: Shareable, fl_ctx: FLContext, targets=None, secure=False):
if not self.ready:
self.log_warning(fl_ctx, "Engine in not ready, skip the fed event firing.")
return
if not isinstance(self.engine, ClientEngineSpec):
raise TypeError("self.engine must be ClientEngineSpec but got {}".format(type(self.engine)))
self.engine.fire_and_forget_aux_request(topic=self.topic, request=request, fl_ctx=fl_ctx, secure=secure)
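# --- Illustrative sketch (hedged; not part of the original module) ---
# Any FLComponent can emit a fed event by firing a local event whose data is a
# Shareable and whose scope is EventScope.FEDERATION; the runner above relays it
# over the "fed.event" aux channel and the receiving side re-fires it locally.
# Assuming FLComponent.fire_fed_event is available (it sets the scope/data props):
#
#   data = Shareable()
#   data["accuracy"] = 0.9                  # hypothetical payload
#   self.fire_fed_event("my.custom.event", event_data=data, fl_ctx=fl_ctx)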
| NVFlare-main | nvflare/widgets/fed_event.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/widgets/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from nvflare.apis.analytix import AnalyticsData
from nvflare.apis.dxo import from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from .widget import Widget
class GroupInfoCollector(object):
def __init__(self):
"""Records the information using a dict of dict.
Note:
Key is group name and value is the information dictionary.
"""
self.info = {}
def set_info(self, group_name: str, info: dict):
self.info[group_name] = info
def add_info(self, group_name: str, info: dict):
if group_name not in self.info:
self.info[group_name] = info
else:
self.info[group_name].update(info)
class InfoCollector(Widget):
CATEGORY_STATS = "stats"
CATEGORY_ERROR = "error"
EVENT_TYPE_GET_STATS = "info_collector.get_stats"
CTX_KEY_STATS_COLLECTOR = "info_collector.stats_collector"
def __init__(self):
"""A widget for information collection.
Note:
self.categories structure:
category (dict)
group (dict)
key/value (dict)
"""
super().__init__()
self.categories = {}
self.engine = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.reset_all()
self.engine = fl_ctx.get_engine()
elif event_type == EventType.END_RUN:
self.engine = None
elif event_type in (
EventType.CRITICAL_LOG_AVAILABLE,
EventType.ERROR_LOG_AVAILABLE,
EventType.WARNING_LOG_AVAILABLE,
EventType.EXCEPTION_LOG_AVAILABLE,
):
origin = fl_ctx.get_prop(FLContextKey.EVENT_ORIGIN, None)
if origin:
group_name = str(origin)
else:
group_name = "general"
data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if not isinstance(data, Shareable):
# not a valid error report
self.log_error(
fl_ctx=fl_ctx,
msg="wrong event data type for event {}: expect Shareable but got {}".format(
event_type, type(data)
),
fire_event=False,
)
return
try:
dxo = from_shareable(data)
            except Exception:
self.log_exception(
fl_ctx=fl_ctx, msg="invalid event data type for event {}".format(event_type), fire_event=False
)
return
analytic_data = AnalyticsData.from_dxo(dxo)
if not analytic_data:
return
if event_type == EventType.CRITICAL_LOG_AVAILABLE:
key = "critical"
elif event_type == EventType.ERROR_LOG_AVAILABLE:
key = "error"
elif event_type == EventType.WARNING_LOG_AVAILABLE:
key = "warning"
else:
key = "exception"
self.add_error(group_name=group_name, key=key, err=analytic_data.value)
def get_run_stats(self) -> dict:
"""Gets status for this current run.
Returns:
A dictionary that contains the status for this run.
"""
# NOTE: it's important to assign self.engine to a new var!
# This is because another thread may fire the END_RUN event, which will cause
# self.engine to be set to None, just after checking it being None and before using it!
engine = self.engine
if not engine:
return {}
# NOTE: we need a new context here to make sure all sticky props are copied!
        # We create a new GroupInfoCollector to hold status info.
        # Do not use the InfoCollector itself for thread safety - multiple calls to
        # this method (from parallel admin commands) are possible at the same time!
        with engine.new_context() as fl_ctx:
            coll = GroupInfoCollector()
            fl_ctx.set_prop(key=self.CTX_KEY_STATS_COLLECTOR, value=coll, sticky=False, private=True)
            engine.fire_event(event_type=self.EVENT_TYPE_GET_STATS, fl_ctx=fl_ctx)
            # Get the GroupInfoCollector from fl_ctx; it could have been updated by other components.
coll = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR)
return coll.info
def add_info(self, category_name: str, group_name: str, key: str, value):
"""Adds information to the specified category / group.
Args:
category_name (str): The top level distinction is called category.
group_name (str): One level down category is called group
key (str): The key to be recorded inside the dict.
value (str): The value to be recorded inside the dict.
"""
category = self.categories.get(category_name, None)
if not category:
category = dict()
self.categories[category_name] = category
group = category.get(group_name, None)
if not group:
group = dict()
category[group_name] = group
group[key] = value
def set_info(self, category_name: str, group_name: str, info: dict):
"""Sets information to the specified category / group.
Args:
category_name (str): The top level distinction is called category.
group_name (str): One level down category is called group
info (dict): The dict to be recorded.
Note:
This sets the entire dictionary vs add_info only add a key-value pair.
"""
category = self.categories.get(category_name, None)
if not category:
category = dict()
self.categories[category_name] = category
category[group_name] = info
def get_category(self, category_name: str):
"""Gets the category dict.
Args:
category_name (str): The name of the category.
Returns:
A dictionary of specified category.
"""
return self.categories.get(category_name, None)
def get_group(self, category_name: str, group_name: str):
"""Gets the group dict.
Args:
category_name (str): The name of the category.
group_name (str): The name of the group_name.
Returns:
A dictionary of specified category/group.
"""
cat = self.categories.get(category_name, None)
if not cat:
return None
return cat.get(group_name, None)
def reset_all(self):
"""Resets all information collected."""
self.categories = {}
def reset_category(self, category_name: str):
"""Resets the specified category information collected.
Args:
category_name (str): The name of the category.
"""
self.categories[category_name] = {}
def reset_group(self, category_name: str, group_name: str):
"""Resets the specified category/group information collected.
Args:
category_name (str): The name of the category.
group_name (str): The name of the group_name.
"""
cat = self.categories.get(category_name, None)
if not cat:
return
        cat[group_name] = {}
def add_error(self, group_name: str, key: str, err: str):
"""Adds error information to error category.
Args:
group_name (str): One level down category is called group
key (str): The key to be recorded inside the dict.
err (str): The error value to be put in.
"""
now = datetime.datetime.now()
value = "{}: {}".format(now.strftime("%Y-%m-%d %H:%M:%S"), err)
self.add_info(category_name=self.CATEGORY_ERROR, group_name=group_name, key=key, value=value)
def get_errors(self):
"""Gets the error category information."""
return self.get_category(self.CATEGORY_ERROR)
def reset_errors(self):
"""Resets the error category information."""
self.reset_category(self.CATEGORY_ERROR)
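# --- Illustrative sketch (partly runnable; not part of the original module) ---
# Components contribute to get_run_stats() by handling EVENT_TYPE_GET_STATS and
# filling the GroupInfoCollector placed in the FLContext, e.g. (hypothetical component):
#
#   def handle_event(self, event_type, fl_ctx):
#       if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
#           coll = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR)
#           coll.add_info("my_component", {"rounds_done": self.rounds_done})
#
# The collector itself is plain dict bookkeeping:
if __name__ == "__main__":
    coll = GroupInfoCollector()
    coll.set_info("trainer", {"epochs": 5})
    coll.add_info("trainer", {"loss": 0.1})
    print(coll.info)  # {'trainer': {'epochs': 5, 'loss': 0.1}}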
| NVFlare-main | nvflare/widgets/info_collector.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from .widget import Widget
class CallInfo(object):
def __init__(self, target: str, action: str, params: dict):
"""Required information to call a component.
Args:
target (str): target component(s) that the call is applied to
action (str): action of the call
params (dict): params of the call
"""
self.target = target
self.action = action
self.params = params
self.results = {} # results of components that tried to apply the params
def record_result(self, target: str, result: str = "OK"):
"""Records the result.
Args:
target (str): the target component(s) that is called
result (str): the result generated by calling the target component(s)
"""
self.results[target] = result
class ComponentCaller(Widget):
EVENT_TYPE_CALL_COMPONENT = "comp_caller.call"
CTX_KEY_CALL_INFO = "comp_caller.call_info"
def __init__(self):
"""A widget enables calling component(s)."""
super().__init__()
self.engine = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.engine = fl_ctx.get_engine()
elif event_type == EventType.END_RUN:
self.engine = None
def call_components(self, target: str, action: str, params: dict):
"""Makes a call to component(s).
Args:
target (str): the target spec of the component(s) to be called.
action (str): action of the call
params (dict): parameters for the call
Returns:
None or a dict of result: comp name => result string
NOTE:
            each component that wants to participate in the call mechanism must
            (a sketch of such a component appears at the end of this module):
            - Listen to the event EVENT_TYPE_CALL_COMPONENT
            - In the event handler, decide whether the call is applicable to it by comparing itself to
            the 'target'. The target could be a specific component ID, or a type of components
            - decide further whether the call is applicable to it by looking at the 'action'.
            Conceptually, the action is like a function to be called on the component.
            If the component doesn't support the action, simply ignore the call.
            - if the call is applicable, always report the execution status back to the caller.
"""
# NOTE: it's important to assign self.engine to a new var!
# This is because another thread may fire the END_RUN event, which will cause
# self.engine to be set to None, just after checking it being None and before using it!
engine = self.engine
if not engine:
return None
# NOTE: we need a new context here to make sure all sticky props are copied!
with engine.new_context() as fl_ctx:
info = CallInfo(target=target, action=action, params=params)
fl_ctx.set_prop(key=self.CTX_KEY_CALL_INFO, value=info, sticky=False, private=True)
engine.fire_event(event_type=self.EVENT_TYPE_CALL_COMPONENT, fl_ctx=fl_ctx)
return info.results
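# --- Illustrative sketch (hedged; not part of the original module) ---
# A component that participates in the call mechanism, following the NOTE in
# call_components() above (the component id and tunable are hypothetical):
#
#   def handle_event(self, event_type, fl_ctx):
#       if event_type == ComponentCaller.EVENT_TYPE_CALL_COMPONENT:
#           info = fl_ctx.get_prop(ComponentCaller.CTX_KEY_CALL_INFO)
#           if info.target == "my_component_id" and info.action == "set_lr":
#               self.lr = info.params.get("lr", self.lr)
#               info.record_result("my_component_id", "OK")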
| NVFlare-main | nvflare/widgets/comp_caller.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/tool/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nvflare.tool.package_checker import (
ClientPackageChecker,
NVFlareConsolePackageChecker,
OverseerPackageChecker,
ServerPackageChecker,
)
def define_preflight_check_parser(parser):
parser.add_argument("-p", "--package_path", required=True, type=str, help="path to specific package")
def check_packages(args):
package_path = args.package_path
if not os.path.isdir(package_path):
print(f"package_path {package_path} is not a valid directory.")
return
if not os.path.isdir(os.path.join(package_path, "startup")):
print(f"package in {package_path} is not in the correct format.")
return
package_checkers = [
OverseerPackageChecker(),
ServerPackageChecker(),
ClientPackageChecker(),
NVFlareConsolePackageChecker(),
]
for p in package_checkers:
p.init(package_path=package_path)
ret_code = 0
if p.should_be_checked():
ret_code = p.check()
p.print_report()
if ret_code == 1:
p.stop_dry_run(force=False)
elif ret_code == 2:
p.stop_dry_run(force=True)
def main():
parser = argparse.ArgumentParser("nvflare preflight check")
define_preflight_check_parser(parser)
args = parser.parse_args()
check_packages(args)
if __name__ == "__main__":
main()
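# Example invocation (hypothetical package path), assuming the 'nvflare' CLI wires this
# parser up as the 'preflight_check' sub-command:
#   nvflare preflight_check -p /workspace/example_project/prod_00/server1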
| NVFlare-main | nvflare/tool/preflight_check.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import List
from nvflare.fuel.flare_api.api_spec import JobNotFound, NoConnection
from nvflare.fuel.flare_api.flare_api import Session
def shutdown_system(
prod_dir: str, username: str = "admin@nvidia.com", secure_mode: bool = True, timeout_in_sec: int = 30
):
admin_user_dir = os.path.join(prod_dir, username)
print("connect to nvflare server")
sess = None
conn_timeout = 10
try:
sess = Session(username=username, startup_path=admin_user_dir, secure_mode=secure_mode)
sess.try_connect(conn_timeout)
shutdown_system_by_session(sess=sess, timeout_in_sec=timeout_in_sec)
except NoConnection:
# system is already shutdown
return
finally:
if sess:
sess.close()
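# Usage sketch (hypothetical paths/values): shut down a secure provisioned deployment,
# aborting any running jobs first.
#   shutdown_system("/workspace/example_project/prod_00", username="admin@nvidia.com")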
def shutdown_system_by_session(sess: Session, timeout_in_sec: int = 20):
print("checking running jobs")
jobs = sess.list_jobs()
active_job_ids = get_running_job_ids(jobs)
if len(active_job_ids) > 0:
print("Warning: current running jobs will be aborted")
abort_jobs(sess, active_job_ids)
print("shutdown NVFLARE")
sess.api.do_command("shutdown all")
wait_for_system_shutdown(sess, timeout_in_sec=timeout_in_sec)
def get_running_job_ids(jobs: list) -> List[str]:
if len(jobs) > 0:
        running_job_ids = [job["job_id"] for job in jobs if job["status"] == "RUNNING"]
return running_job_ids
else:
return []
def abort_jobs(sess, job_ids):
for job_id in job_ids:
try:
sess.abort_job(job_id)
except JobNotFound:
# ignore invalid job id
pass
def wait_for_system_shutdown(sess: Session, timeout_in_sec: int = 30):
start = time.time()
duration = 0
cnt = 0
status = None
while (status is None or status == "started") and duration < timeout_in_sec:
try:
sys_info = sess.get_system_info()
status = sys_info.server_info.status
curr = time.time()
duration = curr - start
if cnt % 25 == 0:
print("waiting system to shutdown")
cnt += 1
time.sleep(0.2)
except Exception:
# Server is already shutdown
return
def wait_for_system_start(
num_clients: int,
prod_dir: str,
username: str = "admin",
secure_mode: bool = False,
second_to_wait: int = 10,
timeout_in_sec: int = 30,
):
print(f"wait for {second_to_wait} seconds before FL system is up")
time.sleep(second_to_wait)
# just in case try to connect before server started
flare_not_ready = True
start = time.time()
duration = 0
admin_user_dir = os.path.join(prod_dir, username)
conn_timeout = 10.0
while flare_not_ready and duration < timeout_in_sec:
print("trying to connect to server")
sess = None
try:
sess = Session(username=username, startup_path=admin_user_dir, secure_mode=secure_mode)
sess.try_connect(conn_timeout)
sys_info = sess.get_system_info()
print(f"Server info:\n{sys_info.server_info}")
print("\nClient info")
for client in sys_info.client_info:
print(client)
flare_not_ready = len(sys_info.client_info) < num_clients
curr = time.time()
duration = curr - start
time.sleep(2)
except NoConnection:
# server is not up yet
pass
except Exception as e:
print("failure", e)
finally:
if sess:
sess.close()
if flare_not_ready:
raise RuntimeError("can't not connect to server within {timeout_in_sec} sec")
else:
print("ready to go")
| NVFlare-main | nvflare/tool/api_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import random
import shutil
import socket
import subprocess
import sys
import time
from typing import Dict, List, Optional, OrderedDict, Tuple
import yaml
from nvflare.cli_exception import CLIException
from nvflare.cli_unknown_cmd_exception import CLIUnknownCmdException
from nvflare.fuel.utils.class_utils import instantiate_class
from nvflare.fuel.utils.gpu_utils import get_host_gpu_ids
from nvflare.lighter.provision import gen_default_project_config, prepare_project
from nvflare.lighter.service_constants import FlareServiceConstants as SC
from nvflare.lighter.spec import Provisioner
from nvflare.lighter.utils import load_yaml, update_project_server_name_config, update_storage_locations
from nvflare.tool.api_utils import shutdown_system
DEFAULT_WORKSPACE = "/tmp/nvflare/poc"
DEFAULT_PROJECT_NAME = "example_project"
CMD_PREPARE_POC = "prepare"
CMD_PREPARE_EXAMPLES = "prepare-examples"
CMD_START_POC = "start"
CMD_STOP_POC = "stop"
CMD_CLEAN_POC = "clean"
def client_gpu_assignments(clients: List[str], gpu_ids: List[int]) -> Dict[str, List[int]]:
n_gpus = len(gpu_ids)
n_clients = len(clients)
gpu_assignments = {}
    if n_gpus == 0:
        for client in clients:
            gpu_assignments[client] = []
    elif 0 < n_gpus <= n_clients:
for client_id, client in enumerate(clients):
gpu_index = client_id % n_gpus
gpu_assignments[client] = [gpu_ids[gpu_index]]
elif n_gpus > n_clients > 0:
client_name_map = {}
for client_id, client in enumerate(clients):
client_name_map[client_id] = client
for gpu_index, gpu_id in enumerate(gpu_ids):
client_id = gpu_index % n_clients
client = client_name_map[client_id]
if client not in gpu_assignments:
gpu_assignments[client] = []
gpu_assignments[client].append(gpu_ids[gpu_index])
return gpu_assignments
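# Illustrative examples of the round-robin assignment above (values are hypothetical):
#   client_gpu_assignments(["site-1", "site-2", "site-3"], [0, 1])
#       -> {"site-1": [0], "site-2": [1], "site-3": [0]}
#   client_gpu_assignments(["site-1", "site-2"], [0, 1, 2])
#       -> {"site-1": [0, 2], "site-2": [1]}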
def get_service_command(cmd_type: str, prod_dir: str, service_dir, service_config: Dict) -> str:
cmd = ""
admin_dir_name = service_config.get(SC.FLARE_PROJ_ADMIN, SC.FLARE_PROJ_ADMIN)
if cmd_type == SC.CMD_START:
if not service_config.get(SC.IS_DOCKER_RUN):
if service_dir == admin_dir_name:
cmd = get_cmd_path(prod_dir, service_dir, "fl_admin.sh")
else:
cmd = get_cmd_path(prod_dir, service_dir, "start.sh")
else:
if service_dir == admin_dir_name:
cmd = get_cmd_path(prod_dir, service_dir, "fl_admin.sh")
else:
cmd = get_cmd_path(prod_dir, service_dir, "docker.sh -d")
elif cmd_type == SC.CMD_STOP:
if not service_config.get(SC.IS_DOCKER_RUN):
cmd = get_stop_cmd(prod_dir, service_dir)
else:
if service_dir == admin_dir_name:
cmd = get_stop_cmd(prod_dir, service_dir)
else:
cmd = f"docker stop {service_dir}"
else:
raise CLIException("unknown cmd_type :", cmd_type)
return cmd
def get_stop_cmd(poc_workspace: str, service_dir_name: str):
service_dir = os.path.join(poc_workspace, service_dir_name)
stop_file = os.path.join(service_dir, "shutdown.fl")
return f"touch {stop_file}"
def get_nvflare_home() -> Optional[str]:
nvflare_home = None
if "NVFLARE_HOME" in os.environ:
nvflare_home = os.getenv("NVFLARE_HOME")
if nvflare_home:
if nvflare_home.endswith("/"):
nvflare_home = nvflare_home[:-1]
return nvflare_home
def get_upload_dir(startup_dir) -> str:
console_config_path = os.path.join(startup_dir, "fed_admin.json")
try:
with open(console_config_path, "r") as f:
console_config = json.load(f)
upload_dir = console_config["admin"]["upload_dir"]
except IOError as e:
raise CLIException(f"failed to load {console_config_path} {e}")
except json.decoder.JSONDecodeError as e:
raise CLIException(f"failed to load {console_config_path}, please double check the configuration {e}")
return upload_dir
def is_dir_empty(path: str):
    target_dir = os.listdir(path)
    return len(target_dir) == 0
def prepare_examples(cmd_args):
poc_workspace = get_poc_workspace()
_prepare_examples(cmd_args.examples, poc_workspace)
def _prepare_examples(example_dir: str, workspace: str, config_packages: Optional[Tuple] = None):
project_config, service_config = config_packages if config_packages else setup_service_config(workspace)
project_name = project_config.get("name")
if example_dir is None or example_dir == "":
raise CLIException("example_dir is required")
src = os.path.abspath(example_dir)
if not os.path.isdir(src):
raise CLIException(f"example_dir '{example_dir}' is not valid directory")
prod_dir = get_prod_dir(workspace, project_name)
if not os.path.exists(prod_dir):
raise CLIException("please use nvflare poc prepare to create workspace first")
console_dir = os.path.join(prod_dir, f"{service_config[SC.FLARE_PROJ_ADMIN]}")
startup_dir = os.path.join(console_dir, SC.STARTUP)
transfer = get_upload_dir(startup_dir)
dst = os.path.join(console_dir, transfer)
if not is_dir_empty(dst):
print(" ")
answer = input(f"Examples at {dst} is already exists, replace with new one ? (y/N) ")
if answer.strip().upper() == "Y":
if os.path.islink(dst):
os.unlink(dst)
if os.path.isdir(dst):
shutil.rmtree(dst, ignore_errors=True)
print(f"link examples from {src} to {dst}")
os.symlink(src, dst)
else:
if os.path.isdir(dst):
shutil.rmtree(dst, ignore_errors=True)
print(f"link examples from {src} to {dst}")
os.symlink(src, dst)
def get_prod_dir(workspace, project_name: str = DEFAULT_PROJECT_NAME):
prod_dir = os.path.join(workspace, project_name, "prod_00")
return prod_dir
def gen_project_config_file(workspace: str) -> str:
project_file = os.path.join(workspace, "project.yml")
if not os.path.isfile(project_file):
gen_default_project_config("dummy_project.yml", project_file)
return project_file
def verify_host(host_name: str) -> bool:
    try:
        socket.gethostbyname(host_name)
        return True
    except socket.gaierror:
        return False
def verify_hosts(project_config: OrderedDict):
hosts: List[str] = get_project_hosts(project_config)
for h in hosts:
if not verify_host(h):
print(f"host name: '{h}' is not defined, considering modify /etc/hosts to add localhost alias")
exit(0)
def get_project_hosts(project_config) -> List[str]:
participants: List[dict] = project_config["participants"]
return [p["name"] for p in participants if p["type"] == "client" or p["type"] == "server"]
def get_fl_server_name(project_config: OrderedDict) -> str:
participants: List[dict] = project_config["participants"]
servers = [p["name"] for p in participants if p["type"] == "server"]
if len(servers) == 1:
return servers[0]
else:
raise CLIException(f"project should only have one server, but {len(servers)} are provided: {servers}")
def get_proj_admin(project_config: OrderedDict):
participants: List[dict] = project_config["participants"]
admins = [p["name"] for p in participants if p["type"] == "admin"]
if len(admins) == 1:
return admins[0]
else:
raise CLIException(f"project should only have only one project admin, but {len(admins)} are provided: {admins}")
def get_fl_client_names(project_config: OrderedDict) -> List[str]:
participants: List[dict] = project_config["participants"]
client_names = [p["name"] for p in participants if p["type"] == "client"]
return client_names
def prepare_builders(project_dict: OrderedDict) -> List:
builders = list()
admin_name = [p["name"] for p in project_dict["participants"] if p["type"] == "admin"][0]
for b in project_dict.get("builders"):
path = b.get("path")
args = b.get("args")
if b.get("path") == "nvflare.lighter.impl.static_file.StaticFileBuilder":
path = "nvflare.lighter.impl.local_static_file.LocalStaticFileBuilder"
args["overseer_agent"]["args"]["sp_end_point"] = "localhost:8002:8003"
args["username"] = admin_name
elif b.get("path") == "nvflare.lighter.impl.cert.CertBuilder":
path = "nvflare.lighter.impl.local_cert.LocalCertBuilder"
builders.append(instantiate_class(path, args))
return builders
def local_provision(
clients: List[str],
number_of_clients: int,
workspace: str,
docker_image: str,
use_he: bool = False,
project_conf_path: str = "",
) -> Tuple:
user_provided_project_config = False
if project_conf_path:
src_project_file = project_conf_path
dst_project_file = os.path.join(workspace, "project.yml")
user_provided_project_config = True
else:
src_project_file = gen_project_config_file(workspace)
dst_project_file = src_project_file
print(f"provision at {workspace} for {number_of_clients} clients with {src_project_file}")
project_config: OrderedDict = load_yaml(src_project_file)
if not project_config:
raise CLIException(f"empty or invalid project config from project yaml file: {src_project_file}")
if not user_provided_project_config:
project_config = update_server_name(project_config)
project_config = update_clients(clients, number_of_clients, project_config)
project_config = add_he_builder(use_he, project_config)
if docker_image:
project_config = update_static_file_builder(docker_image, project_config)
save_project_config(project_config, dst_project_file)
service_config = get_service_config(project_config)
project = prepare_project(project_config)
builders = prepare_builders(project_config)
provisioner = Provisioner(workspace, builders)
provisioner.provision(project)
return project_config, service_config
def get_service_config(project_config):
service_config = {
SC.FLARE_SERVER: get_fl_server_name(project_config),
SC.FLARE_PROJ_ADMIN: get_proj_admin(project_config),
SC.FLARE_CLIENTS: get_fl_client_names(project_config),
SC.IS_DOCKER_RUN: is_docker_run(project_config),
}
return service_config
def save_project_config(project_config, project_file):
with open(project_file, "w") as file:
yaml.dump(project_config, file)
def update_server_name(project_config):
old_server_name = get_fl_server_name(project_config)
server_name = "server"
if old_server_name != server_name:
update_project_server_name_config(project_config, old_server_name, server_name)
return project_config
def is_docker_run(project_config: OrderedDict):
if "builders" not in project_config:
return False
static_builder = [
b
for b in project_config.get("builders")
if b.get("path") == "nvflare.lighter.impl.static_file.StaticFileBuilder"
][0]
return "docker_image" in static_builder["args"]
def update_static_file_builder(docker_image: str, project_config: OrderedDict):
# need to keep the order of the builders
for b in project_config.get("builders"):
if b.get("path") == "nvflare.lighter.impl.static_file.StaticFileBuilder":
b["args"]["docker_image"] = docker_image
return project_config
def add_docker_builder(use_docker: bool, project_config: OrderedDict):
if use_docker:
docker_builder = {
"path": "nvflare.lighter.impl.docker.DockerBuilder",
"args": {"base_image": "python:3.8", "requirements_file": "requirements.txt"},
}
project_config["builders"].append(docker_builder)
return project_config
def add_he_builder(use_he: bool, project_config: OrderedDict):
if use_he:
he_builder = {
"path": "nvflare.lighter.impl.he.HEBuilder",
"args": {},
}
project_config["builders"].append(he_builder)
return project_config
def update_clients(clients: List[str], n_clients: int, project_config: OrderedDict) -> OrderedDict:
requested_clients = prepare_clients(clients, n_clients)
participants: List[dict] = project_config["participants"]
new_participants = [p for p in participants if p["type"] != "client"]
for client in requested_clients:
client_dict = {"name": client, "type": "client", "org": "nvidia"}
new_participants.append(client_dict)
project_config["participants"] = new_participants
return project_config
def prepare_clients(clients, number_of_clients):
if not clients:
clients = []
for i in range(number_of_clients):
clients.append(f"site-{(i + 1)}")
return clients
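# e.g. prepare_clients([], 3) -> ["site-1", "site-2", "site-3"], while a non-empty
# client list is returned unchanged: prepare_clients(["hospital-a"], 3) -> ["hospital-a"]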
def save_startup_kit_dir_config(workspace, project_name):
dst = get_hidden_nvflare_config_path()
prod_dir = get_prod_dir(workspace, project_name)
conf = f"""
startup_kit {{
path = {prod_dir}
}}
poc_workspace {{
path = {workspace}
}}
"""
with open(dst, "w") as file:
file.write(conf)
def prepare_poc(cmd_args):
poc_workspace = get_poc_workspace()
project_conf_path = ""
if cmd_args.project_input:
project_conf_path = cmd_args.project_input
_prepare_poc(
cmd_args.clients,
cmd_args.number_of_clients,
poc_workspace,
cmd_args.docker_image,
cmd_args.he,
project_conf_path,
)
def _prepare_poc(
clients: List[str],
number_of_clients: int,
workspace: str,
docker_image: str = None,
use_he: bool = False,
project_conf_path: str = "",
examples_dir: Optional[str] = None,
) -> bool:
if clients:
number_of_clients = len(clients)
if not project_conf_path:
print(f"prepare poc at {workspace} for {number_of_clients} clients")
else:
print(f"prepare poc at {workspace} with {project_conf_path}")
project_config = None
result = False
if os.path.exists(workspace):
answer = input(
f"This will delete poc folder in {workspace} directory and create a new one. Is it OK to proceed? (y/N) "
)
if answer.strip().upper() == "Y":
from pathlib import Path
workspace_path = Path(workspace)
project_file = Path(project_conf_path)
if workspace_path in project_file.parents:
raise CLIException(
f"\nProject file: '{project_conf_path}' is under workspace directory:"
f"'{workspace}', which is to be deleted. "
f"Please copy {project_conf_path} to different location before running this command."
)
shutil.rmtree(workspace, ignore_errors=True)
project_config = prepare_poc_provision(
clients, number_of_clients, workspace, docker_image, use_he, project_conf_path, examples_dir
)
result = True
else:
result = False
else:
project_config = prepare_poc_provision(
clients, number_of_clients, workspace, docker_image, use_he, project_conf_path, examples_dir
)
result = True
project_name = project_config.get("name") if project_config else None
save_startup_kit_dir_config(workspace, project_name)
return result
def get_home_dir():
from pathlib import Path
return Path.home()
def get_hidden_nvflare_config_path() -> str:
"""
Get the path for the hidden nvflare configuration file.
Returns:
str: The path to the hidden nvflare configuration file.
"""
home_dir = get_home_dir()
hidden_nvflare_dir = pathlib.Path(home_dir) / ".nvflare"
try:
hidden_nvflare_dir.mkdir(exist_ok=True)
except OSError as e:
raise RuntimeError(f"Error creating the hidden nvflare directory: {e}")
hidden_nvflare_config_file = hidden_nvflare_dir / "config.conf"
return str(hidden_nvflare_config_file)
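# e.g. for a user whose home directory is /home/alice (hypothetical), this resolves to
# /home/alice/.nvflare/config.conf (the .nvflare directory is created if missing)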
def prepare_poc_provision(
clients: List[str],
number_of_clients: int,
workspace: str,
docker_image: str,
use_he: bool = False,
project_conf_path: str = "",
examples_dir: Optional[str] = None,
) -> Dict:
os.makedirs(workspace, exist_ok=True)
os.makedirs(os.path.join(workspace, "data"), exist_ok=True)
project_config, service_config = local_provision(
clients, number_of_clients, workspace, docker_image, use_he, project_conf_path
)
project_name = project_config.get("name")
server_name = service_config[SC.FLARE_SERVER]
# update storage
if workspace != DEFAULT_WORKSPACE:
prod_dir = get_prod_dir(workspace, project_name)
update_storage_locations(local_dir=f"{prod_dir}/{server_name}/local", workspace=workspace)
examples_dir = get_examples_dir(examples_dir)
if examples_dir is not None:
_prepare_examples(examples_dir, workspace, None)
return project_config
def get_examples_dir(examples_dir):
if examples_dir:
return examples_dir
nvflare_home = get_nvflare_home()
default_examples_dir = os.path.join(nvflare_home, SC.EXAMPLES) if nvflare_home else None
return default_examples_dir
def _sort_service_cmds(cmd_type, service_cmds: list, service_config) -> list:
def sort_first(val):
return val[0]
order_services = []
for service_name, cmd_path in service_cmds:
if service_name == service_config[SC.FLARE_SERVER]:
order_services.append((0, service_name, cmd_path))
elif service_name == service_config[SC.FLARE_PROJ_ADMIN]:
order_services.append((sys.maxsize, service_name, cmd_path))
else:
if len(service_cmds) == 1:
order_services.append((0, service_name, cmd_path))
else:
order_services.append((random.randint(2, len(service_cmds)), service_name, cmd_path))
order_services.sort(key=sort_first)
if cmd_type == SC.CMD_STOP:
order_services.reverse()
return [(service_name, cmd_path) for n, service_name, cmd_path in order_services]
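# Ordering sketch: for CMD_START the server sorts first (key 0) and the project admin
# last (sys.maxsize), with clients in between in random order; for CMD_STOP the sorted
# order is reversed, so the admin console stops first and the server stops last.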
def get_cmd_path(poc_workspace, service_name, cmd):
service_dir = os.path.join(poc_workspace, service_name)
bin_dir = os.path.join(service_dir, SC.STARTUP)
cmd_path = os.path.join(bin_dir, cmd)
return cmd_path
def is_poc_ready(poc_workspace: str, service_config, project_config):
# check server and admin directories exist
project_name = project_config.get("name") if project_config else DEFAULT_PROJECT_NAME
prod_dir = get_prod_dir(poc_workspace, project_name)
console_dir = os.path.join(prod_dir, service_config[SC.FLARE_PROJ_ADMIN])
server_dir = os.path.join(prod_dir, service_config[SC.FLARE_SERVER])
return os.path.isdir(server_dir) and os.path.isdir(console_dir)
def validate_poc_workspace(poc_workspace: str, service_config, project_config=None):
if not is_poc_ready(poc_workspace, service_config, project_config):
raise CLIException(f"workspace {poc_workspace} is not ready, please use poc prepare to prepare poc workspace")
def validate_gpu_ids(gpu_ids: list, host_gpu_ids: list):
for gpu_id in gpu_ids:
if gpu_id not in host_gpu_ids:
            raise CLIException(
                f"gpu_id {gpu_id} is not available on the host machine; available GPUs are {host_gpu_ids}"
            )
def get_gpu_ids(user_input_gpu_ids, host_gpu_ids) -> List[int]:
    if isinstance(user_input_gpu_ids, int) and user_input_gpu_ids == -1:
gpu_ids = host_gpu_ids
else:
gpu_ids = user_input_gpu_ids
validate_gpu_ids(gpu_ids, host_gpu_ids)
return gpu_ids
def start_poc(cmd_args):
poc_workspace = get_poc_workspace()
services_list = get_service_list(cmd_args)
excluded = get_excluded(cmd_args)
    gpu_ids = get_gpus(cmd_args)
_start_poc(poc_workspace, gpu_ids, excluded, services_list)
def get_gpus(cmd_args):
if cmd_args.gpu is not None and isinstance(cmd_args.gpu, list) and len(cmd_args.gpu) > 0:
gpu_ids = get_gpu_ids(cmd_args.gpu, get_local_host_gpu_ids())
else:
gpu_ids = []
return gpu_ids
def get_excluded(cmd_args):
excluded = None
if cmd_args.exclude != "":
excluded = [cmd_args.exclude]
return excluded
def get_service_list(cmd_args):
if cmd_args.service != "all":
services_list = [cmd_args.service]
else:
services_list = []
return services_list
def _start_poc(poc_workspace: str, gpu_ids: List[int], excluded=None, services_list=None):
project_config, service_config = setup_service_config(poc_workspace)
if services_list is None:
services_list = []
if excluded is None:
excluded = []
print(f"start_poc at {poc_workspace}, gpu_ids={gpu_ids}, excluded = {excluded}, services_list={services_list}")
validate_services(project_config, services_list, excluded)
validate_poc_workspace(poc_workspace, service_config, project_config)
_run_poc(
SC.CMD_START,
poc_workspace,
gpu_ids,
service_config,
project_config,
excluded=excluded,
services_list=services_list,
)
def validate_services(project_config, services_list: List, excluded: List):
participant_names = [p["name"] for p in project_config["participants"]]
validate_participants(participant_names, services_list)
validate_participants(participant_names, excluded)
def validate_participants(participant_names, list_participants):
for p in list_participants:
if p not in participant_names:
print(f"participant '{p}' is not defined, expecting one of followings: {participant_names}")
exit(1)
def setup_service_config(poc_workspace) -> Tuple:
project_file = os.path.join(poc_workspace, "project.yml")
if os.path.isfile(project_file):
project_config = load_yaml(project_file)
service_config = get_service_config(project_config) if project_config else None
return project_config, service_config
else:
raise CLIException(f"{project_file} is missing, make sure you have first run 'nvflare poc prepare'")
def stop_poc(cmd_args):
poc_workspace = get_poc_workspace()
excluded = get_excluded(cmd_args)
services_list = get_service_list(cmd_args)
_stop_poc(poc_workspace, excluded, services_list)
def _stop_poc(poc_workspace: str, excluded=None, services_list=None):
project_config, service_config = setup_service_config(poc_workspace)
if services_list is None:
services_list = []
if excluded is None:
excluded = [service_config[SC.FLARE_PROJ_ADMIN]]
else:
excluded.append(service_config[SC.FLARE_PROJ_ADMIN])
validate_services(project_config, services_list, excluded)
validate_poc_workspace(poc_workspace, service_config, project_config)
gpu_ids: List[int] = []
project_name = project_config.get("name")
prod_dir = get_prod_dir(poc_workspace, project_name)
p_size = len(services_list)
if p_size == 0 or service_config[SC.FLARE_SERVER] in services_list:
print("start shutdown NVFLARE")
shutdown_system(prod_dir, username=service_config[SC.FLARE_PROJ_ADMIN])
else:
print(f"start shutdown {services_list}")
_run_poc(
SC.CMD_STOP,
poc_workspace,
gpu_ids,
service_config,
project_config,
excluded=excluded,
services_list=services_list,
)
def _get_clients(service_commands: list, service_config) -> List[str]:
clients = [
service_dir_name
for service_dir_name, _ in service_commands
if service_dir_name != service_config[SC.FLARE_PROJ_ADMIN]
and service_dir_name != service_config[SC.FLARE_SERVER]
]
return clients
def _build_commands(
cmd_type: str, poc_workspace: str, service_config, project_config, excluded: list, services_list=None
) -> list:
"""Builds commands.
Args:
        cmd_type (str): start/stop
        poc_workspace (str): poc workspace directory path
        service_config (Dict): service config
        project_config (Dict): project config
        excluded (list): excluded service/participant names
        services_list (list, optional): service names; if empty, include every service/participant
Returns:
list: built commands
"""
def is_fl_service_dir(p_dir_name: str) -> bool:
fl_service = (
p_dir_name == service_config[SC.FLARE_PROJ_ADMIN]
or p_dir_name == service_config[SC.FLARE_SERVER]
or p_dir_name in service_config[SC.FLARE_CLIENTS]
)
return fl_service
project_name = project_config.get("name")
prod_dir = get_prod_dir(poc_workspace, project_name)
if services_list is None:
services_list = []
service_commands = []
for root, dirs, files in os.walk(prod_dir):
if root == prod_dir:
fl_dirs = [d for d in dirs if is_fl_service_dir(d)]
for service_dir_name in fl_dirs:
if service_dir_name not in excluded:
if len(services_list) == 0 or service_dir_name in services_list:
cmd = get_service_command(cmd_type, prod_dir, service_dir_name, service_config)
if cmd:
service_commands.append((service_dir_name, cmd))
return _sort_service_cmds(cmd_type, service_commands, service_config)
def prepare_env(service_name, gpu_ids: Optional[List[int]], service_config: Dict):
my_env = None
if gpu_ids:
my_env = os.environ.copy()
if len(gpu_ids) > 0:
my_env["CUDA_VISIBLE_DEVICES"] = ",".join([str(gid) for gid in gpu_ids])
if service_config.get(SC.IS_DOCKER_RUN):
my_env = os.environ.copy() if my_env is None else my_env
if gpu_ids and len(gpu_ids) > 0:
my_env["GPU2USE"] = f"--gpus={my_env['CUDA_VISIBLE_DEVICES']}"
my_env["MY_DATA_DIR"] = os.path.join(get_poc_workspace(), "data")
my_env["SVR_NAME"] = service_name
return my_env
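# Illustrative result (hypothetical values): for a non-docker run,
#   prepare_env("site-1", [0, 1], service_config)
# returns a copy of os.environ with CUDA_VISIBLE_DEVICES="0,1"; for a docker run it
# additionally sets GPU2USE, MY_DATA_DIR and SVR_NAME for use by docker.sh.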
def async_process(service_name, cmd_path, gpu_ids: Optional[List[int]], service_config: Dict):
my_env = prepare_env(service_name, gpu_ids, service_config)
if my_env:
subprocess.Popen(cmd_path.split(" "), env=my_env)
else:
subprocess.Popen(cmd_path.split(" "))
def sync_process(service_name, cmd_path):
my_env = os.environ.copy()
subprocess.run(cmd_path.split(" "), env=my_env)
def _run_poc(
cmd_type: str,
poc_workspace: str,
gpu_ids: List[int],
service_config: Dict,
project_config: Dict,
excluded: list,
services_list=None,
):
if services_list is None:
services_list = []
service_commands = _build_commands(cmd_type, poc_workspace, service_config, project_config, excluded, services_list)
clients = _get_clients(service_commands, service_config)
gpu_assignments: Dict[str, List[int]] = client_gpu_assignments(clients, gpu_ids)
for service_name, cmd_path in service_commands:
if service_name == service_config[SC.FLARE_PROJ_ADMIN]:
# give other commands a chance to start first
if len(service_commands) > 1:
time.sleep(2)
sync_process(service_name, cmd_path)
elif service_name == service_config[SC.FLARE_SERVER]:
async_process(service_name, cmd_path, None, service_config)
else:
async_process(service_name, cmd_path, gpu_assignments[service_name], service_config)
def clean_poc(cmd_args):
poc_workspace = get_poc_workspace()
_clean_poc(poc_workspace)
def _clean_poc(poc_workspace: str):
if os.path.isdir(poc_workspace):
project_config, service_config = setup_service_config(poc_workspace)
if project_config is not None:
if is_poc_ready(poc_workspace, service_config, project_config):
shutil.rmtree(poc_workspace, ignore_errors=True)
print(f"{poc_workspace} is removed")
else:
raise CLIException(f"{poc_workspace} is not valid poc directory")
else:
raise CLIException(f"{poc_workspace} is not valid poc directory")
poc_sub_cmd_handlers = {
CMD_PREPARE_POC: prepare_poc,
CMD_PREPARE_EXAMPLES: prepare_examples,
CMD_START_POC: start_poc,
CMD_STOP_POC: stop_poc,
CMD_CLEAN_POC: clean_poc,
}
def def_poc_parser(sub_cmd):
cmd = "poc"
parser = sub_cmd.add_parser(cmd)
add_legacy_options(parser)
poc_parser = parser.add_subparsers(title=cmd, dest="poc_sub_cmd", help="poc subcommand")
define_prepare_parser(poc_parser)
define_prepare_example_parser(poc_parser)
define_start_parser(poc_parser)
define_stop_parser(poc_parser)
define_clean_parser(poc_parser)
return {cmd: parser}
def add_legacy_options(parser):
parser.add_argument(
"--prepare",
dest="old_prepare_poc",
action="store_const",
const=old_prepare_poc,
help="deprecated, suggest use 'nvflare poc prepare'",
)
parser.add_argument(
"--start",
dest="old_start_poc",
action="store_const",
const=old_start_poc,
help="deprecated, suggest use 'nvflare poc start'",
)
parser.add_argument(
"--stop",
dest="old_stop_poc",
action="store_const",
const=old_stop_poc,
help="deprecated, suggest use 'nvflare poc stop'",
)
parser.add_argument(
"--clean",
dest="old_clean_poc",
action="store_const",
const=old_clean_poc,
help="deprecated, suggest use 'nvflare poc clean'",
)
def old_start_poc():
print(f"'nvflare poc --{CMD_START_POC}' is deprecated, please use 'nvflare poc {CMD_START_POC}' ")
def old_stop_poc():
print(f"'nvflare poc --{CMD_STOP_POC}' is deprecated, please use 'nvflare poc {CMD_STOP_POC}' ")
def old_clean_poc():
print(f"'nvflare poc --{CMD_CLEAN_POC}' is deprecated, please use 'nvflare poc {CMD_CLEAN_POC}' ")
def old_prepare_poc():
print(f"'nvflare poc --{CMD_PREPARE_POC}' is deprecated, please use 'nvflare poc {CMD_PREPARE_POC}' ")
def define_prepare_parser(poc_parser, cmd: Optional[str] = None, help_str: Optional[str] = None):
cmd = CMD_PREPARE_POC if cmd is None else cmd
help_str = "prepare poc environment by provisioning local project" if help_str is None else help_str
prepare_parser = poc_parser.add_parser(cmd, help=help_str)
prepare_parser.add_argument(
"-n", "--number_of_clients", type=int, nargs="?", default=2, help="number of sites or clients, default to 2"
)
prepare_parser.add_argument(
"-c",
"--clients",
nargs="*", # 0 or more values expected => creates a list
type=str,
default=[], # default if nothing is provided
help="Space separated client names. If specified, number_of_clients argument will be ignored.",
)
prepare_parser.add_argument(
"-he",
"--he",
action="store_true",
help="enable homomorphic encryption. ",
)
prepare_parser.add_argument(
"-i",
"--project_input",
type=str,
nargs="?",
default="",
help="project.yaml file path, If specified, "
+ "'number_of_clients','clients' and 'docker' specific options will be ignored.",
)
prepare_parser.add_argument(
"-d",
"--docker_image",
nargs="?",
default=None,
const="nvflare/nvflare",
help="generate docker.sh based on the docker_image, used in '--prepare' command. and generate docker.sh "
+ " 'start/stop' commands will start with docker.sh ",
)
prepare_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
def define_prepare_example_parser(poc_parser):
prepare_example_parser = poc_parser.add_parser(CMD_PREPARE_EXAMPLES, help="prepare examples")
prepare_example_parser.add_argument(
"-e", "--examples", type=str, nargs="?", default=None, help="examples directory"
)
prepare_example_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
def define_clean_parser(poc_parser):
clean_parser = poc_parser.add_parser(CMD_CLEAN_POC, help="clean up poc workspace")
clean_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
def define_start_parser(poc_parser):
start_parser = poc_parser.add_parser(CMD_START_POC, help="start services in poc mode")
start_parser.add_argument(
"-p",
"--service",
type=str,
nargs="?",
default="all",
help="participant, Default to all participants",
)
start_parser.add_argument(
"-ex",
"--exclude",
type=str,
nargs="?",
default="",
help="exclude service directory during 'start', default to " ", i.e. nothing to exclude",
)
start_parser.add_argument(
"-gpu",
"--gpu",
type=int,
nargs="*",
default=None,
help="gpu device ids will be used as CUDA_VISIBLE_DEVICES. used for poc start command",
)
start_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
def define_stop_parser(poc_parser):
stop_parser = poc_parser.add_parser(CMD_STOP_POC, help="stop services in poc mode")
stop_parser.add_argument(
"-p",
"--service",
type=str,
nargs="?",
default="all",
help="participant, Default to all participants",
)
stop_parser.add_argument(
"-ex",
"--exclude",
type=str,
nargs="?",
default="",
help="exclude service directory during 'stop', default to " ", i.e. nothing to exclude",
)
stop_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
def get_local_host_gpu_ids():
try:
return get_host_gpu_ids()
except Exception as e:
raise CLIException(f"Failed to get host gpu ids:{e}")
def handle_poc_cmd(cmd_args):
if cmd_args.poc_sub_cmd:
poc_cmd_handler = poc_sub_cmd_handlers.get(cmd_args.poc_sub_cmd, None)
poc_cmd_handler(cmd_args)
elif cmd_args.old_start_poc:
old_start_poc()
elif cmd_args.old_stop_poc:
old_stop_poc()
elif cmd_args.old_clean_poc:
old_clean_poc()
elif cmd_args.old_prepare_poc:
old_prepare_poc()
else:
raise CLIUnknownCmdException("unknown command")
def get_poc_workspace():
poc_workspace = os.getenv("NVFLARE_POC_WORKSPACE")
if not poc_workspace:
src_path = get_hidden_nvflare_config_path()
if os.path.isfile(src_path):
from pyhocon import ConfigFactory as CF
config = CF.parse_file(src_path)
poc_workspace = config.get("poc_workspace.path", None)
if poc_workspace is None or len(poc_workspace.strip()) == 0:
poc_workspace = DEFAULT_WORKSPACE
return poc_workspace
| NVFlare-main | nvflare/tool/poc/poc_commands.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/tool/poc/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
from abc import ABC, abstractmethod
from collections import defaultdict
from subprocess import TimeoutExpired
from nvflare.tool.package_checker.check_rule import CHECK_PASSED, CheckResult, CheckRule
from nvflare.tool.package_checker.utils import run_command_in_subprocess, split_by_len
class PackageChecker(ABC):
def __init__(self):
self.report = defaultdict(list)
self.check_len = len("Checks")
self.problem_len = 80
self.fix_len = len("How to fix")
self.dry_run_timeout = 5
self.package_path = None
self.rules = []
@abstractmethod
def init_rules(self, package_path: str):
pass
def init(self, package_path: str):
if not os.path.exists(package_path):
raise RuntimeError(f"Package path: {package_path} does not exist.")
self.package_path = os.path.abspath(package_path)
self.init_rules(package_path)
@abstractmethod
def should_be_checked(self) -> bool:
"""Check if this package should be checked by this checker."""
pass
@abstractmethod
def get_dry_run_command(self) -> str:
"""Returns dry run command."""
pass
def get_dry_run_inputs(self):
return None
def stop_dry_run(self, force: bool = True):
        # TODO: add graceful shutdown command
print("killing dry run process")
command = self.get_dry_run_command()
cmd = f"pkill -9 -f '{command}'"
process = run_command_in_subprocess(cmd)
out, err = process.communicate()
print(f"killed dry run process output: {out}")
print(f"killed dry run process err: {err}")
def check(self) -> int:
"""Checks if the package is runnable on the current system.
Returns:
0: if no dry-run process started.
1: if the dry-run process is started and return code is 0.
2: if the dry-run process is started and return code is not 0.
"""
ret_code = 0
try:
all_passed = True
for rule in self.rules:
if isinstance(rule, CheckRule):
result: CheckResult = rule(self.package_path, data=None)
self.add_report(rule.name, result.problem, result.solution)
if rule.required and result.problem != CHECK_PASSED:
all_passed = False
elif isinstance(rule, list):
result = CheckResult()
# ordered rules
for r in rule:
result = r(self.package_path, data=result.data)
self.add_report(r.name, result.problem, result.solution)
if r.required and result.problem != CHECK_PASSED:
all_passed = False
break
# check dry run
if all_passed:
ret_code = self.check_dry_run()
except Exception as e:
self.add_report(
"Package Error",
f"Exception happens in checking: {e}, this package is not in correct format.",
"Please download a new package.",
)
finally:
return ret_code
def check_dry_run(self) -> int:
"""Runs dry run command.
Returns:
0: if no process started.
1: if the process is started and return code is 0.
2: if the process is started and return code is not 0.
"""
command = self.get_dry_run_command()
dry_run_input = self.get_dry_run_inputs()
process = None
try:
process = run_command_in_subprocess(command)
if dry_run_input is not None:
out, _ = process.communicate(input=dry_run_input, timeout=self.dry_run_timeout)
else:
out, _ = process.communicate(timeout=self.dry_run_timeout)
ret_code = process.returncode
if ret_code == 0:
self.add_report(
"Check dry run",
CHECK_PASSED,
"N/A",
)
else:
self.add_report(
"Check dry run",
f"Can't start successfully: {out}",
"Please check the error message of dry run.",
)
except TimeoutExpired:
os.killpg(process.pid, signal.SIGTERM)
            # Assumption: the preflight check is focused on connectivity, so we assume all sub-systems
            # behave as designed if configured correctly.
            # In that case, a dry run of any of the sub-systems (overseer, server(s), clients, etc.) will
            # run as a service forever once started, unless it is asked to stop. Therefore, we expect to
            # get TimeoutExpired here. With the above assumption, we consider the sub-system to be running
            # in good condition if it started running within the given timeout period.
self.add_report(
"Check dry run",
CHECK_PASSED,
"N/A",
)
finally:
if process:
if process.returncode == 0:
return 1
else:
return 2
else:
return 0
def add_report(self, check_name, problem_text: str, fix_text: str):
self.report[self.package_path].append((check_name, problem_text, fix_text))
self.check_len = max(self.check_len, len(check_name))
self.fix_len = max(self.fix_len, len(fix_text))
def _print_line(self):
print("|" + "-" * (self.check_len + self.problem_len + self.fix_len + 8) + "|")
def _print_row(self, check, problem, fix):
print(
"| {check:<{width1}s} | {problems:<{width2}s} | {fix:<{width3}s} |".format(
check=check,
problems=problem,
fix=fix,
width1=self.check_len,
width2=self.problem_len,
width3=self.fix_len,
)
)
def print_report(self):
total_width = self.check_len + self.problem_len + self.fix_len + 10
for package_path, results in self.report.items():
print("Checking Package: " + package_path)
print("-" * total_width)
if results:
self._print_row("Checks", "Problems", "How to fix")
else:
print("| {:{}s} |".format("Passed", total_width - 4))
for row in results:
self._print_line()
lines = split_by_len(row[1], max_len=self.problem_len)
self._print_row(row[0], lines[0], row[2])
for line in lines[1:]:
self._print_row("", line, "")
print("-" * total_width)
print()
| NVFlare-main | nvflare/tool/package_checker/package_checker.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .client_package_checker import ClientPackageChecker
from .utils import NVFlareConfig, NVFlareRole
class NVFlareConsolePackageChecker(ClientPackageChecker):
NVF_CONFIG = NVFlareConfig.ADMIN
NVF_ROLE = NVFlareRole.ADMIN
def get_dry_run_command(self) -> str:
return os.path.join(self.package_path, "startup", "fl_admin.sh")
def get_dry_run_inputs(self):
return os.path.basename(os.path.normpath(self.package_path))
| NVFlare-main | nvflare/tool/package_checker/nvflare_console_package_checker.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .client_package_checker import ClientPackageChecker
from .nvflare_console_package_checker import NVFlareConsolePackageChecker
from .overseer_package_checker import OverseerPackageChecker
from .package_checker import PackageChecker
from .server_package_checker import ServerPackageChecker
| NVFlare-main | nvflare/tool/package_checker/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shlex
import shutil
import socket
import ssl
import subprocess
import tempfile
import time
from typing import Any, Dict, Optional, Tuple
import grpc
from requests import Request, RequestException, Response, Session, codes
from requests.adapters import HTTPAdapter
from nvflare.fuel.hci.conn import ALL_END
class NVFlareConfig:
OVERSEER = "gunicorn.conf.py"
SERVER = "fed_server.json"
CLIENT = "fed_client.json"
ADMIN = "fed_admin.json"
class NVFlareRole:
SERVER = "server"
CLIENT = "client"
ADMIN = "admin"
def try_write_dir(path: str):
try:
created = False
if not os.path.exists(path):
created = True
os.makedirs(path, exist_ok=False)
fd, name = tempfile.mkstemp(dir=path)
with os.fdopen(fd, "w") as fp:
fp.write("dummy")
os.remove(name)
if created:
shutil.rmtree(path)
except OSError as e:
return e
def try_bind_address(host: str, port: int):
"""Tries to bind to address."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind((host, port))
except OSError as e:
return e
finally:
sock.close()
return None
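# e.g. try_bind_address("localhost", 8002) returns None when the port is free, or the
# OSError raised by bind() (such as "Address already in use") when it is not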
def _create_http_session(ca_path=None, cert_path=None, prv_key_path=None):
session = Session()
adapter = HTTPAdapter(max_retries=1)
session.mount("https://", adapter)
if ca_path:
session.verify = ca_path
session.cert = (cert_path, prv_key_path)
return session
def _send_request(
session, api_point, headers: Optional[Dict[str, Any]] = None, payload: Optional[Dict[str, Any]] = None
) -> Response:
req = Request("POST", api_point, json=payload, headers=headers)
prepared = session.prepare_request(req)
resp = session.send(prepared)
return resp
def parse_overseer_agent_args(overseer_agent_conf: dict, required_args: list) -> dict:
result = {}
for k in required_args:
value = overseer_agent_conf.get("args", {}).get(k)
if value is None:
raise Exception(f"overseer agent missing arg '{k}'.")
result[k] = value
return result
def construct_dummy_response(overseer_agent_args: dict) -> Response:
psp = {"sp_end_point": overseer_agent_args["sp_end_point"], "primary": True}
response_content = {"primary_sp": psp, "sp_list": [psp]}
resp = Response()
resp.status_code = 200
resp._content = str.encode(json.dumps(response_content))
return resp
def is_dummy_overseer_agent(overseer_agent_class: str) -> bool:
    return overseer_agent_class == "nvflare.ha.dummy_overseer_agent.DummyOverseerAgent"
def get_required_args_for_overseer_agent(overseer_agent_class: str, role: str) -> list:
"""Gets required argument list for a specific overseer agent class."""
if overseer_agent_class == "nvflare.ha.overseer_agent.HttpOverseerAgent":
required_args = ["overseer_end_point", "role", "project", "name"]
if role == NVFlareRole.SERVER:
required_args.extend(["fl_port", "admin_port"])
return required_args
elif overseer_agent_class == "nvflare.ha.dummy_overseer_agent.DummyOverseerAgent":
required_args = ["sp_end_point"]
return required_args
else:
raise Exception(f"overseer agent {overseer_agent_class} is not supported.")
def _prepare_data(args: dict):
data = dict(role=args["role"], project=args["project"])
if args["role"] == NVFlareRole.SERVER:
data["sp_end_point"] = ":".join([args["name"], args["fl_port"], args["admin_port"]])
return data
def _get_ca_cert_file_name():
return "rootCA.pem"
def _get_cert_file_name(role: str):
if role == NVFlareRole.SERVER:
return "server.crt"
return "client.crt"
def _get_prv_key_file_name(role: str):
if role == NVFlareRole.SERVER:
return "server.key"
return "client.key"
def split_by_len(item, max_len):
return [item[ind : ind + max_len] for ind in range(0, len(item), max_len)]
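# e.g. split_by_len("abcdef", max_len=4) -> ["abcd", "ef"]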
def check_overseer_running(
startup: str, overseer_agent_args: dict, role: str, retry: int = 3
) -> Tuple[Optional[Response], Optional[str]]:
"""Checks if overseer is running."""
session = _create_http_session(
ca_path=os.path.join(startup, _get_ca_cert_file_name()),
cert_path=os.path.join(startup, _get_cert_file_name(role)),
prv_key_path=os.path.join(startup, _get_prv_key_file_name(role)),
)
data = _prepare_data(overseer_agent_args)
try_count = 0
retry_delay = 1
resp = None
err = None
while try_count < retry:
try:
resp = _send_request(
session,
api_point=overseer_agent_args["overseer_end_point"] + "/heartbeat",
payload=data,
)
if resp:
break
except RequestException as e:
try_count += 1
time.sleep(retry_delay)
err = str(e)
return resp, err
def check_response(resp: Optional[Response]) -> bool:
if not resp:
return False
if resp.status_code != codes.ok:
return False
return True
def check_socket_server_running(startup: str, host: str, port: int) -> bool:
try:
# SSL communication
ctx = ssl.create_default_context()
ctx.minimum_version = ssl.TLSVersion.TLSv1_2
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = False
ctx.load_verify_locations(os.path.join(startup, _get_ca_cert_file_name()))
ctx.load_cert_chain(
certfile=os.path.join(startup, _get_cert_file_name(NVFlareRole.CLIENT)),
keyfile=os.path.join(startup, _get_prv_key_file_name(NVFlareRole.CLIENT)),
)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
with ctx.wrap_socket(sock) as secure_sock:
secure_sock.connect((host, port))
secure_sock.sendall(bytes(f"hello{ALL_END}", "utf-8"))
secure_sock.recv()
except Exception as e:
print(e)
return False
return True
def check_grpc_server_running(startup: str, host: str, port: int, token=None) -> bool:
with open(os.path.join(startup, _get_ca_cert_file_name()), "rb") as f:
trusted_certs = f.read()
with open(os.path.join(startup, _get_prv_key_file_name(NVFlareRole.CLIENT)), "rb") as f:
private_key = f.read()
with open(os.path.join(startup, _get_cert_file_name(NVFlareRole.CLIENT)), "rb") as f:
certificate_chain = f.read()
call_credentials = grpc.metadata_call_credentials(
lambda context, callback: callback((("x-custom-token", token),), None)
)
credentials = grpc.ssl_channel_credentials(
certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs
)
composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials)
channel = grpc.secure_channel(target=f"{host}:{port}", credentials=composite_credentials)
try:
grpc.channel_ready_future(channel).result(timeout=10)
except grpc.FutureTimeoutError:
return False
return True
def run_command_in_subprocess(command):
new_env = os.environ.copy()
process = subprocess.Popen(
shlex.split(command),
preexec_fn=os.setsid,
env=new_env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
return process
| NVFlare-main | nvflare/tool/package_checker/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .check_rule import CheckAddressBinding
from .package_checker import PackageChecker
from .utils import NVFlareConfig
def _get_overseer_host_and_port(package_path: str):
gunicorn_conf_file = os.path.join(package_path, "startup", NVFlareConfig.OVERSEER)
gunicorn_conf = {}
with open(gunicorn_conf_file, "r") as f:
lines = f.read().splitlines()
for line in lines:
k, v = line.split("=")
if v[0] == '"' and v[-1] == '"':
v = str(v[1:-1])
gunicorn_conf[k] = v
address = gunicorn_conf["bind"]
host, port = address.split(":")
return host, int(port)
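# e.g. a gunicorn.conf.py line of the form bind="localhost:8443" (hypothetical value)
# yields ("localhost", 8443)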
class OverseerPackageChecker(PackageChecker):
def should_be_checked(self) -> bool:
"""Check if this package should be checked by this checker."""
gunicorn_conf_file = os.path.join(self.package_path, "startup", NVFlareConfig.OVERSEER)
if os.path.exists(gunicorn_conf_file):
return True
return False
def init_rules(self, package_path):
self.dry_run_timeout = 5
self.rules = [
CheckAddressBinding(
name="Check overseer port binding", get_host_and_port_from_package=_get_overseer_host_and_port
),
]
def get_dry_run_command(self) -> str:
return os.path.join(self.package_path, "startup", "start.sh")
| NVFlare-main | nvflare/tool/package_checker/overseer_package_checker.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from abc import ABC, abstractmethod
from nvflare.tool.package_checker.utils import (
NVFlareConfig,
NVFlareRole,
check_grpc_server_running,
check_overseer_running,
check_response,
check_socket_server_running,
construct_dummy_response,
get_required_args_for_overseer_agent,
is_dummy_overseer_agent,
parse_overseer_agent_args,
try_bind_address,
try_write_dir,
)
CHECK_PASSED = "PASSED"
class CheckResult:
def __init__(self, problem="", solution="", data=None):
self.problem = problem
self.solution = solution
self.data = data
class CheckRule(ABC):
def __init__(self, name: str, required: bool = True):
"""Creates a CheckRule.
Args:
name (str): name of the rule
required (bool): whether this rule is required to pass.
"""
self.name = name
self.required = required
@abstractmethod
def __call__(self, package_path: str, data) -> CheckResult:
"""Returns problem and solution.
Returns:
A "CheckResult".
"""
pass
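# Illustrative sketch (hypothetical rule, not part of NVFlare): a minimal concrete rule
# only needs to implement __call__ and return a CheckResult; required=False makes it
# informational, so a failure would not block the dry run.
class _ExampleNoopRule(CheckRule):
    def __init__(self, name: str = "example noop rule"):
        super().__init__(name, required=False)
    def __call__(self, package_path: str, data=None) -> CheckResult:
        # always passes; a real rule would inspect the package under package_path
        return CheckResult(CHECK_PASSED, "N/A", data)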
class CheckOverseerRunning(CheckRule):
def __init__(self, name: str, role: str):
super().__init__(name)
if role not in [NVFlareRole.SERVER, NVFlareRole.CLIENT, NVFlareRole.ADMIN]:
raise RuntimeError(f"role {role} is not supported.")
self.role = role
def __call__(self, package_path, data=None):
startup = os.path.join(package_path, "startup")
if self.role == NVFlareRole.SERVER:
nvf_config = NVFlareConfig.SERVER
elif self.role == NVFlareRole.CLIENT:
nvf_config = NVFlareConfig.CLIENT
else:
nvf_config = NVFlareConfig.ADMIN
fed_config_file = os.path.join(startup, nvf_config)
with open(fed_config_file, "r") as f:
fed_config = json.load(f)
if self.role == NVFlareRole.ADMIN:
overseer_agent_conf = fed_config["admin"]["overseer_agent"]
else:
overseer_agent_conf = fed_config["overseer_agent"]
overseer_agent_class = overseer_agent_conf.get("path")
required_args = get_required_args_for_overseer_agent(overseer_agent_class=overseer_agent_class, role=self.role)
overseer_agent_args = parse_overseer_agent_args(overseer_agent_conf, required_args)
if is_dummy_overseer_agent(overseer_agent_class):
resp = construct_dummy_response(overseer_agent_args=overseer_agent_args)
return CheckResult(CHECK_PASSED, "N/A", resp)
resp, err = check_overseer_running(startup=startup, overseer_agent_args=overseer_agent_args, role=self.role)
if err:
return CheckResult(
f"Can't connect to overseer ({overseer_agent_args['overseer_end_point']}): {err}",
"1) Please check if overseer is up or certificates are correct."
+ "2) Please check if overseer hostname in project.yml is available."
+ "3) if running in local machine, check if overseer defined in project.yml is defined in /etc/hosts",
)
elif not check_response(resp):
return CheckResult(
f"Can't connect to overseer ({overseer_agent_args['overseer_end_point']})",
"1) Please check if overseer is up or certificates are correct."
+ "2) Please check if overseer hostname in project.yml is available."
+ "3) if running in local machine, check if overseer defined in project.yml is defined in /etc/hosts",
)
return CheckResult(CHECK_PASSED, "N/A", resp)
class CheckAddressBinding(CheckRule):
def __init__(self, name: str, get_host_and_port_from_package):
super().__init__(name)
self.get_host_and_port_from_package = get_host_and_port_from_package
def __call__(self, package_path, data=None):
host, port = self.get_host_and_port_from_package(package_path)
e = try_bind_address(host, port)
if e:
return CheckResult(
f"Can't bind to address ({host}:{port}): {e}",
"Please check the DNS and port.",
)
return CheckResult(CHECK_PASSED, "N/A")
class CheckWriting(CheckRule):
def __init__(self, name: str, get_filename_from_package):
super().__init__(name)
self.get_filename_from_package = get_filename_from_package
def __call__(self, package_path, data=None):
path_to_write = self.get_filename_from_package(package_path)
e = None
if path_to_write:
e = try_write_dir(path_to_write)
if e:
return CheckResult(
f"Can't write to {path_to_write}: {e}.",
"Please check the user permission.",
)
return CheckResult(CHECK_PASSED, "N/A")
def _get_primary_sp(sp_list):
for sp in sp_list:
if sp["primary"]:
return sp
return None
class CheckSPListInResponse(CheckRule):
def __call__(self, package_path, data):
data = data.json()
sp_list = data.get("sp_list", [])
psp = _get_primary_sp(sp_list)
if psp is None:
return CheckResult(
"Can't get primary service provider from overseer",
"Please contact NVFLARE system admin and make sure at least one of the FL servers"
+ " is up and can connect to overseer.",
)
return CheckResult(CHECK_PASSED, "N/A", sp_list)
class CheckPrimarySPSocketServerAvailable(CheckRule):
def __call__(self, package_path, data):
startup = os.path.join(package_path, "startup")
psp = _get_primary_sp(data)
sp_end_point = psp["sp_end_point"]
sp_name, grpc_port, admin_port = sp_end_point.split(":")
if not check_socket_server_running(startup=startup, host=sp_name, port=int(admin_port)):
return CheckResult(
f"Can't connect to ({sp_name}:{admin_port}) / DNS can't resolve",
f" 1) If ({sp_name}:{admin_port}) is public, check internet connection, try ping ({sp_name}:{admin_port})."
f" 2) If ({sp_name}:{admin_port}) is private, then you need to add its ip to the etc/hosts."
f" 3) If network is good, Please contact NVFLARE system admin and make sure the primary FL server"
f" is running.",
)
return CheckResult(CHECK_PASSED, "N/A", data)
class CheckPrimarySPGRPCServerAvailable(CheckRule):
def __call__(self, package_path, data):
startup = os.path.join(package_path, "startup")
psp = _get_primary_sp(data)
sp_end_point = psp["sp_end_point"]
sp_name, grpc_port, admin_port = sp_end_point.split(":")
if not check_grpc_server_running(startup=startup, host=sp_name, port=int(grpc_port)):
return CheckResult(
f"Can't connect to primary service provider's grpc server ({sp_name}:{grpc_port})",
"Please check if server is up.",
)
return CheckResult(CHECK_PASSED, "N/A", data)
class CheckNonPrimarySPSocketServerAvailable(CheckRule):
def __call__(self, package_path, data):
startup = os.path.join(package_path, "startup")
for sp in data:
if not sp["primary"]:
sp_end_point = sp["sp_end_point"]
sp_name, grpc_port, admin_port = sp_end_point.split(":")
if not check_socket_server_running(startup=startup, host=sp_name, port=int(admin_port)):
return CheckResult(
f"Can't connect to ({sp_name}:{admin_port}) / DNS can't resolve",
f" 1) If ({sp_name}:{admin_port}) is public, check internet connection, try ping ({sp_name}:{admin_port})."
f" 2) If ({sp_name}:{admin_port}) is private, then you need to add its ip to the etc/hosts."
f" 3) If network is good, Please contact NVFLARE system admin and make sure the non-primary "
"FL server is running.",
)
return CheckResult(CHECK_PASSED, "N/A", data)
class CheckNonPrimarySPGRPCServerAvailable(CheckRule):
def __call__(self, package_path, data):
startup = os.path.join(package_path, "startup")
for sp in data:
if not sp["primary"]:
sp_end_point = sp["sp_end_point"]
sp_name, grpc_port, admin_port = sp_end_point.split(":")
if not check_grpc_server_running(startup=startup, host=sp_name, port=int(grpc_port)):
return CheckResult(
f"Can't connect to non-primary service provider's grpc server ({sp_name}:{grpc_port})",
"Please check if server is up.",
)
return CheckResult(CHECK_PASSED, "N/A", data)
| NVFlare-main | nvflare/tool/package_checker/check_rule.py |
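For orientation, here is a minimal sketch of invoking these rules directly. This is an editor's illustration, not part of the package: the lambda getters and the port/path values are hypothetical stand-ins for the real provisioned-package lookups.

# Illustrative only: exercising CheckAddressBinding and CheckWriting by hand.
from nvflare.tool.package_checker.check_rule import CHECK_PASSED, CheckAddressBinding, CheckWriting

bind_rule = CheckAddressBinding(
    name="Check admin port binding",
    get_host_and_port_from_package=lambda pkg: ("localhost", 8003),  # hypothetical port
)
write_rule = CheckWriting(
    name="Check workspace writable",
    get_filename_from_package=lambda pkg: "/tmp",  # hypothetical path
)
for rule in [bind_rule, write_rule]:
    result = rule(package_path="/path/to/package")
    status = "PASSED" if result.problem == CHECK_PASSED else result.problem
    print(f"{rule.name}: {status}")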
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import sys
from typing import Tuple
from .check_rule import CheckAddressBinding, CheckOverseerRunning, CheckWriting
from .package_checker import PackageChecker
from .utils import NVFlareConfig, NVFlareRole
SERVER_SCRIPT = "nvflare.private.fed.app.server.server_train"
def _get_server_fed_config(package_path: str):
startup = os.path.join(package_path, "startup")
fed_config_file = os.path.join(startup, NVFlareConfig.SERVER)
with open(fed_config_file, "r") as f:
fed_config = json.load(f)
return fed_config
def _get_snapshot_storage_root(package_path: str) -> str:
fed_config = _get_server_fed_config(package_path)
snapshot_storage_root = ""
if (
fed_config.get("snapshot_persistor", {}).get("path")
== "nvflare.app_common.state_persistors.storage_state_persistor.StorageStatePersistor"
):
storage = fed_config["snapshot_persistor"].get("args", {}).get("storage")
if storage["path"] == "nvflare.app_common.storages.filesystem_storage.FilesystemStorage":
snapshot_storage_root = storage["args"]["root_dir"]
return snapshot_storage_root
def _get_job_storage_root(package_path: str) -> str:
fed_config = _get_server_fed_config(package_path)
job_storage_root = ""
for c in fed_config.get("components", []):
if c.get("path") == "nvflare.apis.impl.job_def_manager.SimpleJobDefManager":
job_storage_root = c["args"]["uri_root"]
return job_storage_root
def _get_grpc_host_and_port(package_path: str) -> Tuple[str, int]:
fed_config = _get_server_fed_config(package_path)
server_conf = fed_config["servers"][0]
grpc_service_config = server_conf["service"]
grpc_target_address = grpc_service_config["target"]
_, port = grpc_target_address.split(":")
return "localhost", int(port)
def _get_admin_host_and_port(package_path: str) -> Tuple[str, int]:
fed_config = _get_server_fed_config(package_path)
server_conf = fed_config["servers"][0]
return "localhost", int(server_conf["admin_port"])
class ServerPackageChecker(PackageChecker):
def __init__(self):
super().__init__()
self.snapshot_storage_root = None
self.job_storage_root = None
def init_rules(self, package_path):
self.dry_run_timeout = 3
self.rules = [
CheckOverseerRunning(name="Check overseer running", role=NVFlareRole.SERVER),
CheckAddressBinding(name="Check grpc port binding", get_host_and_port_from_package=_get_grpc_host_and_port),
CheckAddressBinding(
name="Check admin port binding", get_host_and_port_from_package=_get_admin_host_and_port
),
CheckWriting(name="Check snapshot storage writable", get_filename_from_package=_get_snapshot_storage_root),
CheckWriting(name="Check job storage writable", get_filename_from_package=_get_job_storage_root),
]
def should_be_checked(self) -> bool:
startup = os.path.join(self.package_path, "startup")
if os.path.exists(os.path.join(startup, NVFlareConfig.SERVER)):
return True
return False
def get_dry_run_command(self) -> str:
command = (
f"{sys.executable} -m {SERVER_SCRIPT}"
f" -m {self.package_path} -s {NVFlareConfig.SERVER}"
" --set secure_train=true config_folder=config"
)
self.snapshot_storage_root = _get_snapshot_storage_root(self.package_path)
self.job_storage_root = _get_job_storage_root(self.package_path)
return command
def stop_dry_run(self, force=True):
super().stop_dry_run(force=force)
if os.path.exists(self.snapshot_storage_root):
shutil.rmtree(self.snapshot_storage_root)
if os.path.exists(self.job_storage_root):
shutil.rmtree(self.job_storage_root)
| NVFlare-main | nvflare/tool/package_checker/server_package_checker.py |
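As an editor's note, the sketch below shows the shape of the server NVFlare config that the helper functions above read; the hostnames, ports, and paths are illustrative, not values from a real provisioned package.

# Illustrative config shape only.
sample_fed_config = {
    "servers": [{"service": {"target": "example.com:8002"}, "admin_port": 8003}],
    "snapshot_persistor": {
        "path": "nvflare.app_common.state_persistors.storage_state_persistor.StorageStatePersistor",
        "args": {
            "storage": {
                "path": "nvflare.app_common.storages.filesystem_storage.FilesystemStorage",
                "args": {"root_dir": "/tmp/nvflare/snapshot-storage"},
            }
        },
    },
}
# Given this shape, _get_grpc_host_and_port returns ("localhost", 8002),
# _get_admin_host_and_port returns ("localhost", 8003), and
# _get_snapshot_storage_root returns "/tmp/nvflare/snapshot-storage".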
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from .check_rule import (
CheckNonPrimarySPGRPCServerAvailable,
CheckNonPrimarySPSocketServerAvailable,
CheckOverseerRunning,
CheckPrimarySPGRPCServerAvailable,
CheckPrimarySPSocketServerAvailable,
CheckSPListInResponse,
)
from .package_checker import PackageChecker
from .utils import NVFlareConfig, NVFlareRole
CLIENT_SCRIPT = "nvflare.private.fed.app.client.client_train"
class ClientPackageChecker(PackageChecker):
NVF_CONFIG = NVFlareConfig.CLIENT
NVF_ROLE = NVFlareRole.CLIENT
def should_be_checked(self) -> bool:
"""Check if this package should be checked by this checker."""
startup = os.path.join(self.package_path, "startup")
if os.path.exists(os.path.join(startup, self.NVF_CONFIG)):
return True
return False
def init_rules(self, package_path):
self.rules = [
[
CheckOverseerRunning(name="Check overseer running", role=self.NVF_ROLE),
CheckSPListInResponse(name="Check service provider list available"),
CheckPrimarySPSocketServerAvailable(name="Check primary SP's socket server available"),
CheckPrimarySPGRPCServerAvailable(name="Check primary SP's GRPC server available"),
CheckNonPrimarySPSocketServerAvailable(
name="Check non-primary SP's socket server available", required=False
),
CheckNonPrimarySPGRPCServerAvailable(
name="Check non-primary SP's GRPC server available", required=False
),
]
]
def get_dry_run_command(self) -> str:
command = (
f"{sys.executable} -m {CLIENT_SCRIPT}"
f" -m {self.package_path} -s {self.NVF_CONFIG}"
" --set secure_train=true config_folder=config"
)
return command
| NVFlare-main | nvflare/tool/package_checker/client_package_checker.py |
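The nested rule list above implies a data flow: the overseer check yields a response, CheckSPListInResponse turns it into an sp_list, and the SP availability checks consume that list. The runner below is an editor's sketch of that chaining, assuming the real PackageChecker feeds each rule's CheckResult.data into the next rule; it is not the actual PackageChecker API.

from nvflare.tool.package_checker.check_rule import CHECK_PASSED

def run_rule_chain(rules, package_path):
    data, result = None, None
    for rule in rules:
        result = rule(package_path, data)
        if rule.required and result.problem != CHECK_PASSED:
            return result  # stop at the first required failure
        data = result.data  # e.g. the overseer response, then the sp_list
    return result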
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/tool/job/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import traceback
from distutils.dir_util import copy_tree
from tempfile import mkdtemp
from typing import List, Optional, Tuple
from pyhocon import ConfigFactory as CF
from pyhocon import ConfigTree
from nvflare.cli_unknown_cmd_exception import CLIUnknownCmdException
from nvflare.fuel.flare_api.flare_api import new_secure_session
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.tool.job.config.configer import (
build_config_file_indices,
filter_indices,
get_root_index,
merge_configs_from_cli,
)
from nvflare.tool.job.job_client_const import (
CONFIG_CONF,
CONFIG_FILE_BASE_NAME_WO_EXTS,
JOB_CONFIG_COMP_NAME,
JOB_CONFIG_FILE_NAME,
JOB_CONFIG_VAR_NAME,
JOB_CONFIG_VAR_VALUE,
JOB_INFO_CLIENT_TYPE,
JOB_INFO_CLIENT_TYPE_KEY,
JOB_INFO_CONF,
JOB_INFO_CONTROLLER_TYPE,
JOB_INFO_CONTROLLER_TYPE_KEY,
JOB_INFO_DESC,
JOB_INFO_DESC_KEY,
JOB_INFO_KEYS,
JOB_TEMPLATE,
JOB_TEMPLATE_CONF,
)
from nvflare.utils.cli_utils import (
find_job_templates_location,
get_curr_dir,
get_hidden_nvflare_dir,
get_startup_kit_dir,
save_config,
)
CMD_LIST_TEMPLATES = "list_templates"
CMD_SHOW_VARIABLES = "show_variables"
CMD_CREATE_JOB = "create"
CMD_SUBMIT_JOB = "submit"
def find_filename_basename(f: str):
basename = os.path.basename(f)
if "." in basename:
return os.path.splitext(basename)[0]
else:
return basename
def build_job_template_indices(job_templates_dir: str) -> ConfigTree:
conf = CF.parse_string("{ templates = {} }")
config_file_base_names = CONFIG_FILE_BASE_NAME_WO_EXTS
template_conf = conf.get("templates")
keys = JOB_INFO_KEYS
for root, dirs, files in os.walk(job_templates_dir):
config_files = [f for f in files if find_filename_basename(f) in config_file_base_names]
if len(config_files) > 0:
info_conf = get_template_info_config(root)
for key in keys:
value = info_conf.get(key, "NA") if info_conf else "NA"
template_name = os.path.basename(root)
template_conf.put(f"{template_name}.{key}", value)
return conf
def get_template_registry_file_path():
filename = JOB_TEMPLATE_CONF
hidden_nvflare_dir = get_hidden_nvflare_dir()
file_path = os.path.join(hidden_nvflare_dir, filename)
return file_path
def get_template_info_config(template_dir):
info_conf_path = os.path.join(template_dir, JOB_INFO_CONF)
return CF.parse_file(info_conf_path) if os.path.isfile(info_conf_path) else None
def create_job(cmd_args):
try:
prepare_job_folder(cmd_args)
job_templates_dir = find_job_templates_location()
template_index_conf = build_job_template_indices(job_templates_dir)
job_folder = cmd_args.job_folder
config_dir = get_config_dir(job_folder)
fmt, real_config_path = ConfigFactory.search_config_format("config_fed_server.conf", [config_dir])
if real_config_path and not cmd_args.force:
print(
f"""\nwarning: configuration files:\n
{"config_fed_server.[json|conf|yml]"} already exists.
\nNot generating the config files. If you would like to overwrite, use -force option"""
)
return
target_template_name = cmd_args.template
check_template_exists(target_template_name, template_index_conf)
src = os.path.join(job_templates_dir, target_template_name)
copy_tree(src=src, dst=config_dir)
prepare_meta_config(cmd_args, src)
remove_extra_file(config_dir)
variable_values = prepare_job_config(cmd_args)
display_template_variables(job_folder, variable_values)
except ValueError as e:
print(f"\nUnable to handle command: {CMD_CREATE_JOB} due to: {e} \n")
if cmd_args.debug:
print(traceback.format_exc())
sub_cmd_parser = job_sub_cmd_parser[CMD_CREATE_JOB]
if sub_cmd_parser:
sub_cmd_parser.print_help()
def remove_extra_file(config_dir):
extra_file = ["info.md", "info.conf"]
for ef in extra_file:
file_path = os.path.join(config_dir, ef)
if os.path.isfile(file_path):
os.remove(file_path)
def show_variables(cmd_args):
try:
if not os.path.isdir(cmd_args.job_folder):
raise ValueError("required job folder is not specified.")
config_dir = get_config_dir(cmd_args.job_folder)
indices = build_config_file_indices(config_dir)
variable_values = filter_indices(indices_configs=indices)
display_template_variables(cmd_args.job_folder, variable_values)
except ValueError as e:
print(f"\nUnable to handle command: {CMD_SHOW_VARIABLES} due to: {e} \n")
if cmd_args.debug:
print(traceback.format_exc())
sub_cmd_parser = job_sub_cmd_parser[CMD_SHOW_VARIABLES]
if sub_cmd_parser:
sub_cmd_parser.print_help()
def check_template_exists(target_template_name, template_index_conf):
targets = [os.path.basename(key) for key in template_index_conf.get("templates").keys()]
found = target_template_name in targets
if not found:
raise ValueError(
f"Invalid template name {target_template_name}, "
f"please check the available templates using nvflare job list_templates"
)
def display_template_variables(job_folder, variable_values):
print("\nThe following are the variables you can change in the template\n")
total_length = 135
left_margin = 1
print("-" * total_length)
job_folder_header = fix_length_format(f"job folder: {job_folder}", total_length)
print(" " * total_length)
print(" " * left_margin, job_folder_header)
print(" " * total_length)
print("-" * total_length)
file_name_fix_length = 30
var_name_fix_length = 30
var_value_fix_length = 35
var_comp_fix_length = 35
file_name = fix_length_format(JOB_CONFIG_FILE_NAME, file_name_fix_length)
var_name = fix_length_format(JOB_CONFIG_VAR_NAME, var_name_fix_length)
var_value = fix_length_format(JOB_CONFIG_VAR_VALUE, var_value_fix_length)
var_comp = fix_length_format(JOB_CONFIG_COMP_NAME, var_comp_fix_length)
print(" " * left_margin, file_name, var_name, var_value, var_comp)
print("-" * total_length)
for file in sorted(variable_values.keys()):
indices = variable_values.get(file)
file_name = os.path.basename(file)
file_name = fix_length_format(file_name, file_name_fix_length)
key_indices = indices
for index in sorted(key_indices.keys()):
key_index = key_indices[index]
var_name = fix_length_format(index, var_name_fix_length)
var_value = fix_length_format(str(key_index.value), var_value_fix_length)
var_comp = " " if key_index.component_name is None else key_index.component_name
var_comp = fix_length_format(var_comp, var_comp_fix_length)
print(" " * left_margin, file_name, var_name, var_value, var_comp)
print("")
print("-" * total_length)
def list_templates(cmd_args):
try:
job_templates_dir = find_job_templates_location(cmd_args.job_templates_dir)
job_templates_dir = os.path.abspath(job_templates_dir)
template_index_conf = build_job_template_indices(job_templates_dir)
display_available_templates(template_index_conf)
if job_templates_dir:
update_job_templates_dir(job_templates_dir)
except ValueError as e:
print(f"\nUnable to handle command: {CMD_LIST_TEMPLATES} due to: {e} \n")
if cmd_args.debug:
print(traceback.format_exc())
sub_cmd_parser = job_sub_cmd_parser[CMD_LIST_TEMPLATES]
if sub_cmd_parser:
sub_cmd_parser.print_help()
def update_job_templates_dir(job_templates_dir: str):
hidden_nvflare_dir = get_hidden_nvflare_dir()
file_path = os.path.join(hidden_nvflare_dir, CONFIG_CONF)
config = CF.parse_file(file_path)
config.put(f"{JOB_TEMPLATE}.path", job_templates_dir)
save_config(config, file_path)
def display_available_templates(template_index_conf):
print("\nThe following job templates are available: \n")
template_registry = template_index_conf.get("templates")
total_length = 120
left_margin = 1
print("-" * total_length)
name_fix_length = 15
description_fix_length = 60
controller_type_fix_length = 20
client_category_fix_length = 20
name = fix_length_format("name", name_fix_length)
description = fix_length_format(JOB_INFO_DESC, description_fix_length)
client_category = fix_length_format(JOB_INFO_CLIENT_TYPE, client_category_fix_length)
controller_type = fix_length_format(JOB_INFO_CONTROLLER_TYPE, controller_type_fix_length)
print(" " * left_margin, name, description, controller_type, client_category)
print("-" * total_length)
for file_path in sorted(template_registry.keys()):
name = os.path.basename(file_path)
template_info = template_registry.get(file_path, None)
if not template_info:
template_info = template_registry.get(name)
name = fix_length_format(name, name_fix_length)
description = fix_length_format(template_info.get(JOB_INFO_DESC_KEY), description_fix_length)
client_category = fix_length_format(template_info.get(JOB_INFO_CLIENT_TYPE_KEY), client_category_fix_length)
controller_type = fix_length_format(template_info.get(JOB_INFO_CONTROLLER_TYPE_KEY), controller_type_fix_length)
print(" " * left_margin, name, description, controller_type, client_category)
print("-" * total_length)
def fix_length_format(name: str, name_fix_length: int):
return f"{name[:name_fix_length]:{name_fix_length}}"
def submit_job(cmd_args):
temp_job_dir = None
try:
if not os.path.isdir(cmd_args.job_folder):
raise ValueError(f"invalid job folder: {cmd_args.job_folder}")
temp_job_dir = mkdtemp()
copy_tree(cmd_args.job_folder, temp_job_dir)
prepare_job_config(cmd_args, temp_job_dir)
admin_username, admin_user_dir = find_admin_user_and_dir()
internal_submit_job(admin_user_dir, admin_username, temp_job_dir)
except ValueError as e:
print(f"\nUnable to handle command: {CMD_SUBMIT_JOB} due to: {e} \n")
if cmd_args.debug:
print(traceback.format_exc())
sub_cmd_parser = job_sub_cmd_parser[CMD_SUBMIT_JOB]
if sub_cmd_parser:
sub_cmd_parser.print_help()
finally:
if temp_job_dir:
if cmd_args.debug:
print(f"in debug mode, job configurations can be examined in temp job directory '{temp_job_dir}'")
else:
shutil.rmtree(temp_job_dir)
def find_admin_user_and_dir() -> Tuple[str, str]:
startup_kit_dir = get_startup_kit_dir()
fed_admin_config = ConfigFactory.load_config("fed_admin.json", [startup_kit_dir])
admin_user_dir = None
admin_username = None
if fed_admin_config:
admin_user_dir = os.path.dirname(os.path.dirname(fed_admin_config.file_path))
config_dict = fed_admin_config.to_dict()
admin_username = config_dict["admin"].get("username", None)
else:
raise ValueError(f"Unable to locate fed_admin configuration from startup kid location {startup_kit_dir}")
return admin_username, admin_user_dir
def internal_submit_job(admin_user_dir, username, temp_job_dir):
print("trying to connect to the server")
sess = new_secure_session(username=username, startup_kit_location=admin_user_dir)
job_id = sess.submit_job(temp_job_dir)
print(f"job: '{job_id} was submitted")
job_sub_cmd_handlers = {
CMD_CREATE_JOB: create_job,
CMD_SUBMIT_JOB: submit_job,
CMD_LIST_TEMPLATES: list_templates,
CMD_SHOW_VARIABLES: show_variables,
}
job_sub_cmd_parser = {
CMD_CREATE_JOB: None,
CMD_SUBMIT_JOB: None,
CMD_LIST_TEMPLATES: None,
CMD_SHOW_VARIABLES: None,
}
def handle_job_cli_cmd(cmd_args):
job_cmd_handler = job_sub_cmd_handlers.get(cmd_args.job_sub_cmd, None)
if job_cmd_handler:
job_cmd_handler(cmd_args)
else:
raise CLIUnknownCmdException("\n invalid command. \n")
def def_job_cli_parser(sub_cmd):
cmd = "job"
parser = sub_cmd.add_parser(cmd)
job_subparser = parser.add_subparsers(title="job", dest="job_sub_cmd", help="job subcommand")
define_list_templates_parser(job_subparser)
define_create_job_parser(job_subparser)
define_submit_job_parser(job_subparser)
define_variables_parser(job_subparser)
return {cmd: parser}
def define_submit_job_parser(job_subparser):
submit_parser = job_subparser.add_parser("submit", help="submit job")
submit_parser.add_argument(
"-j",
"--job_folder",
type=str,
nargs="?",
default=os.path.join(get_curr_dir(), "current_job"),
help="job_folder path, default to ./current_job directory",
)
submit_parser.add_argument(
"-f",
"--config_file",
type=str,
action="append",
nargs="*",
help="""Training config file with corresponding optional key=value pairs.
                If a key is present in the preceding config file, the value in the config
                file will be overwritten by the new value """,
)
submit_parser.add_argument(
"-a",
"--app_config",
type=str,
nargs="*",
help="""key=value options will be passed directly to script argument """,
)
submit_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
job_sub_cmd_parser[CMD_SUBMIT_JOB] = submit_parser
def define_list_templates_parser(job_subparser):
show_jobs_parser = job_subparser.add_parser("list_templates", help="show available job templates")
show_jobs_parser.add_argument(
"-d",
"--job_templates_dir",
type=str,
nargs="?",
default=None,
help="Job template directory, if not specified, "
"will search from ./nvflare/config.conf and NVFLARE_HOME env. variables",
)
show_jobs_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
job_sub_cmd_parser[CMD_LIST_TEMPLATES] = show_jobs_parser
def define_variables_parser(job_subparser):
show_variables_parser = job_subparser.add_parser(
"show_variables", help="show template variable values in configuration"
)
show_variables_parser.add_argument(
"-j",
"--job_folder",
type=str,
nargs="?",
default=os.path.join(get_curr_dir(), "current_job"),
help="job_folder path, default to ./current_job directory",
)
show_variables_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
job_sub_cmd_parser[CMD_SHOW_VARIABLES] = show_variables_parser
def define_create_job_parser(job_subparser):
create_parser = job_subparser.add_parser("create", help="create job")
create_parser.add_argument(
"-j",
"--job_folder",
type=str,
nargs="?",
default=os.path.join(get_curr_dir(), "current_job"),
help="job_folder path, default to ./current_job directory",
)
create_parser.add_argument(
"-w",
"--template",
type=str,
nargs="?",
default="sag_pt",
help="""template name, use liste_templates to see available jobs from job templates """,
)
create_parser.add_argument("-s", "--script", type=str, nargs="?", help="""code script such as train.py""")
create_parser.add_argument(
"-sd",
"--script_dir",
type=str,
nargs="?",
help="""script directory contains additional related files.
All files or directories under this directory will be copied over
to the custom directory.""",
)
create_parser.add_argument(
"-f",
"--config_file",
type=str,
action="append",
nargs="*",
help="""Training config file with corresponding optional key=value pairs.
                If a key is present in the preceding config file, the value in the config
                file will be overwritten by the new value """,
)
create_parser.add_argument(
"-a",
"--app_config",
type=str,
nargs="*",
help="""key=value options will be passed directly to script argument """,
)
create_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
create_parser.add_argument(
"-force",
"--force",
action="store_true",
help="force create is on, if -force, " "overwrite existing configuration with newly created configurations",
)
job_sub_cmd_parser[CMD_CREATE_JOB] = create_parser
def prepare_job_config(cmd_args, tmp_job_dir: Optional[str] = None):
update_client_app_script(cmd_args)
merged_conf, config_modified = merge_configs_from_cli(cmd_args)
need_save_config = config_modified is True or tmp_job_dir is not None
if tmp_job_dir is None:
tmp_job_dir = cmd_args.job_folder
if need_save_config:
save_merged_configs(merged_conf, tmp_job_dir)
variable_values = filter_indices(merged_conf)
return variable_values
def update_client_app_script(cmd_args):
if cmd_args.app_config:
client_config, config_path = _update_client_app_config_script(cmd_args.job_folder, cmd_args.app_config)
save_config(client_config, config_path)
def _update_client_app_config_script(job_folder, app_configs: List[str]) -> Tuple[ConfigTree, str]:
xs = []
for cli_kv in app_configs:
tokens = cli_kv.split("=")
k, v = tokens[0], tokens[1]
xs.append((k, v))
config_args = " ".join([f"--{k} {v}" for k, v in xs])
config_dir = get_config_dir(job_folder)
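    # ".xxx" is a placeholder extension: ConfigFactory is expected to resolve
    # whichever supported format (json/conf/yml) actually exists on disk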
config = ConfigFactory.load_config(os.path.join(config_dir, "config_fed_client.xxx"))
if config.format == ConfigFormat.JSON or config.format == ConfigFormat.OMEGACONF:
client_config = CF.from_dict(config.to_dict())
else:
client_config = config.conf
client_config.put("app_config", config_args)
return client_config, config.file_path
def save_merged_configs(merged_conf, tmp_job_dir):
    for file, (config, excluded_key_list, key_indices) in merged_conf.items():
config_dir = pathlib.Path(tmp_job_dir) / "app" / "config"
base_filename = os.path.basename(file)
if base_filename.startswith("meta."):
config_dir = tmp_job_dir
dst_path = os.path.join(config_dir, base_filename)
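        # the root KeyIndex holds the entire merged config tree, so saving its value writes the whole file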
root_index = get_root_index(next(iter(key_indices.values()))[0])
save_config(root_index.value, dst_path)
def prepare_meta_config(cmd_args, target_template_dir):
job_folder = cmd_args.job_folder
job_folder = job_folder[:-1] if job_folder.endswith("/") else job_folder
app_name = os.path.basename(job_folder)
meta_files = ["meta.json", "meta.conf", "meta.yml"]
dst_path = None
for mf in meta_files:
meta_path = os.path.join(job_folder, mf)
if os.path.isfile(meta_path):
dst_path = meta_path
break
src_meta_path = os.path.join(target_template_dir, "meta.conf")
if not os.path.isfile(src_meta_path):
dst_config = load_default_config_template("meta.conf")
else:
dst_config = CF.parse_file(src_meta_path)
    # write a new meta.conf unless the user already defined one (and -force is not set)
if not dst_path or (dst_path and cmd_args.force):
dst_config.put("name", app_name)
dst_path = os.path.join(job_folder, "meta.conf")
save_config(dst_config, dst_path)
# clean up
config_dir = get_config_dir(job_folder)
for mf in meta_files:
meta_path = os.path.join(config_dir, mf)
if os.path.isfile(meta_path):
os.remove(meta_path)
def load_default_config_template(config_file_name: str):
file_dir = os.path.dirname(__file__)
# src config here is always pyhocon
config_template = CF.parse_file(os.path.join(file_dir, f"config/{config_file_name}"))
return config_template
def dst_app_path(job_folder: str):
return os.path.join(job_folder, "app")
def dst_config_path(job_folder, config_filename):
config_dir = get_config_dir(job_folder)
dst_path = os.path.join(config_dir, config_filename)
return dst_path
def get_config_dir(job_folder):
app_dir = dst_app_path(job_folder)
config_dir = os.path.join(app_dir, "config")
return config_dir
def convert_args_list_to_dict(kvs: Optional[List[str]] = None) -> dict:
"""
Convert a list of key-value strings to a dictionary.
Args:
kvs (Optional[List[str]]): A list of key-value strings in the format "key=value".
Returns:
dict: A dictionary containing the key-value pairs from the input list.
"""
kv_dict = {}
if kvs:
for kv in kvs:
try:
key, value = kv.split("=")
kv_dict[key.strip()] = value.strip()
except ValueError:
raise ValueError(f"Invalid key-value pair: '{kv}'")
return kv_dict
def prepare_job_folder(cmd_args):
job_folder = cmd_args.job_folder
if job_folder:
if not os.path.exists(job_folder):
os.makedirs(job_folder)
elif not os.path.isdir(job_folder):
raise ValueError(f"job_folder '{job_folder}' exits but not directory")
elif cmd_args.force:
shutil.rmtree(job_folder)
os.makedirs(job_folder)
app_dir = os.path.join(job_folder, "app")
app_config_dir = os.path.join(app_dir, "config")
app_custom_dir = os.path.join(app_dir, "custom")
dirs = [app_dir, app_config_dir, app_custom_dir]
for d in dirs:
os.makedirs(d, exist_ok=True)
if cmd_args.script and len(cmd_args.script.strip()) > 0:
if os.path.exists(cmd_args.script):
shutil.copy(cmd_args.script, app_custom_dir)
else:
raise ValueError(f"{cmd_args.script} doesn't exists")
if cmd_args.script_dir and len(cmd_args.script_dir.strip()) > 0:
if os.path.exists(cmd_args.script_dir):
copy_tree(cmd_args.script_dir, app_custom_dir)
else:
raise ValueError(f"{cmd_args.script_dir} doesn't exists")
| NVFlare-main | nvflare/tool/job/job_cli.py |
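A quick, self-contained usage note for convert_args_list_to_dict defined above (plain Python, no NVFlare runtime needed); illustrative only:

from nvflare.tool.job.job_cli import convert_args_list_to_dict

print(convert_args_list_to_dict(["epochs=5", "lr=0.01"]))  # {'epochs': '5', 'lr': '0.01'}
print(convert_args_list_to_dict(None))                     # {}
# convert_args_list_to_dict(["bad-pair"]) raises ValueError("Invalid key-value pair: 'bad-pair'")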
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
JOB_INFO_DESC_KEY = "description"
JOB_INFO_DESC = "Description"
JOB_INFO_CONTROLLER_TYPE_KEY = "controller_type"
JOB_INFO_CONTROLLER_TYPE = "Controller Type"
JOB_INFO_CLIENT_TYPE_KEY = "client_category"
JOB_INFO_CLIENT_TYPE = "Client Category"
JOB_TEMPLATES = "job_templates"
JOB_TEMPLATE = "job_template"
JOB_TEMPLATE_CONF = "job_templates.conf"
JOB_INFO_CONF = "info.conf"
JOB_INFO_MD = "info.md"
JOB_INFO_KEYS = [JOB_INFO_DESC_KEY, JOB_INFO_CONTROLLER_TYPE_KEY, JOB_INFO_CLIENT_TYPE_KEY]
CONFIG_FILE_BASE_NAME_WO_EXTS = ["config_fed_client", "config_fed_server", "meta"]
CONFIG_FED_SERVER_CONF = "config_fed_server.conf"
CONFIG_FED_CLIENT_CONF = "config_fed_client.conf"
JOB_CONFIG_FILE_NAME = "file_name"
JOB_CONFIG_VAR_NAME = "var_name"
JOB_CONFIG_VAR_VALUE = "value"
JOB_CONFIG_COMP_NAME = "component"
JOB_TEMPLATE_NAME = "name"
CONFIG_CONF = "config.conf"
| NVFlare-main | nvflare/tool/job/job_client_const.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any, Dict, List, Optional, Tuple
from pyhocon import ConfigTree
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.tool.job.config.config_indexer import KeyIndex, build_reverse_order_index
def merge_configs_from_cli(cmd_args) -> Tuple[Dict[str, tuple], bool]:
indices: Dict[str, Tuple] = build_config_file_indices(cmd_args.job_folder)
cli_config_dict: Dict[str, Dict[str, str]] = get_cli_config(cmd_args)
config_modified = False
if cli_config_dict:
config_modified = True
copy_app_config_file(cli_config_dict, cmd_args)
return merge_configs(indices, cli_config_dict), config_modified
else:
return indices, config_modified
def copy_app_config_file(cli_config_dict, cmd_args):
config_dir = os.path.join(cmd_args.job_folder, "app/config")
for cli_config_file in cli_config_dict:
base_config_filename = os.path.basename(cli_config_file)
if base_config_filename.startswith("meta."):
target_dir = cmd_args.job_folder
else:
target_dir = config_dir
target_file = os.path.join(target_dir, base_config_filename)
if not os.path.exists(target_file):
shutil.copyfile(cli_config_file, target_file)
def extract_string_with_index(input_string):
"""
Extract the string before '[', the index within '[', and the string after ']'.
Args:
input_string (str): The input string containing the pattern '[index]'.
Returns:
list: A list of tuples containing the extracted components: (string_before, index, string_after).
"""
result = []
if not input_string.strip(" "):
return result
opening_bracket_index = input_string.find("[")
closing_bracket_index = input_string.find("]")
if opening_bracket_index > 0 and closing_bracket_index > 0:
string_before = input_string[:opening_bracket_index]
index = int(input_string[opening_bracket_index + 1 : closing_bracket_index])
string_after = input_string[closing_bracket_index + 1 :].strip(". ")
if string_after:
r = (string_before.strip("."), index, extract_string_with_index(string_after.strip(".")))
if r:
result.append(r)
else:
r = (string_before.strip("."), index, string_after)
result.append(r)
else:
result.append(input_string)
result = [elm for elm in result if len(elm) > 0]
return result
def filter_indices(indices_configs: Dict[str, Tuple]) -> Dict[str, Dict[str, Any]]:
result = {}
for file, (config, excluded_key_list, key_indices) in indices_configs.items():
result[file] = filter_config_name_and_values(excluded_key_list, key_indices)
return result
def filter_config_name_and_values(excluded_key_list, key_indices):
temp_results = {}
for key, key_index_list in key_indices.items():
for key_index in key_index_list:
if key not in excluded_key_list and key_index.value not in excluded_key_list:
temp_results[key] = key_index
return temp_results
def merge_configs(indices_configs: Dict[str, tuple], cli_file_configs: Dict[str, Dict]) -> Dict[str, tuple]:
"""
Merge configurations from indices_configs and cli_file_configs.
Args:
indices_configs (Dict[str, tuple]): A dictionary containing indices and configurations.
cli_file_configs (Dict[str, Dict]): A dictionary containing CLI configurations.
Returns:
Dict[str, tuple]: A dictionary containing merged configurations.
"""
merged = {}
for file, (config, excluded_key_list, key_indices) in indices_configs.items():
basename = os.path.basename(file)
if len(key_indices) > 0:
            # the CLI may use an absolute path as well; try that first, then fall back to the base name
cli_configs = cli_file_configs.get(file, None)
if not cli_configs:
cli_configs = cli_file_configs.get(basename, None)
if cli_configs:
for key, cli_value in cli_configs.items():
if key not in key_indices:
# not every client has app_config, app_script
if key not in ["app_script", "app_config"]:
raise ValueError(f"Invalid config key: '{key}' for file '{file}'")
else:
indices = key_indices.get(key)
for key_index in indices:
value_type = type(key_index.value)
new_value = value_type(cli_value) if key_index.value is not None else cli_value
key_index.value = new_value
parent_key = key_index.parent_key
if parent_key and isinstance(parent_key.value, ConfigTree):
parent_key.value.put(key_index.key, new_value)
merged[basename] = (config, excluded_key_list, key_indices)
return merged
def get_root_index(key_index: KeyIndex) -> Optional[KeyIndex]:
if key_index is None or key_index.parent_key is None:
return key_index
if key_index.parent_key is not None:
if key_index.parent_key.parent_key is None or key_index.parent_key.parent_key.key == "":
return key_index.parent_key
else:
return get_root_index(key_index.parent_key)
return None
def get_cli_config(cmd_args: Any) -> Dict[str, Dict[str, str]]:
"""
Extract configurations from command-line arguments and return them in a dictionary.
Args:
cmd_args: Command-line arguments containing configuration data.
Returns:
A dictionary containing the configurations extracted from the command-line arguments.
"""
cli_config_dict = {}
if cmd_args.config_file:
cli_configs = cmd_args.config_file
cli_config_dict = parse_cli_config(cli_configs)
if "script" in cmd_args and cmd_args.script:
script = os.path.basename(cmd_args.script)
if "config_fed_client.conf" in cli_config_dict:
cli_config_dict["config_fed_client.conf"].update({"app_script": script})
else:
cli_config_dict["config_fed_client.conf"] = {"app_script": script}
return cli_config_dict
def parse_cli_config(cli_configs: List[str]) -> Dict[str, Dict[str, str]]:
"""
Extract configurations from command-line arguments and return them in a dictionary.
Args:
cli_configs: Array of CLI config option in the format of
filename key1=v1 key2=v2
separated by space
Returns:
A dictionary containing the configurations extracted from the command-line arguments.
"""
cli_config_dict = {}
if cli_configs:
for arr in cli_configs:
config_file = os.path.basename(arr[0])
config_data = arr[1:]
config_dict = {}
for conf in config_data:
conf_key_value = conf.split("=")
if len(conf_key_value) != 2:
raise ValueError(f"Invalid config data: {conf}")
conf_key, conf_value = conf_key_value
config_dict[conf_key] = conf_value
cli_config_dict[config_file] = config_dict
return cli_config_dict
def build_config_file_indices(config_dir: str) -> Dict[str, Tuple]:
excluded = ["info"]
included = ["config_fed_client", "config_fed_server", "meta"]
config_extensions = ConfigFormat.extensions()
config_file_index = {}
config_files = []
for root, _, files in os.walk(config_dir):
for f in files:
tokens = os.path.splitext(f)
name_wo_ext = tokens[0]
ext = tokens[1]
if (
ext in config_extensions
and not f.startswith("._")
and name_wo_ext in included
and name_wo_ext not in excluded
):
                # keep the full path: "root" changes on each os.walk iteration
                config_files.append(os.path.join(root, f))
    for f in config_files:
        f = str(os.path.abspath(f))
if os.path.isfile(f):
real_path, config, excluded_key_list, key_indices = build_reverse_order_index(str(f))
config_file_index[real_path] = (config, excluded_key_list, key_indices)
return config_file_index
| NVFlare-main | nvflare/tool/job/config/configer.py |
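Two of the helpers above are pure functions and easy to sanity-check in isolation; the snippet below is illustrative only:

from nvflare.tool.job.config.configer import extract_string_with_index, parse_cli_config

# Nested index extraction: "components[2].args.epochs" ->
# [('components', 2, ['args.epochs'])]
print(extract_string_with_index("components[2].args.epochs"))

# Each CLI entry is "filename key1=v1 key2=v2", already tokenized into a list:
print(parse_cli_config([["config_fed_server.conf", "min_clients=2", "num_rounds=3"]]))
# -> {'config_fed_server.conf': {'min_clients': '2', 'num_rounds': '3'}}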
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/tool/job/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import inspect
from typing import Any, Dict, List, Optional, Tuple, Union
from pyhocon import ConfigFactory as CF
from pyhocon import ConfigTree
from nvflare.fuel.utils.config import Config, ConfigFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.fuel.utils.import_utils import optional_import
@dataclasses.dataclass
class KeyIndex:
key: str
value: Union[None, Any, ConfigTree] = None
parent_key: Optional["KeyIndex"] = None
index: Optional[int] = None
component_name: Optional[str] = None
def build_reverse_order_index(config_file_path: str) -> Tuple:
config, config_file_path = load_pyhocon_conf(config_file_path)
components: list = config.get("components", None)
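    # component ids and structural keys are excluded so they are never surfaced as tunable template variables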
excluded_list = [comp.get("id") for comp in components] if components else []
excluded_list.extend(
[
"name",
"path",
"id",
"format_version",
"tasks",
"task_name",
"train_task_name",
"submit_model_task_name",
"validation_task_name",
"validate_task_name",
"task_data_filters",
"task_result_filters",
"exchange_path",
"job_folder_name",
"json_encoder_path",
]
)
key_indices = build_dict_reverse_order_index(config, excluded_keys=[])
key_indices = add_default_values(excluded_list, key_indices)
populate_key_component_names(key_indices)
return config_file_path, config, excluded_list, key_indices
def load_pyhocon_conf(config_file_path) -> Tuple[ConfigTree, str]:
try:
temp_conf: Config = ConfigFactory.load_config(config_file_path)
if temp_conf:
config_file_path = temp_conf.file_path
if temp_conf.format == ConfigFormat.PYHOCON:
config: ConfigTree = temp_conf.conf
else:
config: ConfigTree = CF.from_dict(temp_conf.to_dict())
else:
raise ValueError(f"Config is None for file:'{config_file_path}'.")
except Exception as e:
raise RuntimeError(f"filed to parse file {config_file_path}:", e)
return config, config_file_path
def build_list_reverse_order_index(
config_list: List,
key: str,
excluded_keys: Optional[List[str]],
root_index: Optional[KeyIndex],
key_indices: Optional[Dict],
) -> Dict:
"""
Recursively build a reverse order index for a list.
"""
if excluded_keys is None:
excluded_keys = []
if key_indices is None:
key_indices = {}
for index, value in enumerate(config_list):
elmt_key = f"{key}[{index}]"
key_index = KeyIndex(key=elmt_key, value=value, parent_key=root_index, index=index)
if isinstance(value, list):
if len(value) > 0:
key_indices = build_list_reverse_order_index(
config_list=value,
key=elmt_key,
excluded_keys=excluded_keys,
root_index=key_index,
key_indices=key_indices,
)
else:
add_to_indices(elmt_key, key_index, key_indices)
if key == "name":
key_index.component_name = value
elif isinstance(value, ConfigTree):
key_indices = build_dict_reverse_order_index(
config=value, excluded_keys=excluded_keys, root_index=key_index, key_indices=key_indices
)
elif is_primitive(value):
if key == "path":
last_dot_index = value.rindex(".")
class_name = value[last_dot_index + 1 :]
key_index.component_name = class_name
elif key == "name":
key_index.component_name = value
add_to_indices(elmt_key, key_index, key_indices)
else:
raise RuntimeError(f"Unhandled data type: {type(value)}")
return key_indices
def is_primitive(value):
return isinstance(value, int) or isinstance(value, float) or isinstance(value, str) or isinstance(value, bool)
def has_none_primitives_in_list(values: List):
return any(not is_primitive(x) for x in values)
def build_dict_reverse_order_index(
config: ConfigTree,
excluded_keys: List[str] = None,
root_index: Optional[KeyIndex] = None,
key_indices: Optional[Dict] = None,
) -> Dict:
key_indices = {} if key_indices is None else key_indices
if excluded_keys is None:
excluded_keys = []
root_index = KeyIndex(key="", value=config, parent_key=None, index=None) if root_index is None else root_index
for key, value in config.items():
if key in excluded_keys:
continue
if value in excluded_keys:
continue
key_index = KeyIndex(key=key, value=value, parent_key=root_index, index=None)
if isinstance(value, list):
if len(value) > 0 and has_none_primitives_in_list(value):
key_indices = build_list_reverse_order_index(
config_list=value,
key=key,
excluded_keys=excluded_keys,
root_index=key_index,
key_indices=key_indices,
)
else:
add_to_indices(key, key_index, key_indices)
elif isinstance(value, ConfigTree):
key_indices = build_dict_reverse_order_index(
config=value, excluded_keys=excluded_keys, root_index=key_index, key_indices=key_indices
)
elif is_primitive(value):
parent_key = key_index.parent_key
if key == "path":
last_dot_index = value.rindex(".")
class_name = value[last_dot_index + 1 :]
key_index.component_name = class_name
parent_key.component_name = key_index.component_name if parent_key.index is not None else None
elif key == "name":
key_index.component_name = value
parent_key.component_name = key_index.component_name if parent_key.index else None
add_to_indices(key, key_index, key_indices)
else:
raise RuntimeError(f"Unhandled data type: {type(value)}")
return key_indices
def add_to_indices(key, key_index, key_indices):
indices = key_indices.get(key, [])
if key_index not in indices:
indices.append(key_index)
key_indices[key] = indices
def add_class_defaults_to_key(excluded_keys, key_index, key_indices, results):
if key_index is None or key_index.key != "path":
return
parent_key: KeyIndex = key_index.parent_key
value = key_index.value
last_dot_index = value.rindex(".")
class_path = value[:last_dot_index]
class_name = value[last_dot_index + 1 :]
module, import_flag = optional_import(module=class_path, name=class_name)
if import_flag:
params = inspect.signature(module.__init__).parameters
args_config = None
if parent_key and parent_key.value and isinstance(parent_key.value, ConfigTree):
args_config = parent_key.value.get("args", None)
for v in params.values():
if (
v.name != "self"
and v.default is not None
and v.name not in excluded_keys
and v.default not in excluded_keys
):
name_key = None
arg_key = KeyIndex(
key="args", value=args_config, parent_key=parent_key, component_name=key_index.component_name
)
if isinstance(v.default, str):
if len(v.default) > 0:
name_key = KeyIndex(
key=v.name,
value=v.default,
parent_key=arg_key,
component_name=key_index.component_name,
)
elif type(v.default) != type:
name_key = KeyIndex(
key=v.name,
value=v.default,
parent_key=arg_key,
component_name=key_index.component_name,
)
if name_key:
name_indices: List[KeyIndex] = key_indices.get(v.name, [])
has_one = any(
k.parent_key is not None
and k.parent_key.key == "args"
and k.parent_key.parent_key.key == key_index.parent_key.key
for k in name_indices
)
if not has_one:
name_indices.append(name_key)
results[v.name] = name_indices
def update_index_comp_name(key_index: KeyIndex):
parent_key = key_index.parent_key
if parent_key is None:
return key_index
if not isinstance(key_index, KeyIndex):
return key_index
if parent_key.key == "args":
grand_parent = parent_key.parent_key
key_index.component_name = grand_parent.component_name
update_index_comp_name(parent_key)
return key_index
def add_default_values(excluded_keys, key_indices: Dict):
results = key_indices.copy()
for key, key_index_list in key_indices.items():
for key_index in key_index_list:
if key_index:
add_class_defaults_to_key(excluded_keys, key_index, key_indices, results)
return results
def populate_key_component_names(key_indices: Dict):
results = {}
for key, key_index_list in key_indices.items():
for key_index in key_index_list:
if key_index:
key_index = update_index_comp_name(key_index)
key_index.component_name = "" if key_index.component_name is None else key_index.component_name
results[key] = key_index
return results
| NVFlare-main | nvflare/tool/job/config/config_indexer.py |
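A small sketch of the reverse-order index on an in-memory pyhocon tree; the config content is illustrative only:

from pyhocon import ConfigFactory as CF
from nvflare.tool.job.config.config_indexer import build_dict_reverse_order_index

conf = CF.parse_string("{ num_rounds = 3, executor { args { lr = 0.01 } } }")
key_indices = build_dict_reverse_order_index(conf)
for key, indices in key_indices.items():
    for ki in indices:
        print(f"{key} = {ki.value} (parent: '{ki.parent_key.key}')")
# Leaf keys ("num_rounds", "lr") are indexed with back-links to their parents,
# which is what lets merge_configs rewrite a value anywhere in the tree by key name.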
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from setuptools import setup, find_packages
from cutensor.package_info import __version__
from cutensor.package_info import __package_name__
from cutensor.package_info import __homepage__
from cutensor.package_info import __download_url__
from cutensor.package_info import __description__
from cutensor.package_info import __license__
from cutensor.c_extensions import CustomExtension
setup(name=__package_name__,
version=__version__,
description=__description__,
url=__homepage__,
download_url=__download_url__,
license=__license__,
packages=find_packages(),
ext_modules=CustomExtension.modules)
| CUDALibrarySamples-master | cuTENSOR/python/setup.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
MAJOR = 0
MINOR = 1
PATCH = 0
VERSION = (MAJOR, MINOR, PATCH)
__version__ = '.'.join(map(str, VERSION))
__package_name__ = 'cutensor-python'
__description__ = 'PyTorch and Tensorflow Python bindings for cuTENSOR'
__homepage__ = 'https://developer.nvidia.com/cutensor'
__download_url__ = 'https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuTENSOR/cutensor'
__license__ = 'BSD'
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/package_info.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from setuptools import Extension
from distutils.spawn import find_executable
import os
import subprocess
import re
__all__ = ['CustomExtension']
include_dirs = []
library_dirs = []
cuda_nvcc = find_executable('nvcc')
cuda_root = os.path.join(os.path.dirname(cuda_nvcc), os.pardir)
cuda_version = re.search(
r'release ([^,]*),',
subprocess.check_output([cuda_nvcc, '--version']).decode('utf-8')).group(1)
include_dirs.append(os.path.join(cuda_root, 'include'))
library_dirs.append(os.path.join(cuda_root, 'lib64'))
if 'CUTENSOR_ROOT' in os.environ:
root = os.environ['CUTENSOR_ROOT']
include_dirs.append(os.path.join(root, 'include'))
library_dirs.append(os.path.join(root, 'lib'))
library_dirs.append(os.path.join(root, 'build/lib'))
versioned_path = os.path.join(root, 'lib', cuda_version)
if not os.path.exists(versioned_path):
versioned_path = os.path.join(root, 'lib', cuda_version.split('.')[0])
library_dirs.append(versioned_path)
class CustomExtension:
modules = []
@classmethod
def Torch(cls, name, sources):
try:
import torch
from torch.utils.cpp_extension import CUDAExtension
ext = CUDAExtension(name,
sources=sources,
libraries=['cutensor'],
define_macros=[
('TORCH_API_INCLUDE_EXTENSION_H',),
('TORCH_EXTENSION_NAME',
name.split('.')[-1]),
('_GLIBCXX_USE_CXX11_ABI',
str(int(torch._C._GLIBCXX_USE_CXX11_ABI)))
],
extra_compile_args=['-std=c++14', '-fopenmp'],
extra_link_args=['-std=c++14', '-fopenmp'],
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=library_dirs)
cls.modules.append(ext)
return ext
except ImportError:
return None
@classmethod
def Tensorflow(cls, name, sources):
try:
import tensorflow as tf
ext = Extension(name,
sources=sources,
libraries=['cutensor', 'cudart'],
extra_compile_args=tf.sysconfig.get_compile_flags(),
extra_link_args=tf.sysconfig.get_link_flags() +
tf.sysconfig.get_compile_flags(),
define_macros=[('GOOGLE_CUDA', '1')],
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=library_dirs)
            cls.modules.append(ext)
            return ext
        except ImportError:
            return None
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/c_extensions_utils.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from cutensor.c_extensions_utils import CustomExtension
einsum_torch = CustomExtension.Torch('cutensor.torch.binding',
sources=['cutensor/torch/einsum.cc'])
einsum_tf = CustomExtension.Tensorflow(
'cutensor.tensorflow.binding',
sources=[
'cutensor/tensorflow/einsum_kernel.cc',
'cutensor/tensorflow/einsum_ops.cc',
'cutensor/tensorflow/einsum_module.cc'
])
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/c_extensions.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from cutensor.package_info import __version__
from cutensor.package_info import __package_name__
from cutensor.package_info import __homepage__
from cutensor.package_info import __download_url__
from cutensor.package_info import __description__
from cutensor.package_info import __license__
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
def normalize_subscript(subscript):
if '->' in subscript:
subscript = subscript.split('->')
lhs = subscript[0]
rhs = subscript[1]
else:
lhs = subscript
rhs = ''.join(sorted([s for s in set(subscript) if s != ',' and subscript.count(s) == 1]))
if '...' in lhs:
        raise RuntimeError('Ellipsis is currently unsupported')
return lhs + '->' + rhs, ',' in lhs
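# Illustrative examples (added for exposition; pure Python, no GPU needed):
def _normalize_subscript_examples():
    # An explicit output is kept as-is; the flag reports a binary contraction.
    assert normalize_subscript('ik,kj->ij') == ('ik,kj->ij', True)
    # Without '->', the output is the sorted set of modes that appear exactly once.
    assert normalize_subscript('ik,kj') == ('ik,kj->ij', True)
    # A single operand is reported with the flag set to False.
    assert normalize_subscript('ij->ji') == ('ij->ji', False)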
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/common.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import unittest
from parameterized import parameterized
from parameterized import param
import cutensor.torch as cutensor
class EinsumTest(unittest.TestCase):
@parameterized.expand(
# yapf: disable
[
param(
"test 0",
a_size=(48, 37),
b_size=(37, 74),
equation="ik,kj->ij",
dtype=torch.float32,
),
param(
"test 0 (complex)",
a_size=(50, 50),
b_size=(50, 50),
equation="ik,kj->ij",
dtype=torch.complex64,
),
param(
"test 1",
a_size=(50, 50, 50),
b_size=(50, 50, 50),
equation="lik,lkj->lij",
dtype=torch.complex128,
),
param(
"test 2",
a_size=(50, 50, 50, 20),
b_size=(50, 50, 50, 20),
equation="likm,lkjm->lij",
dtype=torch.float32,
),
param(
"test 3",
a_size=(20, 50, 50, 50),
b_size=(50, 50, 50, 20),
equation="mlik,lkjm->lij",
dtype=torch.float32,
),
param(
"test 4",
a_size=(50, 50),
b_size=(50, 50),
equation="ik,kj->ij",
dtype=torch.float16,
),
param("test 5",
a_size=(50, 50, 50),
b_size=(50, 50, 50),
equation="lik,lkj->lij",
dtype=torch.float16),
param(
"test 6",
a_size=(50, 50, 50, 20),
b_size=(50, 50, 50, 20),
equation="likm,lkjm->lij",
dtype=torch.float16,
),
param(
"test 7",
a_size=(20, 50, 50, 50),
b_size=(50, 50, 50, 20),
equation="mlik,lkjm->lij",
dtype=torch.float16,
),
param(
"test 8",
a_size=(2, 5, 50, 2),
b_size=(5, 2, 50, 2),
equation="mlik,lkjm",
dtype=torch.float64,
),
# Activate when cuTENSOR supports it
# param(
# "test 8",
# a_size=(20, 50, 50, 50),
# b_size=(50, 50, 50, 20),
# equation="mlik,lkjm->lij",
# dtype=torch.bfloat16,
# ),
]
# yapf: enable
)
def test_einsum_equivalent_results(self,
_,
a_size,
b_size,
equation,
dtype=torch.float32):
kwargs = {
'dtype': dtype,
'device': torch.device("cuda"),
'requires_grad': True
}
torch.manual_seed(0)
cutensor_A = torch.randn(*a_size, **kwargs)
cutensor_B = torch.randn(*b_size, **kwargs)
cutensor_rslt = cutensor.EinsumFunction.apply(equation, cutensor_A,
cutensor_B)
cutensor_rslt.backward(torch.ones_like(cutensor_rslt))
cutensor_A_grad = cutensor_A.grad
cutensor_B_grad = cutensor_B.grad
torch_A = cutensor_A.clone().detach().requires_grad_(True)
torch_B = cutensor_B.clone().detach().requires_grad_(True)
torch_rslt = torch.einsum(equation, torch_A, torch_B)
torch_rslt.backward(torch.ones_like(torch_rslt))
torch_A_grad = torch_A.grad
torch_B_grad = torch_B.grad
self.assertEqual(cutensor_rslt.shape, torch_rslt.shape)
self.assertEqual(cutensor_A_grad.shape, torch_A_grad.shape)
self.assertEqual(cutensor_B_grad.shape, torch_B_grad.shape)
torch.testing.assert_allclose(cutensor_rslt, torch_rslt, rtol=5e-3, atol=6e-3)
torch.testing.assert_allclose(cutensor_A_grad, torch_A_grad, rtol=5e-3, atol=6e-3)
torch.testing.assert_allclose(cutensor_B_grad, torch_B_grad, rtol=5e-3, atol=6e-3)
@parameterized.expand(
# yapf: disable
[
param(
"test 0",
sizes=[(50, 60), (60, 40)],
equation="ik,kj->ji",
dtype=torch.float32,
),
param(
"test 1",
sizes=[(50, 60), (60, 7), (7, 8)],
equation="ik,kl,lj->ij",
dtype=torch.float32,
),
param(
"test 2",
sizes=[(50, 60), (60, 7), (7, 8)],
equation="ik,kl,lj",
dtype=torch.float32,
),
param(
"test 3",
sizes=[(50, 60), (60, 7), (7, 8)],
equation="ik,kl,lj->ij",
dtype=torch.complex64,
),
            # single input (unary case, dispatched through EinsumGeneral)
param(
"test 4",
sizes=[(50, 60)],
equation="ij->ji",
dtype=torch.float32,
),
]
# yapf: enable
)
def test_einsum_general_equivalent_results(self,
_,
sizes,
equation,
dtype=torch.float32):
kwargs = {
'dtype': dtype,
'device': torch.device("cuda"),
'requires_grad': True
}
cutensor_tensors = [torch.randn(*size, **kwargs) for size in sizes]
torch_tensors = [
t.clone().detach().requires_grad_(True) for t in cutensor_tensors
]
cutensor_rslt = cutensor.EinsumGeneral(equation, *cutensor_tensors)
cutensor_rslt.backward(torch.ones_like(cutensor_rslt))
cutensor_grads = [
t.grad for t in cutensor_tensors
]
torch_rslt = torch.einsum(equation, *torch_tensors)
torch_rslt.backward(torch.ones_like(torch_rslt))
torch_grads = [t.grad for t in torch_tensors]
self.assertEqual(cutensor_rslt.shape, torch_rslt.shape)
for ct, tt in zip(cutensor_grads, torch_grads):
self.assertEqual(ct.shape, tt.shape)
torch.testing.assert_allclose(cutensor_rslt, torch_rslt, rtol=5e-3, atol=5e-3)
for ct, tt in zip(cutensor_grads, torch_grads):
torch.testing.assert_allclose(ct, tt, rtol=5e-3, atol=5e-3)
if __name__ == '__main__':
unittest.main()
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/torch/einsum_test.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.autograd
import numpy as np
from .binding import einsum
from ..common import normalize_subscript
class EinsumFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, equation, input_0, input_1=None):
equation, isBinary = normalize_subscript(equation)
if isBinary and input_1 is None:
raise RuntimeError('The subscript indicates two inputs, but only one was passed')
if not isBinary and input_1 is not None:
raise RuntimeError('The subscript indicates one input, but two were passed')
        if input_1 is None:
            # unary case: the binding always expects two tensors, so pass a dummy
            input_1 = input_0.new_empty((1,))
output = einsum(equation, input_0, input_1, False, False)
if isBinary:
ctx.save_for_backward(input_0, input_1)
ctx.equation = equation
ctx.isBinary = isBinary
return output
@staticmethod
def backward(ctx, grad_output):
equation = ctx.equation
lhs, modeC = equation.split('->')
if ctx.isBinary:
input_0, input_1 = ctx.saved_tensors
conjugate = False
if torch.is_complex(input_0) or torch.is_complex(input_1):
conjugate = True
modeA, modeB = lhs.split(',')
d_input_0 = einsum(modeC + ',' + modeB + '->' + modeA, grad_output,
input_1, False, conjugate)
d_input_1 = einsum(modeA + ',' + modeC + '->' + modeB, input_0,
grad_output, conjugate, False)
return None, d_input_0, d_input_1
else:
dummy = grad_output.new_empty((1,))
d_input = einsum(modeC + '->' + lhs, grad_output, dummy, False, False)
return None, d_input
class Einsum(torch.nn.Module):
def __init__(self, equation):
super(Einsum, self).__init__()
self.equation = equation
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input_0, input_1):
return EinsumFunction.apply(self.equation, input_0, input_1)
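# Usage sketch (illustrative only; assumes a CUDA device and the compiled
# `cutensor.torch.binding` extension are available):
def _einsum_module_example():
    a = torch.randn(32, 64, device='cuda')
    b = torch.randn(64, 16, device='cuda')
    contraction = Einsum('ik,kj->ij')
    # Equivalent to torch.einsum('ik,kj->ij', a, b), evaluated by cuTENSOR:
    return contraction(a, b)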
def _compute_target_tensor(in0, in1, target, eqs):
remaining = target + ''.join(eqs)
result = ""
for m in in0[:-1] + in1[:-1] + in1[-1] + in0[-1]:
if m in remaining and m not in result:
result += m
    # reorder the kept modes to match their order in the final target
result = list(result)
for i in range(len(result)):
if result[i] not in target: continue
for j in range(i):
if result[j] not in target: continue
if target.index(result[j]) > target.index(result[i]):
result[i], result[j] = result[j], result[i]
return ''.join(result)
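# Illustrative check (added for exposition): when 'ik,kl' is contracted first,
# 'lj' is still pending and the final target is 'ij', so mode 'l' must survive
# into the intermediate tensor, giving 'il'.
def _compute_target_tensor_example():
    assert _compute_target_tensor('ik', 'kl', 'ij', ['lj']) == 'il'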
def EinsumGeneral(equation, *tensors, **kwargs):
tensors = list(tensors)
equation, isBinary = normalize_subscript(equation)
path = np.einsum_path(equation,
*[np.broadcast_to(np.nan, t.shape) for t in tensors],
**kwargs)
path = path[0][1:]
equation = equation.split('->')
eqs = equation[0].split(',')
target = equation[1]
for step in path:
if len(step) == 1:
result = EinsumFunction.apply(eqs[0] + '->' + target, tensors[0])
continue
assert step[0] < step[1]
in0 = tensors[step[0]]
in1 = tensors[step[1]]
tensors.pop(step[1])
tensors.pop(step[0])
eq0 = eqs[step[0]]
eq1 = eqs[step[1]]
eqs.pop(step[1])
eqs.pop(step[0])
tgt = _compute_target_tensor(eq0, eq1, target, eqs)
assert tgt != ""
eq = eq0 + ',' + eq1 + '->' + tgt
eqs.append(tgt)
result = EinsumFunction.apply(eq, in0, in1)
tensors.append(result)
return result
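# Usage sketch (illustrative; assumes a CUDA device and the compiled binding):
# EinsumGeneral mirrors torch.einsum for chains of contractions, pairing the
# operands with np.einsum_path before dispatching each pair to cuTENSOR.
def _einsum_general_example():
    a = torch.randn(50, 60, device='cuda')
    b = torch.randn(60, 7, device='cuda')
    c = torch.randn(7, 8, device='cuda')
    return EinsumGeneral('ik,kl,lj->ij', a, b, c)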
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/torch/einsum.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .einsum import einsum, EinsumFunction, EinsumGeneral, Einsum
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/torch/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from parameterized import parameterized
from parameterized import param
import tensorflow as tf
from tensorflow.python.platform import test
import tensorflow.test
import cutensor.tensorflow as cutensor
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class EinsumcuTENSORTest(tensorflow.test.TestCase):
@parameterized.expand(
# yapf: disable
[
param(
"test 0",
a_size=(50, 50),
b_size=(50, 50),
equation="ik,kj->ij",
dtype=tf.float32,
),
param(
"test 1",
a_size=(50, 50, 50),
b_size=(50, 50, 50),
equation="lik,lkj->lij",
dtype=tf.float32,
),
param(
"test 2",
a_size=(50, 50, 50, 20),
b_size=(50, 50, 50, 20),
equation="likm,lkjm->lij",
dtype=tf.float32,
),
param(
"test 3",
a_size=(20, 50, 50, 50),
b_size=(50, 50, 50, 20),
equation="mlik,lkjm->lij",
dtype=tf.float32,
),
param(
"test 4",
a_size=(50, 50),
b_size=(50, 50),
equation="ik,kj->ij",
dtype=tf.float16,
),
param("test 5",
a_size=(50, 50, 50),
b_size=(50, 50, 50),
equation="lik,lkj->lij",
dtype=tf.float16),
param(
"test 6",
a_size=(50, 50, 50, 20),
b_size=(50, 50, 50, 20),
equation="likm,lkjm->lij",
dtype=tf.float16,
),
param(
"test 7",
a_size=(20, 50, 50, 50),
b_size=(50, 50, 50, 20),
equation="mlik,lkjm->lij",
dtype=tf.float16,
),
param(
"test 8",
a_size=(2, 5, 5, 5),
b_size=(5, 5, 5, 2),
equation="mlik,lkjm",
dtype=tf.float16,
),
param(
"test 9",
a_size=(20, 50, 50, 50),
b_size=None,
equation="mlik->imlk",
dtype=tf.float16,
),
# Activate when cuTENSOR supports it
# param(
# "test 8",
# a_size=(20, 50, 50, 50),
# b_size=(50, 50, 50, 20),
# equation="mlik,lkjm->lij",
# dtype=tf.bfloat16,
# ),
]
# yapf: enable
)
def test_einsum_equivalent_results(self,
_,
a_size,
b_size,
equation,
dtype=tf.float32):
A = tf.compat.v1.get_variable("A",
shape=a_size,
initializer=tf.random_normal_initializer,
dtype=dtype)
if b_size is not None:
B = tf.compat.v1.get_variable("B",
shape=b_size,
initializer=tf.random_normal_initializer,
dtype=dtype)
tf_native_rslt = tf.einsum(equation, A, B, name="tf_native_einsum")
tf_native_grads = tf.gradients(tf_native_rslt, [A, B])
tf_cutensor_rslt = cutensor.einsum(equation,
A,
B,
name="tf_cuTensor_einsum")
tf_cutensor_grads = tf.gradients(tf_cutensor_rslt, [A, B])
else:
tf_native_rslt = tf.einsum(equation, A, name="tf_native_einsum")
tf_native_grads = tf.gradients(tf_native_rslt, [A])
tf_cutensor_rslt = cutensor.einsum(equation,
A,
name="tf_cuTensor_einsum")
tf_cutensor_grads = tf.gradients(tf_cutensor_rslt, [A])
self.assertEqual(tf_native_rslt.get_shape(),
tf_cutensor_rslt.get_shape())
self.assertEqual(tf_native_rslt.dtype, tf_cutensor_rslt.dtype)
self.assertEqual(len(tf_cutensor_grads), len(tf_native_grads))
with self.session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
self.assertAllClose(tf_native_rslt,
tf_cutensor_rslt,
rtol=5e-03,
atol=5e-03)
for tf_native_grad, tf_cutensor_grad in zip(tf_native_grads,
tf_cutensor_grads):
self.assertAllClose(tf_native_grad,
tf_cutensor_grad,
rtol=5e-03,
atol=5e-03)
self.assertEqual(tf_native_grad.dtype, tf_cutensor_grad.dtype)
if __name__ == '__main__':
test.main()
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/tensorflow/einsum_test.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.framework.load_library import load_op_library
from ..common import normalize_subscript
import glob
import os
pattern = os.path.join(os.path.dirname(__file__), 'binding*.so')
glob_res = glob.glob(pattern)
binding_file, = glob_res  # exactly one compiled binding .so is expected
einsum_lib = tf.load_op_library(binding_file)
def einsum(equation, *inputs, **kwargs):
name = kwargs.pop('name', None)
if kwargs:
raise TypeError(
'invalid keyword arguments for this function: ' +
', '.join([format(key) for key in sorted(list(kwargs.keys()))]))
with ops.name_scope(name, 'einsum', [equation, inputs]):
inputs = list(inputs)
input_shapes = [x.get_shape() for x in inputs]
input_axis_labels, output_axis_labels = special_math_ops._einsum_parse_and_resolve_equation(
equation, input_shapes)
axis_labels = set(''.join(input_axis_labels) + output_axis_labels)
for a in axis_labels:
for input_labels in input_axis_labels:
if (len(input_axis_labels) == 1 and
input_labels.count(a) == 2 and
input_labels == input_labels[::-1] and
'->' not in equation):
return math_ops.trace(inputs[0])
if input_labels.count(a) > 1:
raise ValueError(
'Subscript not supported: an axis appears more than once: %s'
% input_labels)
for a in axis_labels:
input_count = sum(1 for s in input_axis_labels if a in s)
if input_count > 2 and a not in output_axis_labels:
                tf.compat.v1.logging.warn(
'Falling back to exponential-space implementation of einsum()'
' because index "%s" is summed over more than two inputs.',
a)
return special_math_ops._exponential_space_einsum(
equation, *inputs)
equation = ','.join(input_axis_labels) + '->' + output_axis_labels
        if len(inputs) == 1:
            # unary case: the kernel is binary, so pass a dummy second input
            inputs.append(tf.constant([0], dtype=inputs[0].dtype))
return einsum_lib.einsum_cu_tensor(input_0=inputs[0],
input_1=inputs[1],
equation=equation)
@ops.RegisterGradient("EinsumCuTensor")
def _einsum_cu_tensor_grad(op, grad):
A = op.inputs[0]
B = op.inputs[1]
subscript, _ = normalize_subscript(op.get_attr("equation").decode())
lhs, modeC = subscript.split('->')
if ',' in lhs:
modeA, modeB = lhs.split(',')
grad_A = einsum_lib.einsum_cu_tensor(input_0=grad,
input_1=B,
equation=modeC + ',' + modeB + '->' +
modeA)
grad_B = einsum_lib.einsum_cu_tensor(input_0=A,
input_1=grad,
equation=modeA + ',' + modeC + '->' +
modeB)
return [grad_A, grad_B]
else:
grad = einsum_lib.einsum_cu_tensor(input_0=grad,
input_1=B,
equation=modeC + '->' + lhs)
return [grad, B]
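# Usage sketch (illustrative; assumes a GPU and the compiled binding .so):
def _einsum_tf_example():
    a = tf.random.normal((50, 50))
    b = tf.random.normal((50, 50))
    # Gradients flow through the "EinsumCuTensor" gradient registered above.
    return einsum('ik,kj->ij', a, b, name='cutensor_matmul')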
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/tensorflow/einsum.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .einsum import einsum
| CUDALibrarySamples-master | cuTENSOR/python/cutensor/tensorflow/__init__.py |
import argparse
import time
import cupy
import numpy as np
import matplotlib.pyplot as plt
import tifffile
import nvtiff
parser = argparse.ArgumentParser()
parser.add_argument('tiff_file', type=str, help='tiff file to decode.')
parser.add_argument('-o', '--output_file_prefix', type=str, default=None, help='Output file prefix to save decoded data. Will save one file per image in tiff file.')
parser.add_argument('-s', '--return_single_array', action='store_true', help='Return single array from nvTiff instead of list of arrays')
parser.add_argument('-c', '--check_output', action='store_true', help='Compare nvTiff output to reference CPU result')
parser.add_argument('-p', '--use_pinned_mem', action='store_true', help='Read TIFF data from pinned memory.')
parser.add_argument('-r', '--subfile_range', type=str, default=None, help='comma separated list of starting and ending file indices to decode, inclusive')
args = parser.parse_args()
print("Command line arguments:")
print(f"\ttiff_file: {args.tiff_file}")
print(f"\treturn_single_array: {args.return_single_array}")
print(f"\toutput_file_prefix: {args.output_file_prefix}")
print(f"\tcheck_output: {args.check_output}")
print(f"\tuse_pinned_mem: {args.use_pinned_mem}")
print(f"\tsubfile_range: {args.subfile_range}")
print()
subfile_range = None
if args.subfile_range:
subfile_range = [int(x) for x in args.subfile_range.split(',')]
# Create a cupy array to initialize CUDA
dummy = cupy.ndarray(1)
del dummy
# Read using tiffile and copy to GPU
cupy.cuda.get_current_stream().synchronize()
t0 = time.time()
ref_imgs = tifffile.imread(args.tiff_file)
t1 = time.time()
ref_imgs_gpu = cupy.asarray(ref_imgs)
cupy.cuda.get_current_stream().synchronize()
t2 = time.time()
print(f"Time for tifffile:")
print(f"\tdecode: {t1 - t0} s")
print(f"\th2d copy: {t2 - t1} s")
print(f"\ttotal: {t2 - t0} s")
# Read single nvTiff
cupy.cuda.get_current_stream().synchronize()
t0 = time.time()
f = nvtiff.nvTiffFile(0, args.tiff_file, use_pinned_mem=args.use_pinned_mem)
t1 = time.time()
nvTiff_imgs_gpu = nvtiff.decode(f, subfile_range = subfile_range, return_single_array=args.return_single_array)
cupy.cuda.get_current_stream().synchronize()
t2 = time.time()
print(f"Time for nvTiff:")
print(f"\topen: {t1 - t0} s")
print(f"\tdecode: {t2 - t1} s")
print(f"\ttotal: {t2 - t0} s")
print()
# Compare results
if args.check_output:
print(f"Checking output...")
if f.nsubfiles != 1 and subfile_range:
ref_imgs = ref_imgs[subfile_range[0]: subfile_range[1]+1,:,:]
if args.return_single_array:
nvTiff_imgs = nvTiff_imgs_gpu.get()
np.testing.assert_equal(ref_imgs, np.squeeze(nvTiff_imgs))
else:
nvTiff_imgs = [x.get() for x in nvTiff_imgs_gpu]
for i in range(len(nvTiff_imgs)):
if f.nsubfiles == 1:
np.testing.assert_equal(ref_imgs, np.squeeze(nvTiff_imgs[i]))
else:
np.testing.assert_equal(ref_imgs[i,:,:], np.squeeze(nvTiff_imgs[i]))
print(f"Output matches.")
if args.output_file_prefix:
print(f"Writing nvTiff outputs to {args.output_file_prefix}_*.png...")
if args.return_single_array:
nvTiff_imgs = nvTiff_imgs_gpu.get()
for i in range(nvTiff_imgs.shape[0]):
plt.imsave(f"{args.output_file_prefix}_{i}.png", nvTiff_imgs[i,:,:,:])
else:
nvTiff_imgs = [x.get() for x in nvTiff_imgs_gpu]
for i, nvTiff_img in enumerate(nvTiff_imgs):
plt.imsave(f"{args.output_file_prefix}_{i}.png", nvTiff_img)
| CUDALibrarySamples-master | nvTIFF/nvTIFF-Python-Example/nvtiff_test.py |
#!/usr/bin/env python
import codecs
import os
import subprocess
import sys
import distutils.sysconfig
import pybind11
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
HERE = os.path.dirname(os.path.realpath(__file__))
def read(*parts):
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
class CMakeBuildExt(build_ext):
def build_extensions(self):
cmake_python_library = "{}/{}".format(
distutils.sysconfig.get_config_var("LIBDIR"),
distutils.sysconfig.get_config_var("INSTSONAME"),
)
cmake_python_include_dir = distutils.sysconfig.get_python_inc()
install_dir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath("dummy"))
)
os.makedirs(install_dir, exist_ok=True)
cmake_args = [
"-DCMAKE_INSTALL_PREFIX={}".format(install_dir),
"-DPython_EXECUTABLE={}".format(sys.executable),
"-DPython_LIBRARIES={}".format(cmake_python_library),
"-DPython_INCLUDE_DIRS={}".format(cmake_python_include_dir),
"-DCMAKE_CUDA_ARCHITECTURES=70;80;90",
"-DNVSHMEM_HOME={}".format(HERE + "/nvshmem"),
"-DCUFFTMP_HOME={}".format(HERE + "/cufftmp"),
"-DCMAKE_BUILD_TYPE={}".format(
"Debug" if self.debug else "Release"
),
"-DCMAKE_PREFIX_PATH={}".format(pybind11.get_cmake_dir()),
]
os.makedirs(self.build_temp, exist_ok=True)
subprocess.check_call(
["cmake", f"{HERE}/src/cufftmp_jax/"] + cmake_args, cwd=self.build_temp
)
# Build all the extensions
super().build_extensions()
# Finally run install
subprocess.check_call(
["cmake", "--build", ".", "--target", "install"],
cwd=self.build_temp,
)
def build_extension(self, ext):
target_name = ext.name.split(".")[-1]
subprocess.check_call(
["cmake", "--build", ".", "--target", target_name],
cwd=self.build_temp,
)
extensions = [
Extension(
"cufftmp_jax.gpu_ops",
[
"src/cufftmp_jax/lib/gpu_ops.cpp",
"src/cufftmp_jax/lib/kernels.cu",
],
)]
setup(
name="fft_jax",
version='0.0.1',
author="Leopold Cambier",
author_email="lcambier@nvidia.com",
license="All rights reserved",
description=("FFT + JAX"),
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
install_requires=["jax[cuda]", "jaxlib"],
ext_modules=extensions,
cmdclass={"build_ext": CMakeBuildExt},
)
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/setup.py |
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
import sys
import jax
import jax.numpy as jnp
from jax.experimental import maps
from jax.experimental.pjit import pjit
from fft_common import Dist, Dir
from cufftmp_jax import cufftmp
from xfft import xfft
import helpers
def main():
opt = helpers.parser()
# Initialize JAX for multi-process runs
if opt['multiprocess'] is not None:
if opt['multiprocess'] == 'bootstrap':
jax.distributed.initialize()
else:
coordinator, num_procs, my_proc = opt['multiprocess'].split(',')
jax.distributed.initialize(
coordinator_address=coordinator,
num_processes=int(num_procs),
process_id=int(my_proc)
)
fft_dims = opt['shape']
cycles = opt['cycles']
impl = opt['implementation']
if impl == "cufftmp":
dist_fft = cufftmp
elif impl == "xfft":
dist_fft = xfft
else:
raise ValueError(f"Wrong implementation: got {impl}, expected cufftmp or xfft")
dist = Dist.create(opt['dist'])
input_shape = dist.slab_shape(fft_dims)
dtype = jnp.complex64
mesh = maps.Mesh(np.asarray(jax.devices()), ('gpus',))
with jax.spmd_mode('allow_all'):
if opt['mode'] == "test":
seed = 170
key = jax.random.PRNGKey(seed)
input = jax.random.normal(key, shape=fft_dims, dtype=dtype)
with mesh:
fft = pjit(dist_fft,
in_axis_resources=None,
out_axis_resources=None,
static_argnums=[1, 2])
output = fft(input, dist, Dir.FWD)
output_ref = jnp.fft.fftn(input)
error = jnp.linalg.norm(output - output_ref) / \
jnp.linalg.norm(output_ref)
if jax.process_index() == 0:
print(f"{impl} (test): {fft_dims}, dist {dist} --> {dist.opposite}, num GPUs {jax.device_count()}, num processes {jax.process_count()}, L2 rel error {error:.2e}")
if error < 1e-4:
print("&&&& PASSED")
else:
print("&&&& FAILED")
sys.exit(1)
else:
with mesh:
# Performance testing only supports 1 device per process
# because of the way the input `dinput` is generated
assert jax.local_device_count() == 1
# Quick generation of the local array
input = jnp.ones(input_shape, dtype=dtype)
# Create the global sharded array
dinput = jax.make_array_from_single_device_arrays(
fft_dims,
jax.sharding.NamedSharding(mesh, dist.part_spec),
[input])
# Function to benchmark
def fwd_bwd(x, dist, dir):
return dist_fft(dist_fft(x, dist, dir),
dist.opposite,
dir.opposite)
fwd_bwd_pjit = pjit(fwd_bwd,
in_axis_resources=dist.part_spec,
out_axis_resources=dist.part_spec,
static_argnums=[1, 2])
def fwd_bwd_bench(x):
return fwd_bwd_pjit(x, dist, Dir.FWD)
# Warmup
x = fwd_bwd_bench(dinput).block_until_ready()
# Benchmark
start = time.time()
x = dinput
for _ in range(cycles):
x = fwd_bwd_bench(x)
doutput = x.block_until_ready()
stop = time.time()
# Check error
doutput_ref = dinput
error = helpers.Frob_error(doutput_ref, doutput, dist)
                # Performance metrics
time_s = stop - start
av_time_s = time_s / (2 * cycles)
perf_GFlops = \
(5 * math.prod(fft_dims) * math.log2(math.prod(fft_dims))) / 1e9 / av_time_s
bandwidth_GBsGPUdir = \
(8 * math.prod(fft_dims)) / jax.device_count() / 1e9 / av_time_s
if jax.process_index() == 0:
print(f"{impl} (perf): {fft_dims}, num GPUs {jax.device_count()}, num processes {jax.process_count()}, relative L2 error {error:.2e}, cycles {cycles}, time {av_time_s * 1e3:.2e} ms, perf {perf_GFlops:.2e} GFlop/s, bandwidth {bandwidth_GBsGPUdir:.2e} GB/s/GPU")
if error < 1e-4:
print("&&&& PASSED")
else:
print("&&&& FAILED")
sys.exit(1)
if __name__ == "__main__":
main()
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/tests/fft_test.py |
import argparse
import jax
from jax.experimental.pjit import pjit
from jax.experimental.maps import xmap
def Frob_error_impl(dtest, dref):
derr2 = jax.numpy.linalg.norm(dtest - dref) ** 2
dnorm2 = jax.numpy.linalg.norm(dref) ** 2
derr2_sum = jax.lax.psum(derr2, axis_name="gpus")
dnorm2_sum = jax.lax.psum(dnorm2, axis_name="gpus")
error = jax.numpy.sqrt(derr2_sum / dnorm2_sum)
return error
def Frob_error(dtest, dref, dist):
"""Computes the relative error in the Frobenius norm
Arguments:
    dtest -- the test array, sharded along the axis `gpus`
    dref -- the reference array, sharded along the axis `gpus`
dist -- the sharding of dtest and dref
Should be an instance of fft_common.Dist
Returns the relative error in the Frobenius norm, i.e.,
||dtest - dref||_F / ||dref||_F
"""
return pjit(
xmap(
Frob_error_impl,
in_axes=dist.axes_map,
out_axes={},
axis_resources={'gpus': 'gpus'}
),
in_axis_resources=dist.part_spec,
out_axis_resources=None
)(dtest, dref)
def parser():
parser = argparse.ArgumentParser(
description="Test program for distributed FFTs in JAX"
)
parser.add_argument(
"implementation",
type=str,
choices=['cufftmp', 'xfft'],
default='cufftmp',
help='uses cuFFTMp or pjit+xmap'
)
parser.add_argument(
"mode",
type=str,
choices=['test', 'perf'],
default='test',
help='test (correctness) or perf (performance)'
)
parser.add_argument(
"-x", "--xsize",
type=int,
help="Size along X",
default=1
)
parser.add_argument(
"-y", "--ysize",
type=int,
help="Size along Y",
default=1
)
parser.add_argument(
"-z", "--zsize",
type=int,
help="Size along Z",
default=None
)
parser.add_argument(
"-n", "--size",
type=int,
help="Size along X, Y and Z (takes precedence over xsize, ysize and zsize",
default=None
)
parser.add_argument(
"-c", "--cycles",
type=int,
help="Cycles to benchmark (perf only)",
default=10
)
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help="Verbosity level (0 = silent, 2 = debug)"
)
parser.add_argument(
'-d', '--dist',
type=str,
choices=['X', 'Y'],
default='X',
help="Input distribution (X for SLABS_X or Y for SLABS_Y)"
)
parser.add_argument(
"--multiprocess",
type=str,
default=None,
help="If set, should be of the shape `coordinator,num_procs,proc_id` or `bootstrap` (automatic cluster detection)")
args = parser.parse_args()
if args.size:
shape = args.size, args.size, args.size
else:
if args.zsize:
shape = args.xsize, args.ysize, args.zsize
else:
shape = args.xsize, args.ysize
return {'shape': shape, **vars(args)}
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/tests/helpers.py |
# -*- coding: utf-8 -*-
from .utils import Dist, Dir
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/fft_common/__init__.py |
from enum import Enum
import jax
from jax.experimental import PartitionSpec
class Dist(Enum):
"""Describes a SLAB data decomposition
For a X*Y*Z array, SLABS_X indicates the array is
distributed along the first dimension, i.e., each
device owns a slab of size (X // nGPUs)*Y*Z
SLABS_Y indicates the array is distributed along the
second dimension, with each device owning a slab
of size X*(Y // nGPUs)*Z.
"""
SLABS_X = 'SLABS_X'
SLABS_Y = 'SLABS_Y'
@staticmethod
def create(string):
if string == 'X':
return Dist.SLABS_X
elif string == 'Y':
return Dist.SLABS_Y
else:
raise RuntimeError("Wrong dist")
@property
def opposite(dist):
if dist == Dist.SLABS_X:
return Dist.SLABS_Y
else:
return Dist.SLABS_X
@property
def _C_enum(dist):
if dist == Dist.SLABS_X:
return 0
else:
return 1
def fft_axes(self, fft_rank):
if self == Dist.SLABS_X:
return list(range(1, fft_rank))
else:
return [0]
def xmap_shape(self, fft_dims):
ngpus = jax.device_count()
if self == Dist.SLABS_X:
return (
ngpus,
fft_dims[0] // ngpus,
fft_dims[1],
*fft_dims[2:]
)
else:
return (
fft_dims[0],
ngpus,
fft_dims[1] // ngpus,
*fft_dims[2:]
)
def slab_shape(self, fft_dims):
ngpus = jax.device_count()
if self == Dist.SLABS_X:
return (
fft_dims[0] // ngpus,
fft_dims[1],
*fft_dims[2:]
)
else:
return (
fft_dims[0],
fft_dims[1] // ngpus,
*fft_dims[2:]
)
def fft_shape(self, local_shape):
ngpus = jax.device_count()
if self == Dist.SLABS_X:
return (local_shape[0] * ngpus, local_shape[1], *local_shape[2:])
else:
return (local_shape[0], local_shape[1] * ngpus, *local_shape[2:])
@property
def axes_map(dist):
if dist == Dist.SLABS_X:
return {0: "gpus"}
else:
return {1: "gpus"}
@property
def part_spec(dist):
if dist == Dist.SLABS_X:
return PartitionSpec("gpus", None)
else:
return PartitionSpec(None, "gpus")
class Dir(Enum):
"""Describe the FFT direction
FWD is the forward, unnormalized, direction.
BWD is the backward, normalized by 1/N, direction,
with N the product of the dimensions.
"""
FWD = 'FWD'
INV = 'INV'
@property
def _C_enum(dir):
if dir == Dir.FWD:
return 0
else:
return 1
@property
def opposite(dir):
if dir == Dir.FWD:
return Dir.INV
else:
return Dir.FWD
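# Illustrative sketch (added for exposition; not part of the original module).
# The shapes in the comments assume jax.device_count() == 4.
def _dist_dir_example():
    dist = Dist.SLABS_X
    local = dist.slab_shape((256, 256, 256))  # (64, 256, 256) with 4 GPUs
    # A transform turns a SLABS_X decomposition into SLABS_Y, and vice versa:
    assert dist.opposite == Dist.SLABS_Y
    assert Dir.FWD.opposite == Dir.INV
    return local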
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/fft_common/utils.py |
from functools import partial
import jax
from jax._src.sharding import NamedSharding
from jax.experimental.custom_partitioning import custom_partitioning
from fft_common import Dir
def _fft(x, dist, dir):
""" Compute a local FFT along the appropriate axes (based on dist), in the
forward or backward direction """
if dir == Dir.FWD:
return jax.numpy.fft.fftn(x, axes=dist.fft_axes(len(x.shape)))
else:
return jax.numpy.fft.ifftn(x, axes=dist.fft_axes(len(x.shape)))
def _supported_sharding(sharding, dist):
return NamedSharding(sharding.mesh, dist.part_spec)
def _partition(arg_shapes,
arg_shardings,
result_shape,
result_sharding,
dist,
dir):
return lambda x: _fft(x, dist, dir), \
_supported_sharding(arg_shardings[0], dist), \
[_supported_sharding(arg_shardings[0], dist)]
def _infer_sharding_from_operands(arg_shapes,
arg_shardings,
result_shape,
dist,
dir):
return _supported_sharding(arg_shardings[0], dist)
def fft(x, dist, dir):
""" Extends jax.numpy.fft.fftn to support sharding along the first or
second direction, without intermediate re-sharding """
@custom_partitioning
def _fft_(x):
return _fft(x, dist, dir)
_fft_.def_partition(
infer_sharding_from_operands=partial(_infer_sharding_from_operands,
dist=dist,
dir=dir),
partition=partial(_partition, dist=dist, dir=dir))
return _fft_(x)
def xfft(x, dist, dir):
"""Compute the discrete Fourier transform using a JAX-only implementation.
Arguments:
x -- the input tensor
dist -- the data decomposition of x.
Should be an instance of fft_common.Dist
dir -- the direction of the transform.
Should be an instance of fft_common.Dir
Returns the transformed tensor.
    The output tensor is distributed according to dist.opposite
This function should be used with pjit like
pjit(xfft,
in_axis_resources=dist.part_spec,
out_axis_resources=dist.opposite.part_spec,
static_argnums=[1, 2]
)(x, dist, dir)
"""
# If dist == Dist.SLABS_X, FFT along Y and Z
x = fft(x, dist, dir)
# Implicitly re-shards to match the required
# input sharding of the next fft(..., dist.opposite, ...)
# If dist == Dist.SLABS_X, FFT along X
x = fft(x, dist.opposite, dir)
return x
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/xfft/xfft.py |
# -*- coding: utf-8 -*-
from .xfft import xfft
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/xfft/__init__.py |
# -*- coding: utf-8 -*-
__all__ = ["cufftmp"]
from functools import partial
import math
import jax
from jax.lib import xla_client
from jax import core, dtypes
from jax.interpreters import xla, mlir
from jax.abstract_arrays import ShapedArray
from jax._src.sharding import NamedSharding
from jax.experimental.custom_partitioning import custom_partitioning
from jaxlib.hlo_helpers import custom_call
from fft_common import Dir, Dist
from . import gpu_ops
for _name, _value in gpu_ops.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="gpu")
xops = xla_client.ops
# ************
# * BINDINGS *
# ************
def _cufftmp_bind(input, num_parts, dist, dir):
# param=val means it's a static parameter
(output,) = _cufftmp_prim.bind(input,
num_parts=num_parts,
dist=dist,
dir=dir)
# scale in INVERSE direction
if dir == Dir.INV:
fft_dims = dist.fft_shape(input.shape)
output = jax.numpy.divide(output, math.prod([
jax.numpy.complex64(f) for f in fft_dims
]))
return output
def _supported_sharding(sharding, dist):
return NamedSharding(sharding.mesh, dist.part_spec)
def _partition(arg_shapes,
arg_shardings,
result_shape,
result_sharding,
dist,
dir):
""" Describes the required input and output sharding of the op.
`arg_shardings` and `result_sharding` are the shardings provided by the
user (i.e., in pjit).
Returns:
- The operation to perform locally on all GPUs
- The output sharding
- The input sharding """
return lambda x: _cufftmp_bind(x,
num_parts=jax.device_count(),
dist=dist,
dir=dir), \
_supported_sharding(arg_shardings[0], dist.opposite), \
[_supported_sharding(arg_shardings[0], dist)]
def _infer_sharding_from_operands(arg_shapes,
arg_shardings,
result_shape,
dist,
dir):
return _supported_sharding(arg_shardings[0], dist)
def cufftmp(x, dist, dir):
"""Compute the DFT using a JAX+cuFFTMp implementation.
Arguments:
x -- the input tensor
dist -- the data decomposition of x.
Should be an instance of fft_common.Dist
dir -- the direction of the transform.
Should be an instance of fft_common.Dir
Returns the transformed tensor.
    The output tensor is distributed according to dist.opposite
This function should be used with pjit like
pjit(
cufftmp,
in_axis_resources=dist.part_spec,
out_axis_resources=dist.opposite.part_spec,
static_argnums=[1, 2]
)(x, dist, dir)
"""
    # cuFFTMp only supports 1 device per process
assert jax.local_device_count() == 1
@custom_partitioning
def _cufftmp_(x):
return _cufftmp_bind(x, num_parts=1, dist=dist, dir=dir)
_cufftmp_.def_partition(
infer_sharding_from_operands=partial(
_infer_sharding_from_operands,
dist=dist,
dir=dir),
partition=partial(
_partition,
dist=dist,
dir=dir))
return _cufftmp_(x)
# *********************************
# * SUPPORT FOR JIT COMPILATION *
# *********************************
# Abstract implementation, i.e., return the shape of the output array
# based on the input array and a number of partitions (ie devices)
def _cufftmp_abstract(input, num_parts, dist, dir):
dtype = dtypes.canonicalize_dtype(input.dtype)
input_shape = input.shape
if dist == Dist.SLABS_X:
output_shape = (input_shape[0] * num_parts,
input_shape[1] // num_parts,
*input_shape[2:])
elif dist == Dist.SLABS_Y:
output_shape = (input_shape[0] // num_parts,
input_shape[1] * num_parts,
*input_shape[2:])
return (ShapedArray(output_shape, dtype),)
# Implementation calling into the C++ bindings
def _cufftmp_translation(ctx, input, num_parts, dist, dir):
assert num_parts == jax.device_count()
input_type = mlir.ir.RankedTensorType(input.type)
dims_in = input_type.shape
fft_dims = dist.fft_shape(dims_in)
dims_out = dist.opposite.slab_shape(fft_dims)
output_type = mlir.ir.RankedTensorType.get(
dims_out,
input_type.element_type
)
layout = tuple(range(len(dims_in) - 1, -1, -1))
if len(fft_dims) == 2:
opaque = gpu_ops.build_cufftmp_descriptor(
fft_dims[0],
fft_dims[1],
1,
dist._C_enum,
dir._C_enum
)
elif len(fft_dims) == 3:
opaque = gpu_ops.build_cufftmp_descriptor(
fft_dims[0],
fft_dims[1],
fft_dims[2],
dist._C_enum,
dir._C_enum
)
else:
raise ValueError("Unsupported tensor rank; must be 2 or 3")
return [custom_call(
"gpu_cufftmp",
# Output types
out_types=[output_type],
# The inputs:
operands=[input,],
# Layout specification:
operand_layouts=[layout,],
result_layouts=[layout,],
# GPU specific additional data
backend_config=opaque
)]
# *********************************************
# * BOILERPLATE TO REGISTER THE OP WITH JAX *
# *********************************************
_cufftmp_prim = core.Primitive("cufftmp")
_cufftmp_prim.multiple_results = True
_cufftmp_prim.def_impl(partial(xla.apply_primitive, _cufftmp_prim))
_cufftmp_prim.def_abstract_eval(_cufftmp_abstract)
# Register the op with MLIR
mlir.register_lowering(_cufftmp_prim, _cufftmp_translation, platform="gpu")
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/cufftmp_jax/cufftmp_jax.py |
# -*- coding: utf-8 -*-
from .cufftmp_jax import cufftmp
| CUDALibrarySamples-master | cuFFTMp/JAX_FFT/src/cufftmp_jax/__init__.py |
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
import os
import sys
from collections import defaultdict
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from scipy.io import wavfile
from pesq import pesq
from pystoi import stoi
def evaluate_dns(testset_path, enhanced_path, target):
reverb = 'no'
result = defaultdict(int)
for i in tqdm(range(300)):
try:
rate, clean = wavfile.read(os.path.join(testset_path, "clean", "clean_fileid_{}.wav".format(i)))
if target == 'noisy':
rate, target_wav = wavfile.read(os.path.join(testset_path, "noisy", "noisy_fileid_{}.wav".format(i)))
else:
rate, target_wav = wavfile.read(os.path.join(enhanced_path, "enhanced_fileid_{}.wav".format(i)))
        except Exception:
continue
length = target_wav.shape[-1]
result['pesq_wb'] += pesq(16000, clean, target_wav, 'wb') * length # wide band
result['pesq_nb'] += pesq(16000, clean, target_wav, 'nb') * length # narrow band
result['stoi'] += stoi(clean, target_wav, rate) * length
result['count'] += 1 * length
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', type=str, default='dns', help='dataset')
parser.add_argument('-e', '--enhanced_path', type=str, help='enhanced audio path')
parser.add_argument('-t', '--testset_path', type=str, help='testset path')
args = parser.parse_args()
enhanced_path = args.enhanced_path
testset_path = args.testset_path
target = 'enhanced'
if args.dataset == 'dns':
result = evaluate_dns(testset_path, enhanced_path, target)
# logging
for key in result:
if key != 'count':
                print('{} = {:.3f}'.format(key, result[key]/result['count']), end=", ")
        print()
| CleanUNet-main | python_eval.py |
# Adapted from https://github.com/kan-bayashi/ParallelWaveGAN
# Original Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""STFT-based Loss modules."""
import torch
import torch.nn.functional as F
from distutils.version import LooseVersion
is_pytorch_17plus = LooseVersion(torch.__version__) >= LooseVersion("1.7")
def stft(x, fft_size, hop_size, win_length, window):
"""Perform STFT and convert to magnitude spectrogram.
Args:
x (Tensor): Input signal tensor (B, T).
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length.
window (str): Window function type.
Returns:
Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
"""
if is_pytorch_17plus:
x_stft = torch.stft(
x, fft_size, hop_size, win_length, window, return_complex=False
)
else:
x_stft = torch.stft(x, fft_size, hop_size, win_length, window)
real = x_stft[..., 0]
imag = x_stft[..., 1]
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
return torch.sqrt(torch.clamp(real**2 + imag**2, min=1e-7)).transpose(2, 1)
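# Shape sketch (illustrative): for a batch of 4 signals of 16000 samples,
# the default torch.stft centering gives 16000 // 256 + 1 = 63 frames and
# 1024 // 2 + 1 = 513 one-sided frequency bins.
def _stft_shape_example():
    x = torch.randn(4, 16000)
    window = torch.hann_window(1024)
    mag = stft(x, fft_size=1024, hop_size=256, win_length=1024, window=window)
    return mag.shape  # torch.Size([4, 63, 513])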
class SpectralConvergenceLoss(torch.nn.Module):
"""Spectral convergence loss module."""
def __init__(self):
"""Initilize spectral convergence loss module."""
super(SpectralConvergenceLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
class LogSTFTMagnitudeLoss(torch.nn.Module):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initilize los STFT magnitude loss module."""
super(LogSTFTMagnitudeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Log STFT magnitude loss value.
"""
return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
class STFTLoss(torch.nn.Module):
"""STFT loss module."""
def __init__(
self, fft_size=1024, shift_size=120, win_length=600, window="hann_window",
band="full"
):
"""Initialize STFT loss module."""
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.shift_size = shift_size
self.win_length = win_length
self.band = band
self.spectral_convergence_loss = SpectralConvergenceLoss()
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
# NOTE(kan-bayashi): Use register_buffer to fix #223
self.register_buffer("window", getattr(torch, window)(win_length))
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Spectral convergence loss value.
Tensor: Log STFT magnitude loss value.
"""
x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
if self.band == "high":
freq_mask_ind = x_mag.shape[1] // 2 # only select high frequency bands
sc_loss = self.spectral_convergence_loss(x_mag[:,freq_mask_ind:,:], y_mag[:,freq_mask_ind:,:])
mag_loss = self.log_stft_magnitude_loss(x_mag[:,freq_mask_ind:,:], y_mag[:,freq_mask_ind:,:])
elif self.band == "full":
sc_loss = self.spectral_convergence_loss(x_mag, y_mag)
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
else:
raise NotImplementedError
return sc_loss, mag_loss
class MultiResolutionSTFTLoss(torch.nn.Module):
"""Multi resolution STFT loss module."""
def __init__(
self, fft_sizes=[1024, 2048, 512], hop_sizes=[120, 240, 50], win_lengths=[600, 1200, 240],
window="hann_window", sc_lambda=0.1, mag_lambda=0.1, band="full"
):
"""Initialize Multi resolution STFT loss module.
Args:
fft_sizes (list): List of FFT sizes.
hop_sizes (list): List of hop sizes.
win_lengths (list): List of window lengths.
window (str): Window function type.
*_lambda (float): a balancing factor across different losses.
band (str): high-band or full-band loss
"""
super(MultiResolutionSTFTLoss, self).__init__()
self.sc_lambda = sc_lambda
self.mag_lambda = mag_lambda
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fs, ss, wl, window, band)]
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T) or (B, #subband, T).
y (Tensor): Groundtruth signal (B, T) or (B, #subband, T).
Returns:
Tensor: Multi resolution spectral convergence loss value.
Tensor: Multi resolution log STFT magnitude loss value.
"""
if len(x.shape) == 3:
x = x.view(-1, x.size(2)) # (B, C, T) -> (B x C, T)
y = y.view(-1, y.size(2)) # (B, C, T) -> (B x C, T)
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
sc_l, mag_l = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss *= self.sc_lambda
sc_loss /= len(self.stft_losses)
mag_loss *= self.mag_lambda
mag_loss /= len(self.stft_losses)
return sc_loss, mag_loss
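# Minimal usage sketch (illustrative, not part of the original file; assumes a
# PyTorch version supported by the stft() helper above): compute the
# multi-resolution STFT loss between two random waveforms.
if __name__ == "__main__":
    criterion = MultiResolutionSTFTLoss()
    x = torch.randn(2, 16000)  # predicted signal (B, T)
    y = torch.randn(2, 16000)  # ground-truth signal (B, T)
    sc_loss, mag_loss = criterion(x, y)
    print(sc_loss.item(), mag_loss.item())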
| CleanUNet-main | stft_loss.py |
import os
import time
import functools
import numpy as np
from math import cos, pi, floor, sin
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from stft_loss import MultiResolutionSTFTLoss
def flatten(v):
return [x for y in v for x in y]
def rescale(x):
return (x - x.min()) / (x.max() - x.min())
def find_max_epoch(path):
"""
Find latest checkpoint
Returns:
maximum iteration, -1 if there is no (valid) checkpoint
"""
files = os.listdir(path)
epoch = -1
for f in files:
if len(f) <= 4:
continue
if f[-4:] == '.pkl':
number = f[:-4]
try:
epoch = max(epoch, int(number))
except ValueError:  # skip files whose stem is not an integer iteration count
continue
return epoch
def print_size(net, keyword=None):
"""
Print the number of parameters of a network
"""
if net is not None and isinstance(net, torch.nn.Module):
module_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in module_parameters])
print("{} Parameters: {:.6f}M".format(
net.__class__.__name__, params / 1e6), flush=True, end="; ")
if keyword is not None:
keyword_parameters = [p for name, p in net.named_parameters() if p.requires_grad and keyword in name]
params = sum([np.prod(p.size()) for p in keyword_parameters])
print("{} Parameters: {:.6f}M".format(
keyword, params / 1e6), flush=True, end="; ")
print(" ")
####################### lr scheduler: Linear Warmup then Cosine Decay #############################
# Adapted from https://github.com/rosinality/vq-vae-2-pytorch
# Original Copyright 2019 Kim Seonghyeon
# MIT License (https://opensource.org/licenses/MIT)
def anneal_linear(start, end, proportion):
return start + proportion * (end - start)
def anneal_cosine(start, end, proportion):
cos_val = cos(pi * proportion) + 1
return end + (start - end) / 2 * cos_val
class Phase:
def __init__(self, start, end, n_iter, cur_iter, anneal_fn):
self.start, self.end = start, end
self.n_iter = n_iter
self.anneal_fn = anneal_fn
self.n = cur_iter
def step(self):
self.n += 1
return self.anneal_fn(self.start, self.end, self.n / self.n_iter)
def reset(self):
self.n = 0
@property
def is_done(self):
return self.n >= self.n_iter
class LinearWarmupCosineDecay:
def __init__(
self,
optimizer,
lr_max,
n_iter,
iteration=0,
divider=25,
warmup_proportion=0.3,
phase=('linear', 'cosine'),
):
self.optimizer = optimizer
phase1 = int(n_iter * warmup_proportion)
phase2 = n_iter - phase1
lr_min = lr_max / divider
phase_map = {'linear': anneal_linear, 'cosine': anneal_cosine}
cur_iter_phase1 = iteration
cur_iter_phase2 = max(0, iteration - phase1)
self.lr_phase = [
Phase(lr_min, lr_max, phase1, cur_iter_phase1, phase_map[phase[0]]),
Phase(lr_max, lr_min / 1e4, phase2, cur_iter_phase2, phase_map[phase[1]]),
]
if iteration < phase1:
self.phase = 0
else:
self.phase = 1
def step(self):
lr = self.lr_phase[self.phase].step()
for group in self.optimizer.param_groups:
group['lr'] = lr
if self.lr_phase[self.phase].is_done:
self.phase += 1
if self.phase >= len(self.lr_phase):
for phase in self.lr_phase:
phase.reset()
self.phase = 0
return lr
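# Usage sketch (illustrative; the dummy parameter is a stand-in for real model
# weights): attach the scheduler to an optimizer and call step() once per
# training iteration, which is how train.py drives it.
if __name__ == "__main__":
    _param = torch.nn.Parameter(torch.zeros(1))
    _optimizer = torch.optim.Adam([_param], lr=1e-4)
    _scheduler = LinearWarmupCosineDecay(_optimizer, lr_max=2e-4, n_iter=1000)
    for _ in range(5):
        _lr = _scheduler.step()
    print("lr after 5 iterations:", _lr)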
####################### model util #############################
def std_normal(size):
"""
Generate the standard Gaussian variable of a certain size
"""
return torch.normal(0, 1, size=size).cuda()
def weight_scaling_init(layer):
"""
weight rescaling initialization from https://arxiv.org/abs/1911.13254
"""
w = layer.weight.detach()
alpha = 10.0 * w.std()
layer.weight.data /= torch.sqrt(alpha)
layer.bias.data /= torch.sqrt(alpha)
@torch.no_grad()
def sampling(net, noisy_audio):
"""
Perform denoising (forward) step
"""
return net(noisy_audio)
def loss_fn(net, X, ell_p, ell_p_lambda, stft_lambda, mrstftloss, **kwargs):
"""
Loss function in CleanUNet
Parameters:
net: network
X: training data pair (clean audio, noisy_audio)
ell_p: \ell_p norm (1 or 2) of the AE loss
ell_p_lambda: factor of the AE loss
stft_lambda: factor of the STFT loss
mrstftloss: multi-resolution STFT loss function
Returns:
loss: value of objective function
output_dic: values of each component of loss
"""
assert type(X) == tuple and len(X) == 2
clean_audio, noisy_audio = X
B, C, L = clean_audio.shape
output_dic = {}
loss = 0.0
# AE loss
denoised_audio = net(noisy_audio)
if ell_p == 2:
ae_loss = nn.MSELoss()(denoised_audio, clean_audio)
elif ell_p == 1:
ae_loss = F.l1_loss(denoised_audio, clean_audio)
else:
raise NotImplementedError
loss += ae_loss * ell_p_lambda
output_dic["reconstruct"] = ae_loss.data * ell_p_lambda
if stft_lambda > 0:
sc_loss, mag_loss = mrstftloss(denoised_audio.squeeze(1), clean_audio.squeeze(1))
loss += (sc_loss + mag_loss) * stft_lambda
output_dic["stft_sc"] = sc_loss.data * stft_lambda
output_dic["stft_mag"] = mag_loss.data * stft_lambda
return loss, output_dic
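# Quick self-check (illustrative): exercise loss_fn with an identity "network"
# so the denoised output equals the noisy input; stft_lambda=0 sidesteps the
# need for a MultiResolutionSTFTLoss instance.
if __name__ == "__main__":
    _net = nn.Identity()
    _clean = torch.randn(2, 1, 16000)
    _noisy = _clean + 0.1 * torch.randn(2, 1, 16000)
    _loss, _dic = loss_fn(_net, (_clean, _noisy), ell_p=1, ell_p_lambda=1.0,
                          stft_lambda=0.0, mrstftloss=None)
    print(_loss.item(), _dic)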
| CleanUNet-main | util.py |
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
import os
import numpy as np
from scipy.io.wavfile import read as wavread
import warnings
warnings.filterwarnings("ignore")
import torch
from torch.utils.data import Dataset
from torch.utils.data.distributed import DistributedSampler
import random
random.seed(0)
torch.manual_seed(0)
np.random.seed(0)
from torchvision import datasets, models, transforms
import torchaudio
class CleanNoisyPairDataset(Dataset):
"""
Create a Dataset of clean and noisy audio pairs.
Each element is a tuple of the form (clean waveform, noisy waveform, file_id)
"""
def __init__(self, root='./', subset='training', crop_length_sec=0):
super().__init__()
assert subset is None or subset in ["training", "testing"]
self.crop_length_sec = crop_length_sec
self.subset = subset
N_clean = len(os.listdir(os.path.join(root, 'training_set/clean')))
N_noisy = len(os.listdir(os.path.join(root, 'training_set/noisy')))
assert N_clean == N_noisy
if subset == "training":
self.files = [(os.path.join(root, 'training_set/clean', 'fileid_{}.wav'.format(i)),
os.path.join(root, 'training_set/noisy', 'fileid_{}.wav'.format(i))) for i in range(N_clean)]
elif subset == "testing":
sortkey = lambda name: '_'.join(name.split('_')[-2:]) # specific for dns due to test sample names
_p = os.path.join(root, 'datasets/test_set/synthetic/no_reverb') # path for DNS
clean_files = os.listdir(os.path.join(_p, 'clean'))
noisy_files = os.listdir(os.path.join(_p, 'noisy'))
clean_files.sort(key=sortkey)
noisy_files.sort(key=sortkey)
self.files = []
for _c, _n in zip(clean_files, noisy_files):
assert sortkey(_c) == sortkey(_n)
self.files.append((os.path.join(_p, 'clean', _c),
os.path.join(_p, 'noisy', _n)))
self.crop_length_sec = 0
else:
raise NotImplementedError
def __getitem__(self, n):
fileid = self.files[n]
clean_audio, sample_rate = torchaudio.load(fileid[0])
noisy_audio, sample_rate = torchaudio.load(fileid[1])
clean_audio, noisy_audio = clean_audio.squeeze(0), noisy_audio.squeeze(0)
assert len(clean_audio) == len(noisy_audio)
crop_length = int(self.crop_length_sec * sample_rate)
assert crop_length < len(clean_audio)
# random crop
if self.subset != 'testing' and crop_length > 0:
start = np.random.randint(low=0, high=len(clean_audio) - crop_length + 1)
clean_audio = clean_audio[start:(start + crop_length)]
noisy_audio = noisy_audio[start:(start + crop_length)]
clean_audio, noisy_audio = clean_audio.unsqueeze(0), noisy_audio.unsqueeze(0)
return (clean_audio, noisy_audio, fileid)
def __len__(self):
return len(self.files)
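# Expected on-disk layout (inferred from the path construction above):
#   <root>/training_set/clean/fileid_<i>.wav
#   <root>/training_set/noisy/fileid_<i>.wav
#   <root>/datasets/test_set/synthetic/no_reverb/{clean,noisy}/*.wav  (DNS test set)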
def load_CleanNoisyPairDataset(root, subset, crop_length_sec, batch_size, sample_rate, num_gpus=1):
"""
Get dataloader with distributed sampling
"""
dataset = CleanNoisyPairDataset(root=root, subset=subset, crop_length_sec=crop_length_sec)
kwargs = {"batch_size": batch_size, "num_workers": 4, "pin_memory": False, "drop_last": False}
if num_gpus > 1:
train_sampler = DistributedSampler(dataset)
dataloader = torch.utils.data.DataLoader(dataset, sampler=train_sampler, **kwargs)
else:
dataloader = torch.utils.data.DataLoader(dataset, sampler=None, shuffle=True, **kwargs)
return dataloader
if __name__ == '__main__':
import json
with open('./configs/DNS-large-full.json') as f:
data = f.read()
config = json.loads(data)
trainset_config = config["trainset_config"]
trainloader = load_CleanNoisyPairDataset(**trainset_config, subset='training', batch_size=2, num_gpus=1)
testloader = load_CleanNoisyPairDataset(**trainset_config, subset='testing', batch_size=2, num_gpus=1)
print(len(trainloader), len(testloader))
for clean_audio, noisy_audio, fileid in trainloader:
clean_audio = clean_audio.cuda()
noisy_audio = noisy_audio.cuda()
print(clean_audio.shape, noisy_audio.shape, fileid)
break
| CleanUNet-main | dataset.py |
# Adapted from https://github.com/NVIDIA/waveglow under the BSD 3-Clause License.
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import sys
import time
import subprocess
import argparse
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.distributed as dist
from torch.autograd import Variable
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= num_gpus
return rt
def init_distributed(rank, num_gpus, group_name, dist_backend, dist_url):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(dist_backend, init_method=dist_url,
world_size=num_gpus, rank=rank,
group_name=group_name)
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
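# Round-trip sketch (comments only, so importing or running this launcher is
# unaffected):
#   >>> a, b = torch.ones(2, 3), torch.zeros(4)
#   >>> flat = _flatten_dense_tensors([a, b])
#   >>> outs = _unflatten_dense_tensors(flat, [a, b])
#   >>> all(torch.equal(t, o) for t, o in zip((a, b), outs))
#   True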
def apply_gradient_allreduce(module):
"""
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
"""
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
dir(param)  # no-op retained from the original waveglow code
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
def main(config, stdout_dir, args_str):
args_list = ['train.py']
args_list += args_str.split(' ') if len(args_str) > 0 else []
args_list.append('--config={}'.format(config))
num_gpus = torch.cuda.device_count()
print('num_gpus: {}'.format(num_gpus))
args_list.append('--num_gpus={}'.format(num_gpus))
args_list.append("--group_name=group_{}".format(time.strftime("%Y_%m_%d-%H%M%S")))
if not os.path.isdir(stdout_dir):
os.makedirs(stdout_dir)
os.chmod(stdout_dir, 0o775)
workers = []
for i in range(num_gpus):
args_list[-2] = '--rank={}'.format(i)  # overwrite the '--num_gpus' placeholder with this worker's rank
stdout = None if i == 0 else open(
os.path.join(stdout_dir, "GPU_{}.log".format(i)), "w")
print(args_list)
p = subprocess.Popen([str(sys.executable)]+args_list, stdout=stdout)
workers.append(p)
for p in workers:
p.wait()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='config.json',
help='JSON file for configuration')
parser.add_argument('-s', '--stdout_dir', type=str, default="./logs/",
help='directory to save stdout logs')
parser.add_argument('-a', '--args_str', type=str, default='',
help='double quoted string with space separated key value pairs')
args = parser.parse_args()
main(args.config, args.stdout_dir, args.args_str)
| CleanUNet-main | distributed.py |
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from util import weight_scaling_init
# Transformer (encoder) https://github.com/jadore801120/attention-is-all-you-need-pytorch
# Original Copyright 2017 Victor Huang
# MIT License (https://opensource.org/licenses/MIT)
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1e9)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
# Transpose for attention dot product: b x n x lq x dv
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
def get_subsequent_mask(seq):
''' For masking out the subsequent info. '''
sz_b, len_s = seq.size()
subsequent_mask = (1 - torch.triu(
torch.ones((1, len_s, len_s), device=seq.device), diagonal=1)).bool()
return subsequent_mask
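# Example (illustrative): for a length-3 sequence the causal mask is
#   [[[ True, False, False],
#     [ True,  True, False],
#     [ True,  True,  True]]]
# i.e. position i may only attend to positions <= i.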
class PositionalEncoding(nn.Module):
def __init__(self, d_hid, n_position=200):
super(PositionalEncoding, self).__init__()
# Not a parameter
self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))
def _get_sinusoid_encoding_table(self, n_position, d_hid):
''' Sinusoid position encoding table '''
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
def forward(self, x):
return x + self.pos_table[:, :x.size(1)].clone().detach()
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.0):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
class TransformerEncoder(nn.Module):
''' An encoder model with self attention mechanism. '''
def __init__(
self, d_word_vec=512, n_layers=2, n_head=8, d_k=64, d_v=64,
d_model=512, d_inner=2048, dropout=0.1, n_position=624, scale_emb=False):
super().__init__()
# self.src_word_emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=pad_idx)
if n_position > 0:
self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
else:
self.position_enc = lambda x: x
self.dropout = nn.Dropout(p=dropout)
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.scale_emb = scale_emb
self.d_model = d_model
def forward(self, src_seq, src_mask, return_attns=False):
enc_slf_attn_list = []
# -- Forward
# enc_output = self.src_word_emb(src_seq)
enc_output = src_seq
if self.scale_emb:
enc_output *= self.d_model ** 0.5
enc_output = self.dropout(self.position_enc(enc_output))
enc_output = self.layer_norm(enc_output)
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(enc_output, slf_attn_mask=src_mask)
enc_slf_attn_list += [enc_slf_attn] if return_attns else []
if return_attns:
return enc_output, enc_slf_attn_list
return enc_output
# CleanUNet architecture
def padding(x, D, K, S):
"""padding zeroes to x so that denoised audio has the same length"""
L = x.shape[-1]
for _ in range(D):
if L < K:
L = 1
else:
L = 1 + np.ceil((L - K) / S)
for _ in range(D):
L = (L - 1) * S + K
L = int(L)
x = F.pad(x, (0, L - x.shape[-1]))
return x
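# Worked example (illustrative, CleanUNet defaults D=8, K=4, S=2): each encoder
# layer maps a length L to 1 + ceil((L - K) / S); the loop above then unrolls
# the matching decoder arithmetic (L - 1) * S + K to find the smallest padded
# length that survives all D down/up-sampling pairs, so the output can simply
# be cropped back to the original length.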
class CleanUNet(nn.Module):
""" CleanUNet architecture. """
def __init__(self, channels_input=1, channels_output=1,
channels_H=64, max_H=768,
encoder_n_layers=8, kernel_size=4, stride=2,
tsfm_n_layers=3,
tsfm_n_head=8,
tsfm_d_model=512,
tsfm_d_inner=2048):
"""
Parameters:
channels_input (int): input channels
channels_output (int): output channels
channels_H (int): middle channels H that controls capacity
max_H (int): maximum H
encoder_n_layers (int): number of encoder/decoder layers D
kernel_size (int): kernel size K
stride (int): stride S
tsfm_n_layers (int): number of self attention blocks N
tsfm_n_head (int): number of heads in each self attention block
tsfm_d_model (int): d_model of self attention
tsfm_d_inner (int): d_inner of self attention
"""
super(CleanUNet, self).__init__()
self.channels_input = channels_input
self.channels_output = channels_output
self.channels_H = channels_H
self.max_H = max_H
self.encoder_n_layers = encoder_n_layers
self.kernel_size = kernel_size
self.stride = stride
self.tsfm_n_layers = tsfm_n_layers
self.tsfm_n_head = tsfm_n_head
self.tsfm_d_model = tsfm_d_model
self.tsfm_d_inner = tsfm_d_inner
# encoder and decoder
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
for i in range(encoder_n_layers):
self.encoder.append(nn.Sequential(
nn.Conv1d(channels_input, channels_H, kernel_size, stride),
nn.ReLU(),
nn.Conv1d(channels_H, channels_H * 2, 1),
nn.GLU(dim=1)
))
channels_input = channels_H
if i == 0:
# no relu at end
self.decoder.append(nn.Sequential(
nn.Conv1d(channels_H, channels_H * 2, 1),
nn.GLU(dim=1),
nn.ConvTranspose1d(channels_H, channels_output, kernel_size, stride)
))
else:
self.decoder.insert(0, nn.Sequential(
nn.Conv1d(channels_H, channels_H * 2, 1),
nn.GLU(dim=1),
nn.ConvTranspose1d(channels_H, channels_output, kernel_size, stride),
nn.ReLU()
))
channels_output = channels_H
# double H but keep below max_H
channels_H *= 2
channels_H = min(channels_H, max_H)
# self attention block
self.tsfm_conv1 = nn.Conv1d(channels_output, tsfm_d_model, kernel_size=1)
self.tsfm_encoder = TransformerEncoder(d_word_vec=tsfm_d_model,
n_layers=tsfm_n_layers,
n_head=tsfm_n_head,
d_k=tsfm_d_model // tsfm_n_head,
d_v=tsfm_d_model // tsfm_n_head,
d_model=tsfm_d_model,
d_inner=tsfm_d_inner,
dropout=0.0,
n_position=0,
scale_emb=False)
self.tsfm_conv2 = nn.Conv1d(tsfm_d_model, channels_output, kernel_size=1)
# weight scaling initialization
for layer in self.modules():
if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):
weight_scaling_init(layer)
def forward(self, noisy_audio):
# (B, L) -> (B, C, L)
if len(noisy_audio.shape) == 2:
noisy_audio = noisy_audio.unsqueeze(1)
B, C, L = noisy_audio.shape
assert C == 1
# normalization and padding
std = noisy_audio.std(dim=2, keepdim=True) + 1e-3
noisy_audio /= std
x = padding(noisy_audio, self.encoder_n_layers, self.kernel_size, self.stride)
# encoder
skip_connections = []
for downsampling_block in self.encoder:
x = downsampling_block(x)
skip_connections.append(x)
skip_connections = skip_connections[::-1]
# attention mask for causal inference; for non-causal, set attn_mask to None
len_s = x.shape[-1] # length at bottleneck
attn_mask = (1 - torch.triu(torch.ones((1, len_s, len_s), device=x.device), diagonal=1)).bool()
x = self.tsfm_conv1(x) # C 1024 -> 512
x = x.permute(0, 2, 1)
x = self.tsfm_encoder(x, src_mask=attn_mask)
x = x.permute(0, 2, 1)
x = self.tsfm_conv2(x) # C 512 -> 1024
# decoder
for i, upsampling_block in enumerate(self.decoder):
skip_i = skip_connections[i]
x += skip_i[:, :, :x.shape[-1]]
x = upsampling_block(x)
x = x[:, :, :L] * std
return x
if __name__ == '__main__':
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='configs/DNS-large-full.json',
help='JSON file for configuration')
args = parser.parse_args()
with open(args.config) as f:
data = f.read()
config = json.loads(data)
network_config = config["network_config"]
model = CleanUNet(**network_config).cuda()
from util import print_size
print_size(model, keyword="tsfm")
input_data = torch.ones([4,1,int(4.5*16000)]).cuda()
output = model(input_data)
print(output.shape)
y = torch.rand([4,1,int(4.5*16000)]).cuda()
loss = torch.nn.MSELoss()(y, output)
loss.backward()
print(loss.item())
| CleanUNet-main | network.py |
# Adapted from https://github.com/NVIDIA/waveglow under the BSD 3-Clause License.
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import time
import argparse
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import random
random.seed(0)
torch.manual_seed(0)
np.random.seed(0)
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from dataset import load_CleanNoisyPairDataset
from stft_loss import MultiResolutionSTFTLoss
from util import rescale, find_max_epoch, print_size
from util import LinearWarmupCosineDecay, loss_fn
from network import CleanUNet
def train(num_gpus, rank, group_name,
exp_path, log, optimization, loss_config):
# setup local experiment path
if rank == 0:
print('exp_path:', exp_path)
# Create tensorboard logger.
log_directory = os.path.join(log["directory"], exp_path)
if rank == 0:
tb = SummaryWriter(os.path.join(log_directory, 'tensorboard'))
# distributed running initialization
if num_gpus > 1:
init_distributed(rank, num_gpus, group_name, **dist_config)
# Get shared ckpt_directory ready
ckpt_directory = os.path.join(log_directory, 'checkpoint')
if rank == 0:
if not os.path.isdir(ckpt_directory):
os.makedirs(ckpt_directory)
os.chmod(ckpt_directory, 0o775)
print("ckpt_directory: ", ckpt_directory, flush=True)
# load training data
trainloader = load_CleanNoisyPairDataset(**trainset_config,
subset='training',
batch_size=optimization["batch_size_per_gpu"],
num_gpus=num_gpus)
print('Data loaded')
# predefine model
net = CleanUNet(**network_config).cuda()
print_size(net)
# apply gradient all reduce
if num_gpus > 1:
net = apply_gradient_allreduce(net)
# define optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=optimization["learning_rate"])
# load checkpoint
time0 = time.time()
if log["ckpt_iter"] == 'max':
ckpt_iter = find_max_epoch(ckpt_directory)
else:
ckpt_iter = log["ckpt_iter"]
if ckpt_iter >= 0:
try:
# load checkpoint file
model_path = os.path.join(ckpt_directory, '{}.pkl'.format(ckpt_iter))
checkpoint = torch.load(model_path, map_location='cpu')
# feed model dict and optimizer state
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# record training time based on elapsed time
time0 -= checkpoint['training_time_seconds']
print('Model at iteration %s has been trained for %s seconds' % (ckpt_iter, checkpoint['training_time_seconds']))
print('checkpoint model loaded successfully')
except Exception:
ckpt_iter = -1
print('No valid checkpoint model found, start training from initialization.')
else:
ckpt_iter = -1
print('No valid checkpoint model found, start training from initialization.')
# training
n_iter = ckpt_iter + 1
# define learning rate scheduler and stft-loss
scheduler = LinearWarmupCosineDecay(
optimizer,
lr_max=optimization["learning_rate"],
n_iter=optimization["n_iters"],
iteration=n_iter,
divider=25,
warmup_proportion=0.05,
phase=('linear', 'cosine'),
)
if loss_config["stft_lambda"] > 0:
mrstftloss = MultiResolutionSTFTLoss(**loss_config["stft_config"]).cuda()
else:
mrstftloss = None
while n_iter < optimization["n_iters"] + 1:
# for each epoch
for clean_audio, noisy_audio, _ in trainloader:
clean_audio = clean_audio.cuda()
noisy_audio = noisy_audio.cuda()
# If you have a data augmentation function augment()
# noise = noisy_audio - clean_audio
# noise, clean_audio = augment((noise, clean_audio))
# noisy_audio = noise + clean_audio
# back-propagation
optimizer.zero_grad()
X = (clean_audio, noisy_audio)
loss, loss_dic = loss_fn(net, X, **loss_config, mrstftloss=mrstftloss)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus).item()
else:
reduced_loss = loss.item()
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(net.parameters(), 1e9)
scheduler.step()
optimizer.step()
# output to log
if n_iter % log["iters_per_valid"] == 0:
print("iteration: {} \treduced loss: {:.7f} \tloss: {:.7f}".format(
n_iter, reduced_loss, loss.item()), flush=True)
if rank == 0:
# save to tensorboard
tb.add_scalar("Train/Train-Loss", loss.item(), n_iter)
tb.add_scalar("Train/Train-Reduced-Loss", reduced_loss, n_iter)
tb.add_scalar("Train/Gradient-Norm", grad_norm, n_iter)
tb.add_scalar("Train/learning-rate", optimizer.param_groups[0]["lr"], n_iter)
# save checkpoint
if n_iter > 0 and n_iter % log["iters_per_ckpt"] == 0 and rank == 0:
checkpoint_name = '{}.pkl'.format(n_iter)
torch.save({'iter': n_iter,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'training_time_seconds': int(time.time()-time0)},
os.path.join(ckpt_directory, checkpoint_name))
print('model at iteration %s is saved' % n_iter)
n_iter += 1
# After training, close TensorBoard.
if rank == 0:
tb.close()
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='config.json',
help='JSON file for configuration')
parser.add_argument('-r', '--rank', type=int, default=0,
help='rank of process for distributed')
parser.add_argument('-g', '--group_name', type=str, default='',
help='name of group for distributed')
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
train_config = config["train_config"] # training parameters
global dist_config
dist_config = config["dist_config"] # to initialize distributed training
global network_config
network_config = config["network_config"] # to define network
global trainset_config
trainset_config = config["trainset_config"] # to load trainset
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
if args.group_name == '':
print("WARNING: Multiple GPUs detected but no distributed group set")
print("Only running 1 GPU. Use distributed.py for multiple GPUs")
num_gpus = 1
if num_gpus == 1 and args.rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
train(num_gpus, args.rank, args.group_name, **train_config)
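# Typical invocations (sketch; the config path is the repo's example config):
#   single GPU:  python train.py -c configs/DNS-large-full.json
#   multi GPU:   python distributed.py -c configs/DNS-large-full.json
# distributed.py spawns one train.py process per visible GPU and fills in
# --rank and --group_name automatically.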
| CleanUNet-main | train.py |
# Adapted from https://github.com/NVIDIA/waveglow under the BSD 3-Clause License.
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import argparse
import json
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
# from torch.utils.tensorboard import SummaryWriter
import random
random.seed(0)
torch.manual_seed(0)
np.random.seed(0)
from scipy.io.wavfile import write as wavwrite
from scipy.io.wavfile import read as wavread
from dataset import load_CleanNoisyPairDataset
from util import rescale, find_max_epoch, print_size, sampling
from network import CleanUNet
def denoise(output_directory, ckpt_iter, subset, dump=False):
"""
Denoise audio
Parameters:
output_directory (str): save generated speeches to this path
ckpt_iter (int or 'max'): the pretrained checkpoint to be loaded;
automatically selects the maximum iteration if 'max' is selected
subset (str): training, testing, validation
dump (bool): whether save enhanced (denoised) audio
"""
# setup local experiment path
exp_path = train_config["exp_path"]
print('exp_path:', exp_path)
# load data
loader_config = deepcopy(trainset_config)
loader_config["crop_length_sec"] = 0
dataloader = load_CleanNoisyPairDataset(
**loader_config,
subset=subset,
batch_size=1,
num_gpus=1
)
# predefine model
net = CleanUNet(**network_config).cuda()
print_size(net)
# load checkpoint
ckpt_directory = os.path.join(train_config["log"]["directory"], exp_path, 'checkpoint')
if ckpt_iter == 'max':
ckpt_iter = find_max_epoch(ckpt_directory)
if ckpt_iter != 'pretrained':
ckpt_iter = int(ckpt_iter)
model_path = os.path.join(ckpt_directory, '{}.pkl'.format(ckpt_iter))
checkpoint = torch.load(model_path, map_location='cpu')
net.load_state_dict(checkpoint['model_state_dict'])
net.eval()
# get output directory ready
if ckpt_iter == "pretrained":
speech_directory = os.path.join(output_directory, exp_path, 'speech', ckpt_iter)
else:
speech_directory = os.path.join(output_directory, exp_path, 'speech', '{}k'.format(ckpt_iter//1000))
if dump and not os.path.isdir(speech_directory):
os.makedirs(speech_directory)
os.chmod(speech_directory, 0o775)
print("speech_directory: ", speech_directory, flush=True)
# inference
all_generated_audio = []
all_clean_audio = []
sortkey = lambda name: '_'.join(name.split('/')[-1].split('_')[1:])
for clean_audio, noisy_audio, fileid in tqdm(dataloader):
filename = sortkey(fileid[0][0])
noisy_audio = noisy_audio.cuda()
LENGTH = len(noisy_audio[0].squeeze())
generated_audio = sampling(net, noisy_audio)
if dump:
wavwrite(os.path.join(speech_directory, 'enhanced_{}'.format(filename)),
trainset_config["sample_rate"],
generated_audio[0].squeeze().cpu().numpy())
else:
all_clean_audio.append(clean_audio[0].squeeze().cpu().numpy())
all_generated_audio.append(generated_audio[0].squeeze().cpu().numpy())
return all_clean_audio, all_generated_audio
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='config.json',
help='JSON file for configuration')
parser.add_argument('-ckpt_iter', '--ckpt_iter', default='max',
help='Which checkpoint to use; assign a number or "max" or "pretrained"')
parser.add_argument('-subset', '--subset', type=str, choices=['training', 'testing', 'validation'],
default='testing', help='subset for denoising')
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
gen_config = config["gen_config"]
global network_config
network_config = config["network_config"] # to define wavenet
global train_config
train_config = config["train_config"] # train config
global trainset_config
trainset_config = config["trainset_config"] # to read trainset configurations
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
if args.subset == "testing":
denoise(gen_config["output_directory"],
subset=args.subset,
ckpt_iter=args.ckpt_iter,
dump=True)
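# Example invocation (sketch): enhance the DNS no-reverb test set with the
# latest checkpoint and dump the wavs under gen_config["output_directory"]:
#   python denoise.py -c configs/DNS-large-full.json --ckpt_iter max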
| CleanUNet-main | denoise.py |
"""
# Code adapted from:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/core/config.py
Source License
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
##############################################################################
# Config
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import torch
from utils.attr_dict import AttrDict
from runx.logx import logx
__C = AttrDict()
cfg = __C
__C.GLOBAL_RANK = 0
__C.EPOCH = 0
# Absolute path to a location to keep some large files, not in this dir.
__C.ASSETS_PATH = '/home/dcg-adlr-atao-data.cosmos277/assets'
# Use class-weighted loss per batch to increase the loss for low-pixel-count classes
__C.BATCH_WEIGHTING = False
# Border Relaxation Count
__C.BORDER_WINDOW = 1
# Number of epoch to use before turn off border restriction
__C.REDUCE_BORDER_EPOCH = -1
# Comma-separated list of class ids to relax
__C.STRICTBORDERCLASS = None
# Where output results get written
__C.RESULT_DIR = None
__C.OPTIONS = AttrDict()
__C.OPTIONS.TEST_MODE = False
__C.OPTIONS.INIT_DECODER = False
__C.OPTIONS.TORCH_VERSION = None
__C.TRAIN = AttrDict()
__C.TRAIN.RANDOM_BRIGHTNESS_SHIFT_VALUE = 10
__C.TRAIN.FP16 = False
# Attribute Dictionary for Dataset
__C.DATASET = AttrDict()
# Cityscapes Dir Location
__C.DATASET.CITYSCAPES_DIR = \
os.path.join(__C.ASSETS_PATH, 'data/Cityscapes')
__C.DATASET.CITYSCAPES_CUSTOMCOARSE = \
os.path.join(__C.ASSETS_PATH, 'data/Cityscapes/autolabelled')
__C.DATASET.CENTROID_ROOT = \
os.path.join(__C.ASSETS_PATH, 'uniform_centroids')
# SDC Augmented Cityscapes Dir Location
__C.DATASET.CITYSCAPES_AUG_DIR = ''
# Mapillary Dataset Dir Location
__C.DATASET.MAPILLARY_DIR = os.path.join(__C.ASSETS_PATH, 'data/Mapillary/data')
# Kitti Dataset Dir Location
__C.DATASET.KITTI_DIR = ''
# SDC Augmented Kitti Dataset Dir Location
__C.DATASET.KITTI_AUG_DIR = ''
# Camvid Dataset Dir Location
__C.DATASET.CAMVID_DIR = ''
# Number of splits to support
__C.DATASET.CITYSCAPES_SPLITS = 3
__C.DATASET.MEAN = [0.485, 0.456, 0.406]
__C.DATASET.STD = [0.229, 0.224, 0.225]
__C.DATASET.NAME = ''
__C.DATASET.NUM_CLASSES = 0
__C.DATASET.IGNORE_LABEL = 255
__C.DATASET.DUMP_IMAGES = False
__C.DATASET.CLASS_UNIFORM_PCT = 0.5
__C.DATASET.CLASS_UNIFORM_TILE = 1024
__C.DATASET.COARSE_BOOST_CLASSES = None
__C.DATASET.CV = 0
__C.DATASET.COLORIZE_MASK_FN = None
__C.DATASET.CUSTOM_COARSE_PROB = None
__C.DATASET.MASK_OUT_CITYSCAPES = False
# This enables there to always be translation augmentation during random crop
# process, even if image is smaller than crop size.
__C.DATASET.TRANSLATE_AUG_FIX = False
__C.DATASET.LANCZOS_SCALES = False
# Use a center crop of size args.pre_size for mapillary validation
# Need to use this if you want to dump images
__C.DATASET.MAPILLARY_CROP_VAL = False
__C.DATASET.CROP_SIZE = '896'
__C.MODEL = AttrDict()
__C.MODEL.BN = 'regularnorm'
__C.MODEL.BNFUNC = None
__C.MODEL.MSCALE = False
__C.MODEL.THREE_SCALE = False
__C.MODEL.ALT_TWO_SCALE = False
__C.MODEL.EXTRA_SCALES = '0.5,1.5'
__C.MODEL.N_SCALES = None
__C.MODEL.ALIGN_CORNERS = False
__C.MODEL.MSCALE_LO_SCALE = 0.5
__C.MODEL.OCR_ASPP = False
__C.MODEL.SEGATTN_BOT_CH = 256
__C.MODEL.ASPP_BOT_CH = 256
__C.MODEL.MSCALE_CAT_SCALE_FLT = False
__C.MODEL.MSCALE_INNER_3x3 = True
__C.MODEL.MSCALE_DROPOUT = False
__C.MODEL.MSCALE_OLDARCH = False
__C.MODEL.MSCALE_INIT = 0.5
__C.MODEL.ATTNSCALE_BN_HEAD = False
__C.MODEL.GRAD_CKPT = False
WEIGHTS_PATH = os.path.join(__C.ASSETS_PATH, 'seg_weights')
__C.MODEL.WRN38_CHECKPOINT = \
os.path.join(WEIGHTS_PATH, 'wider_resnet38.pth.tar')
__C.MODEL.WRN41_CHECKPOINT = \
os.path.join(WEIGHTS_PATH, 'wider_resnet41_cornflower_sunfish.pth')
__C.MODEL.X71_CHECKPOINT = \
os.path.join(WEIGHTS_PATH, 'aligned_xception71.pth')
__C.MODEL.HRNET_CHECKPOINT = \
os.path.join(WEIGHTS_PATH, 'hrnetv2_w48_imagenet_pretrained.pth')
__C.LOSS = AttrDict()
# Weight for OCR aux loss
__C.LOSS.OCR_ALPHA = 0.4
# Use RMI for the OCR aux loss
__C.LOSS.OCR_AUX_RMI = False
# Supervise the multi-scale predictions directly
__C.LOSS.SUPERVISED_MSCALE_WT = 0
__C.MODEL.OCR = AttrDict()
__C.MODEL.OCR.MID_CHANNELS = 512
__C.MODEL.OCR.KEY_CHANNELS = 256
__C.MODEL.OCR_EXTRA = AttrDict()
__C.MODEL.OCR_EXTRA.FINAL_CONV_KERNEL = 1
__C.MODEL.OCR_EXTRA.STAGE1 = AttrDict()
__C.MODEL.OCR_EXTRA.STAGE1.NUM_MODULES = 1
__C.MODEL.OCR_EXTRA.STAGE1.NUM_BRANCHES = 1
__C.MODEL.OCR_EXTRA.STAGE1.BLOCK = 'BOTTLENECK'
__C.MODEL.OCR_EXTRA.STAGE1.NUM_BLOCKS = [4]
__C.MODEL.OCR_EXTRA.STAGE1.NUM_CHANNELS = [64]
__C.MODEL.OCR_EXTRA.STAGE1.FUSE_METHOD = 'SUM'
__C.MODEL.OCR_EXTRA.STAGE2 = AttrDict()
__C.MODEL.OCR_EXTRA.STAGE2.NUM_MODULES = 1
__C.MODEL.OCR_EXTRA.STAGE2.NUM_BRANCHES = 2
__C.MODEL.OCR_EXTRA.STAGE2.BLOCK = 'BASIC'
__C.MODEL.OCR_EXTRA.STAGE2.NUM_BLOCKS = [4, 4]
__C.MODEL.OCR_EXTRA.STAGE2.NUM_CHANNELS = [48, 96]
__C.MODEL.OCR_EXTRA.STAGE2.FUSE_METHOD = 'SUM'
__C.MODEL.OCR_EXTRA.STAGE3 = AttrDict()
__C.MODEL.OCR_EXTRA.STAGE3.NUM_MODULES = 4
__C.MODEL.OCR_EXTRA.STAGE3.NUM_BRANCHES = 3
__C.MODEL.OCR_EXTRA.STAGE3.BLOCK = 'BASIC'
__C.MODEL.OCR_EXTRA.STAGE3.NUM_BLOCKS = [4, 4, 4]
__C.MODEL.OCR_EXTRA.STAGE3.NUM_CHANNELS = [48, 96, 192]
__C.MODEL.OCR_EXTRA.STAGE3.FUSE_METHOD = 'SUM'
__C.MODEL.OCR_EXTRA.STAGE4 = AttrDict()
__C.MODEL.OCR_EXTRA.STAGE4.NUM_MODULES = 3
__C.MODEL.OCR_EXTRA.STAGE4.NUM_BRANCHES = 4
__C.MODEL.OCR_EXTRA.STAGE4.BLOCK = 'BASIC'
__C.MODEL.OCR_EXTRA.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
__C.MODEL.OCR_EXTRA.STAGE4.NUM_CHANNELS = [48, 96, 192, 384]
__C.MODEL.OCR_EXTRA.STAGE4.FUSE_METHOD = 'SUM'
def torch_version_float():
version_str = torch.__version__
version_re = re.search(r'^([0-9]+\.[0-9]+)', version_str)
if version_re:
version = float(version_re.group(1))
logx.msg(f'Torch version: {version}, {version_str}')
else:
version = 1.0
logx.msg(f"Can't parse torch version ({version_str}), assuming {version}")
return version
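# Example (illustrative): '1.13.0+cu117' -> 1.13 and '2.1.0' -> 2.1; anything
# the regex cannot parse falls back to 1.0.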
def assert_and_infer_cfg(args, make_immutable=True, train_mode=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg
settings during script execution (which can lead to hard to debug errors
or code that's harder to understand than is necessary).
"""
__C.OPTIONS.TORCH_VERSION = torch_version_float()
if hasattr(args, 'syncbn') and args.syncbn:
if args.apex:
import apex
__C.MODEL.BN = 'apex-syncnorm'
__C.MODEL.BNFUNC = apex.parallel.SyncBatchNorm
else:
raise Exception('No Support for SyncBN without Apex')
else:
__C.MODEL.BNFUNC = torch.nn.BatchNorm2d
print('Using regular batch norm')
if not train_mode:
cfg.immutable(True)
return
if args.batch_weighting:
__C.BATCH_WEIGHTING = True
if args.custom_coarse_prob:
__C.DATASET.CUSTOM_COARSE_PROB = args.custom_coarse_prob
if args.jointwtborder:
if args.strict_bdr_cls != '':
strict_classes = [int(i) for i in args.strict_bdr_cls.split(",")]
__C.STRICTBORDERCLASS = strict_classes
if args.rlx_off_epoch > -1:
__C.REDUCE_BORDER_EPOCH = args.rlx_off_epoch
cfg.DATASET.NAME = args.dataset
cfg.DATASET.DUMP_IMAGES = args.dump_augmentation_images
cfg.DATASET.CLASS_UNIFORM_PCT = args.class_uniform_pct
cfg.DATASET.CLASS_UNIFORM_TILE = args.class_uniform_tile
if args.coarse_boost_classes:
cfg.DATASET.COARSE_BOOST_CLASSES = \
[int(i) for i in args.coarse_boost_classes.split(',')]
cfg.DATASET.CLASS_UNIFORM_BIAS = None
if args.dump_assets and args.dataset == 'cityscapes':
# A hacky way to force that when we dump cityscapes
logx.msg('*' * 70)
logx.msg(f'ALERT: forcing cv=3 to allow all images to be evaluated')
logx.msg('*' * 70)
cfg.DATASET.CV = 3
else:
cfg.DATASET.CV = args.cv
# Total number of splits
cfg.DATASET.CV_SPLITS = 3
if args.translate_aug_fix:
cfg.DATASET.TRANSLATE_AUG_FIX = True
cfg.MODEL.MSCALE = ('mscale' in args.arch.lower() or 'attnscale' in
args.arch.lower())
if args.three_scale:
cfg.MODEL.THREE_SCALE = True
if args.alt_two_scale:
cfg.MODEL.ALT_TWO_SCALE = True
cfg.MODEL.MSCALE_LO_SCALE = args.mscale_lo_scale
def str2list(s):
alist = s.split(',')
alist = [float(x) for x in alist]
return alist
if args.n_scales:
cfg.MODEL.N_SCALES = str2list(args.n_scales)
logx.msg('n scales {}'.format(cfg.MODEL.N_SCALES))
if args.extra_scales:
cfg.MODEL.EXTRA_SCALES = str2list(args.extra_scales)
if args.align_corners:
cfg.MODEL.ALIGN_CORNERS = True
if args.init_decoder:
cfg.OPTIONS.INIT_DECODER = True
cfg.RESULT_DIR = args.result_dir
if args.mask_out_cityscapes:
cfg.DATASET.MASK_OUT_CITYSCAPES = True
if args.fp16:
cfg.TRAIN.FP16 = True
if args.map_crop_val:
__C.DATASET.MAPILLARY_CROP_VAL = True
__C.DATASET.CROP_SIZE = args.crop_size
if args.aspp_bot_ch is not None:
# todo fixme: make all code use this cfg
__C.MODEL.ASPP_BOT_CH = int(args.aspp_bot_ch)
if args.mscale_cat_scale_flt:
__C.MODEL.MSCALE_CAT_SCALE_FLT = True
if args.mscale_no3x3:
__C.MODEL.MSCALE_INNER_3x3 = False
if args.mscale_dropout:
__C.MODEL.MSCALE_DROPOUT = True
if args.mscale_old_arch:
__C.MODEL.MSCALE_OLDARCH = True
if args.mscale_init is not None:
__C.MODEL.MSCALE_INIT = args.mscale_init
if args.attnscale_bn_head:
__C.MODEL.ATTNSCALE_BN_HEAD = True
if args.segattn_bot_ch is not None:
__C.MODEL.SEGATTN_BOT_CH = args.segattn_bot_ch
if args.set_cityscapes_root is not None:
# '/data/cs_imgs_cv0'
# '/data/cs_imgs_cv2'
__C.DATASET.CITYSCAPES_DIR = args.set_cityscapes_root
if args.ocr_alpha is not None:
__C.LOSS.OCR_ALPHA = args.ocr_alpha
if args.ocr_aux_loss_rmi:
__C.LOSS.OCR_AUX_RMI = True
if args.supervised_mscale_loss_wt is not None:
__C.LOSS.SUPERVISED_MSCALE_WT = args.supervised_mscale_loss_wt
cfg.DROPOUT_COARSE_BOOST_CLASSES = None
if args.custom_coarse_dropout_classes:
cfg.DROPOUT_COARSE_BOOST_CLASSES = \
[int(i) for i in args.custom_coarse_dropout_classes.split(',')]
if args.grad_ckpt:
__C.MODEL.GRAD_CKPT = True
__C.GLOBAL_RANK = args.global_rank
if make_immutable:
cfg.immutable(True)
def update_epoch(epoch):
# Update EPOCH CTR
cfg.immutable(False)
cfg.EPOCH = epoch
cfg.immutable(True)
def update_dataset_cfg(num_classes, ignore_label):
cfg.immutable(False)
cfg.DATASET.NUM_CLASSES = num_classes
cfg.DATASET.IGNORE_LABEL = ignore_label
logx.msg('num_classes = {}'.format(num_classes))
cfg.immutable(True)
def update_dataset_inst(dataset_inst):
cfg.immutable(False)
cfg.DATASET_INST = dataset_inst
cfg.immutable(True)
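# Typical lifecycle (sketch, inferred from the helpers above): the training
# script calls assert_and_infer_cfg(args) once after argument parsing, the
# dataset code calls update_dataset_cfg()/update_dataset_inst() while being
# built, and update_epoch(epoch) runs at the start of every training epoch.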
| semantic-segmentation-main | config.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import os
import sys
import time
import torch
from apex import amp
from runx.logx import logx
from config import assert_and_infer_cfg, update_epoch, cfg
from utils.misc import AverageMeter, prep_experiment, eval_metrics
from utils.misc import ImageDumper
from utils.trnval_utils import eval_minibatch, validate_topn
from loss.utils import get_loss
from loss.optimizer import get_optimizer, restore_opt, restore_net
import datasets
import network
# Import autoresume module
sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))
AutoResume = None
try:
from userlib.auto_resume import AutoResume
except ImportError:
print('AutoResume not found; auto-resume support is disabled')
# Argument Parser
parser = argparse.ArgumentParser(description='Semantic Segmentation')
parser.add_argument('--lr', type=float, default=0.002)
parser.add_argument('--arch', type=str, default='deepv3.DeepWV3Plus',
help='Network architecture. We have DeepSRNX50V3PlusD (backbone: ResNeXt50) \
and deepWV3Plus (backbone: WideResNet38).')
parser.add_argument('--dataset', type=str, default='cityscapes',
help='cityscapes, mapillary, camvid, kitti')
parser.add_argument('--dataset_inst', default=None,
help='placeholder for dataset instance')
parser.add_argument('--num_workers', type=int, default=4,
help='cpu worker threads per dataloader instance')
parser.add_argument('--cv', type=int, default=0,
help=('Cross-validation split id to use. Default # of splits set'
' to 3 in config'))
parser.add_argument('--class_uniform_pct', type=float, default=0.5,
help='What fraction of images is uniformly sampled')
parser.add_argument('--class_uniform_tile', type=int, default=1024,
help='tile size for class uniform sampling')
parser.add_argument('--coarse_boost_classes', type=str, default=None,
help='Use coarse annotations for specific classes')
parser.add_argument('--custom_coarse_dropout_classes', type=str, default=None,
help='Drop some classes from auto-labelling')
parser.add_argument('--img_wt_loss', action='store_true', default=False,
help='per-image class-weighted loss')
parser.add_argument('--rmi_loss', action='store_true', default=False,
help='use RMI loss')
parser.add_argument('--batch_weighting', action='store_true', default=False,
help=('Batch weighting for class (use nll class weighting using '
'batch stats)'))
parser.add_argument('--jointwtborder', action='store_true', default=False,
help='Enable boundary label relaxation')
parser.add_argument('--strict_bdr_cls', type=str, default='',
help='Enable boundary label relaxation for specific classes')
parser.add_argument('--rlx_off_epoch', type=int, default=-1,
help='Turn off border relaxation after specific epoch count')
parser.add_argument('--rescale', type=float, default=1.0,
help='Warm Restarts new lr ratio compared to original lr')
parser.add_argument('--repoly', type=float, default=1.5,
help='Warm Restart new poly exp')
parser.add_argument('--apex', action='store_true', default=False,
help='Use Nvidia Apex Distributed Data Parallel')
parser.add_argument('--fp16', action='store_true', default=False,
help='Use Nvidia Apex AMP')
parser.add_argument('--local_rank', default=0, type=int,
help='parameter used by apex library')
parser.add_argument('--global_rank', default=0, type=int,
help='parameter used by apex library')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer')
parser.add_argument('--amsgrad', action='store_true', help='amsgrad for adam')
parser.add_argument('--freeze_trunk', action='store_true', default=False)
parser.add_argument('--hardnm', default=0, type=int,
help=('0 means no aug, 1 means hard negative mining '
'iter 1, 2 means hard negative mining iter 2'))
parser.add_argument('--trunk', type=str, default='resnet101',
help='trunk model, can be: resnet101 (default), resnet50')
parser.add_argument('--max_epoch', type=int, default=180)
parser.add_argument('--max_cu_epoch', type=int, default=150,
help='Class Uniform Max Epochs')
parser.add_argument('--start_epoch', type=int, default=0)
parser.add_argument('--color_aug', type=float,
default=0.25, help='level of color augmentation')
parser.add_argument('--gblur', action='store_true', default=False,
                    help='Use Gaussian Blur Augmentation')
parser.add_argument('--bblur', action='store_true', default=False,
help='Use Bilateral Blur Augmentation')
parser.add_argument('--brt_aug', action='store_true', default=False,
help='Use brightness augmentation')
parser.add_argument('--lr_schedule', type=str, default='poly',
help='name of lr schedule: poly')
parser.add_argument('--poly_exp', type=float, default=1.0,
help='polynomial LR exponent')
parser.add_argument('--poly_step', type=int, default=110,
help='polynomial epoch step')
parser.add_argument('--bs_trn', type=int, default=2,
help='Batch size for training per gpu')
parser.add_argument('--bs_val', type=int, default=1,
help='Batch size for Validation per gpu')
parser.add_argument('--crop_size', type=str, default='896',
help=('training crop size: either scalar or h,w'))
parser.add_argument('--scale_min', type=float, default=0.5,
help='dynamically scale training images down to this size')
parser.add_argument('--scale_max', type=float, default=2.0,
help='dynamically scale training images up to this size')
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--snapshot', type=str, default=None)
parser.add_argument('--resume', type=str, default=None,
help=('continue training from a checkpoint. weights, '
'optimizer, schedule are restored'))
parser.add_argument('--restore_optimizer', action='store_true', default=False)
parser.add_argument('--restore_net', action='store_true', default=False)
parser.add_argument('--exp', type=str, default='default',
help='experiment directory name')
parser.add_argument('--result_dir', type=str, default='./logs',
help='where to write log output')
parser.add_argument('--syncbn', action='store_true', default=False,
help='Use Synchronized BN')
parser.add_argument('--dump_augmentation_images', action='store_true', default=False,
                    help='Dump Augmented Images for sanity check')
parser.add_argument('--test_mode', action='store_true', default=False,
                    help=('Minimum testing to verify nothing failed. '
                          'Runs code for 2 epochs of train and val'))
parser.add_argument('-wb', '--wt_bound', type=float, default=1.0,
help='Weight Scaling for the losses')
parser.add_argument('--maxSkip', type=int, default=0,
help='Skip x number of frames of video augmented dataset')
parser.add_argument('--scf', action='store_true', default=False,
help='scale correction factor')
# Full Crop Training
parser.add_argument('--full_crop_training', action='store_true', default=False,
help='Full Crop Training')
# Multi Scale Inference
parser.add_argument('--multi_scale_inference', action='store_true',
help='Run multi scale inference')
parser.add_argument('--default_scale', type=float, default=1.0,
help='default scale to run validation')
parser.add_argument('--log_msinf_to_tb', action='store_true', default=False,
help='Log multi-scale Inference to Tensorboard')
parser.add_argument('--eval', type=str, default=None,
help=('just run evaluation, can be set to val or trn or '
'folder'))
parser.add_argument('--eval_folder', type=str, default=None,
help='path to frames to evaluate')
parser.add_argument('--three_scale', action='store_true', default=False)
parser.add_argument('--alt_two_scale', action='store_true', default=False)
parser.add_argument('--do_flip', action='store_true', default=False)
parser.add_argument('--extra_scales', type=str, default='0.5,2.0')
parser.add_argument('--n_scales', type=str, default=None)
parser.add_argument('--align_corners', action='store_true',
default=False)
parser.add_argument('--translate_aug_fix', action='store_true', default=False)
parser.add_argument('--mscale_lo_scale', type=float, default=0.5,
help='low resolution training scale')
parser.add_argument('--pre_size', type=int, default=None,
help=('resize long edge of images to this before'
' augmentation'))
parser.add_argument('--amp_opt_level', default='O1', type=str,
help=('amp optimization level'))
parser.add_argument('--rand_augment', default=None,
help='RandAugment setting: set to \'N,M\'')
parser.add_argument('--init_decoder', default=False, action='store_true',
help='initialize decoder with kaiming normal')
parser.add_argument('--dump_topn', type=int, default=0,
help='Dump worst val images')
parser.add_argument('--dump_assets', action='store_true',
help='Dump interesting assets')
parser.add_argument('--dump_all_images', action='store_true',
help='Dump all images, not just a subset')
parser.add_argument('--dump_for_submission', action='store_true',
help='Dump assets for submission')
parser.add_argument('--dump_for_auto_labelling', action='store_true',
help='Dump assets for autolabelling')
parser.add_argument('--dump_topn_all', action='store_true', default=False,
help='dump topN worst failures')
parser.add_argument('--custom_coarse_prob', type=float, default=None,
help='Custom Coarse Prob')
parser.add_argument('--only_coarse', action='store_true', default=False)
parser.add_argument('--mask_out_cityscapes', action='store_true',
default=False)
parser.add_argument('--ocr_aspp', action='store_true', default=False)
parser.add_argument('--map_crop_val', action='store_true', default=False)
parser.add_argument('--aspp_bot_ch', type=int, default=None)
parser.add_argument('--trial', type=int, default=None)
parser.add_argument('--mscale_cat_scale_flt', action='store_true',
default=False)
parser.add_argument('--mscale_dropout', action='store_true',
default=False)
parser.add_argument('--mscale_no3x3', action='store_true',
default=False, help='no inner 3x3')
parser.add_argument('--mscale_old_arch', action='store_true',
default=False, help='use old attention head')
parser.add_argument('--mscale_init', type=float, default=None,
help='default attention initialization')
parser.add_argument('--attnscale_bn_head', action='store_true',
default=False)
parser.add_argument('--set_cityscapes_root', type=str, default=None,
help='override cityscapes default root dir')
parser.add_argument('--ocr_alpha', type=float, default=None,
help='set HRNet OCR auxiliary loss weight')
parser.add_argument('--val_freq', type=int, default=1,
help='how often (in epochs) to run validation')
parser.add_argument('--deterministic', action='store_true',
default=False)
parser.add_argument('--summary', action='store_true',
default=False)
parser.add_argument('--segattn_bot_ch', type=int, default=None,
help='bottleneck channels for seg and attn heads')
parser.add_argument('--grad_ckpt', action='store_true',
default=False)
parser.add_argument('--no_metrics', action='store_true', default=False,
help='prevent calculation of metrics')
parser.add_argument('--supervised_mscale_loss_wt', type=float, default=None,
help='weighting for the supervised loss')
parser.add_argument('--ocr_aux_loss_rmi', action='store_true', default=False,
help='allow rmi for aux loss')
args = parser.parse_args()
args.best_record = {'epoch': -1, 'iter': 0, 'val_loss': 1e10, 'acc': 0,
'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
# Enable CUDNN Benchmarking optimization
torch.backends.cudnn.benchmark = True
if args.deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
args.world_size = 1
# Test mode: run two epochs with a few iterations of training and val
if args.test_mode:
args.max_epoch = 2
if 'WORLD_SIZE' in os.environ and args.apex:
# args.apex = int(os.environ['WORLD_SIZE']) > 1
args.world_size = int(os.environ['WORLD_SIZE'])
args.global_rank = int(os.environ['RANK'])
if args.apex:
print('Global Rank: {} Local Rank: {}'.format(
args.global_rank, args.local_rank))
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
def check_termination(epoch):
if AutoResume:
shouldterminate = AutoResume.termination_requested()
if shouldterminate:
if args.global_rank == 0:
progress = "Progress %d%% (epoch %d of %d)" % (
(epoch * 100 / args.max_epoch),
epoch,
args.max_epoch
)
AutoResume.request_resume(
user_dict={"RESUME_FILE": logx.save_ckpt_fn,
"TENSORBOARD_DIR": args.result_dir,
"EPOCH": str(epoch)
}, message=progress)
return 1
else:
return 1
return 0
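# A hedged illustration (an addition, not part of the original file) of the
# intended auto-resume handshake: the training loop calls check_termination()
# once per epoch; when the cluster requests termination, request_resume() above
# records the checkpoint path, and on the next launch main() reads it back via
# AutoResume.get_resume_details() to continue from args.start_epoch.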
def main():
"""
Main Function
"""
if AutoResume:
AutoResume.init()
assert args.result_dir is not None, 'need to define result_dir arg'
logx.initialize(logdir=args.result_dir,
tensorboard=True, hparams=vars(args),
global_rank=args.global_rank)
# Set up the Arguments, Tensorboard Writer, Dataloader, Loss Fn, Optimizer
assert_and_infer_cfg(args)
prep_experiment(args)
train_loader, val_loader, train_obj = \
datasets.setup_loaders(args)
criterion, criterion_val = get_loss(args)
auto_resume_details = None
if AutoResume:
auto_resume_details = AutoResume.get_resume_details()
if auto_resume_details:
checkpoint_fn = auto_resume_details.get("RESUME_FILE", None)
checkpoint = torch.load(checkpoint_fn,
map_location=torch.device('cpu'))
args.result_dir = auto_resume_details.get("TENSORBOARD_DIR", None)
args.start_epoch = int(auto_resume_details.get("EPOCH", None)) + 1
args.restore_net = True
args.restore_optimizer = True
msg = ("Found details of a requested auto-resume: checkpoint={}"
" tensorboard={} at epoch {}")
logx.msg(msg.format(checkpoint_fn, args.result_dir,
args.start_epoch))
elif args.resume:
checkpoint = torch.load(args.resume,
map_location=torch.device('cpu'))
args.arch = checkpoint['arch']
args.start_epoch = int(checkpoint['epoch']) + 1
args.restore_net = True
args.restore_optimizer = True
msg = "Resuming from: checkpoint={}, epoch {}, arch {}"
logx.msg(msg.format(args.resume, args.start_epoch, args.arch))
elif args.snapshot:
if 'ASSETS_PATH' in args.snapshot:
args.snapshot = args.snapshot.replace('ASSETS_PATH', cfg.ASSETS_PATH)
checkpoint = torch.load(args.snapshot,
map_location=torch.device('cpu'))
args.restore_net = True
msg = "Loading weights from: checkpoint={}".format(args.snapshot)
logx.msg(msg)
net = network.get_net(args, criterion)
optim, scheduler = get_optimizer(args, net)
if args.fp16:
net, optim = amp.initialize(net, optim, opt_level=args.amp_opt_level)
net = network.wrap_network_in_dataparallel(net, args.apex)
if args.summary:
print(str(net))
from pytorchOpCounter.thop import profile
img = torch.randn(1, 3, 1024, 2048).cuda()
mask = torch.randn(1, 1, 1024, 2048).cuda()
macs, params = profile(net, inputs={'images': img, 'gts': mask})
print(f'macs {macs} params {params}')
sys.exit()
if args.restore_optimizer:
restore_opt(optim, checkpoint)
if args.restore_net:
restore_net(net, checkpoint)
if args.init_decoder:
net.module.init_mods()
torch.cuda.empty_cache()
if args.start_epoch != 0:
scheduler.step(args.start_epoch)
# There are 4 options for evaluation:
# --eval val just run validation
# --eval val --dump_assets dump all images and assets
# --eval folder just dump all basic images
# --eval folder --dump_assets dump all images and assets
if args.eval == 'val':
if args.dump_topn:
validate_topn(val_loader, net, criterion_val, optim, 0, args)
else:
validate(val_loader, net, criterion=criterion_val, optim=optim, epoch=0,
dump_assets=args.dump_assets,
dump_all_images=args.dump_all_images,
calc_metrics=not args.no_metrics)
return 0
elif args.eval == 'folder':
# Using a folder for evaluation means to not calculate metrics
validate(val_loader, net, criterion=None, optim=None, epoch=0,
calc_metrics=False, dump_assets=args.dump_assets,
dump_all_images=True)
return 0
elif args.eval is not None:
        raise ValueError('unknown eval option {}'.format(args.eval))
for epoch in range(args.start_epoch, args.max_epoch):
update_epoch(epoch)
if args.only_coarse:
train_obj.only_coarse()
train_obj.build_epoch()
if args.apex:
train_loader.sampler.set_num_samples()
elif args.class_uniform_pct:
if epoch >= args.max_cu_epoch:
train_obj.disable_coarse()
train_obj.build_epoch()
if args.apex:
train_loader.sampler.set_num_samples()
else:
train_obj.build_epoch()
else:
pass
train(train_loader, net, optim, epoch)
if args.apex:
train_loader.sampler.set_epoch(epoch + 1)
if epoch % args.val_freq == 0:
validate(val_loader, net, criterion_val, optim, epoch)
scheduler.step()
if check_termination(epoch):
return 0
def train(train_loader, net, optim, curr_epoch):
"""
Runs the training loop per epoch
train_loader: Data loader for train
    net: the network
    optim: optimizer
curr_epoch: current epoch
return:
"""
net.train()
train_main_loss = AverageMeter()
start_time = None
warmup_iter = 10
for i, data in enumerate(train_loader):
if i <= warmup_iter:
start_time = time.time()
# inputs = (bs,3,713,713)
# gts = (bs,713,713)
images, gts, _img_name, scale_float = data
batch_pixel_size = images.size(0) * images.size(2) * images.size(3)
images, gts, scale_float = images.cuda(), gts.cuda(), scale_float.cuda()
inputs = {'images': images, 'gts': gts}
optim.zero_grad()
main_loss = net(inputs)
if args.apex:
log_main_loss = main_loss.clone().detach_()
torch.distributed.all_reduce(log_main_loss,
torch.distributed.ReduceOp.SUM)
log_main_loss = log_main_loss / args.world_size
else:
main_loss = main_loss.mean()
log_main_loss = main_loss.clone().detach_()
train_main_loss.update(log_main_loss.item(), batch_pixel_size)
if args.fp16:
with amp.scale_loss(main_loss, optim) as scaled_loss:
scaled_loss.backward()
else:
main_loss.backward()
optim.step()
if i >= warmup_iter:
curr_time = time.time()
batches = i - warmup_iter + 1
batchtime = (curr_time - start_time) / batches
else:
batchtime = 0
msg = ('[epoch {}], [iter {} / {}], [train main loss {:0.6f}],'
' [lr {:0.6f}] [batchtime {:0.3g}]')
msg = msg.format(
curr_epoch, i + 1, len(train_loader), train_main_loss.avg,
optim.param_groups[-1]['lr'], batchtime)
logx.msg(msg)
metrics = {'loss': train_main_loss.avg,
'lr': optim.param_groups[-1]['lr']}
curr_iter = curr_epoch * len(train_loader) + i
logx.metric('train', metrics, curr_iter)
if i >= 10 and args.test_mode:
del data, inputs, gts
return
del data
def validate(val_loader, net, criterion, optim, epoch,
calc_metrics=True,
dump_assets=False,
dump_all_images=False):
"""
Run validation for one epoch
:val_loader: data loader for validation
:net: the network
:criterion: loss fn
:optimizer: optimizer
:epoch: current epoch
:calc_metrics: calculate validation score
:dump_assets: dump attention prediction(s) images
:dump_all_images: dump all images, not just N
"""
dumper = ImageDumper(val_len=len(val_loader),
dump_all_images=dump_all_images,
dump_assets=dump_assets,
dump_for_auto_labelling=args.dump_for_auto_labelling,
dump_for_submission=args.dump_for_submission)
net.eval()
val_loss = AverageMeter()
iou_acc = 0
for val_idx, data in enumerate(val_loader):
input_images, labels, img_names, _ = data
if args.dump_for_auto_labelling or args.dump_for_submission:
submit_fn = '{}.png'.format(img_names[0])
if val_idx % 20 == 0:
logx.msg(f'validating[Iter: {val_idx + 1} / {len(val_loader)}]')
if os.path.exists(os.path.join(dumper.save_dir, submit_fn)):
continue
# Run network
assets, _iou_acc = \
eval_minibatch(data, net, criterion, val_loss, calc_metrics,
args, val_idx)
iou_acc += _iou_acc
input_images, labels, img_names, _ = data
dumper.dump({'gt_images': labels,
'input_images': input_images,
'img_names': img_names,
'assets': assets}, val_idx)
if val_idx > 5 and args.test_mode:
break
if val_idx % 20 == 0:
logx.msg(f'validating[Iter: {val_idx + 1} / {len(val_loader)}]')
was_best = False
if calc_metrics:
was_best = eval_metrics(iou_acc, args, net, optim, val_loss, epoch)
# Write out a summary html page and tensorboard image table
if not args.dump_for_auto_labelling and not args.dump_for_submission:
dumper.write_summaries(was_best)
if __name__ == '__main__':
main()
| semantic-segmentation-main | train.py |
# This code is adapted from: https://github.com/ZJULearning/RMI
# python 2.X, 3.X compatibility
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import torch
import torch.nn.functional as F
__all__ = ['map_get_pairs', 'log_det_by_cholesky']
def map_get_pairs(labels_4D, probs_4D, radius=3, is_combine=True):
"""get map pairs
Args:
labels_4D : labels, shape [N, C, H, W]
probs_4D : probabilities, shape [N, C, H, W]
radius : the square radius
Return:
        tensor with shape [N, C, 2 * radius * radius, H - (radius - 1), W - (radius - 1)]
        when is_combine is True, else two tensors each of shape
        [N, C, radius * radius, H - (radius - 1), W - (radius - 1)]
"""
# pad to ensure the following slice operation is valid
#pad_beg = int(radius // 2)
#pad_end = radius - pad_beg
# the original height and width
label_shape = labels_4D.size()
h, w = label_shape[2], label_shape[3]
new_h, new_w = h - (radius - 1), w - (radius - 1)
# https://pytorch.org/docs/stable/nn.html?highlight=f%20pad#torch.nn.functional.pad
#padding = (pad_beg, pad_end, pad_beg, pad_end)
#labels_4D, probs_4D = F.pad(labels_4D, padding), F.pad(probs_4D, padding)
# get the neighbors
la_ns = []
pr_ns = []
#for x in range(0, radius, 1):
for y in range(0, radius, 1):
for x in range(0, radius, 1):
la_now = labels_4D[:, :, y:y + new_h, x:x + new_w]
pr_now = probs_4D[:, :, y:y + new_h, x:x + new_w]
la_ns.append(la_now)
pr_ns.append(pr_now)
if is_combine:
# for calculating RMI
pair_ns = la_ns + pr_ns
p_vectors = torch.stack(pair_ns, dim=2)
return p_vectors
else:
# for other purpose
la_vectors = torch.stack(la_ns, dim=2)
pr_vectors = torch.stack(pr_ns, dim=2)
return la_vectors, pr_vectors
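# A minimal shape check for map_get_pairs (an illustrative addition, not part
# of the original file): with radius=2 the combined stack holds radius * radius
# label slices followed by radius * radius probability slices.
def _map_get_pairs_shape_check():
    labels = torch.zeros(1, 3, 8, 8)
    probs = torch.rand(1, 3, 8, 8)
    pairs = map_get_pairs(labels, probs, radius=2, is_combine=True)
    # new_h = new_w = 8 - (2 - 1) = 7; pair dimension = 2 * (2 * 2) = 8
    assert pairs.shape == (1, 3, 8, 7, 7)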
def map_get_pairs_region(labels_4D, probs_4D, radius=3, is_combine=0, num_classeses=21):
"""get map pairs
Args:
labels_4D : labels, shape [N, C, H, W].
probs_4D : probabilities, shape [N, C, H, W].
radius : The side length of the square region.
Return:
        A tensor with shape [N, C, radius * radius, H // radius, W // radius]
"""
kernel = torch.zeros([num_classeses, 1, radius, radius]).type_as(probs_4D)
padding = radius // 2
# get the neighbours
la_ns = []
pr_ns = []
for y in range(0, radius, 1):
for x in range(0, radius, 1):
kernel_now = kernel.clone()
kernel_now[:, :, y, x] = 1.0
la_now = F.conv2d(labels_4D, kernel_now, stride=radius, padding=padding, groups=num_classeses)
pr_now = F.conv2d(probs_4D, kernel_now, stride=radius, padding=padding, groups=num_classeses)
la_ns.append(la_now)
pr_ns.append(pr_now)
if is_combine:
# for calculating RMI
pair_ns = la_ns + pr_ns
p_vectors = torch.stack(pair_ns, dim=2)
return p_vectors
else:
# for other purpose
la_vectors = torch.stack(la_ns, dim=2)
pr_vectors = torch.stack(pr_ns, dim=2)
return la_vectors, pr_vectors
def log_det_by_cholesky(matrix):
"""
Args:
matrix: matrix must be a positive define matrix.
shape [N, C, D, D].
Ref:
https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/ops/linalg/linalg_impl.py
"""
# This uses the property that the log det(A) = 2 * sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
chol = torch.cholesky(matrix)
#return 2.0 * torch.sum(torch.log(torch.diagonal(chol, dim1=-2, dim2=-1) + 1e-6), dim=-1)
return 2.0 * torch.sum(torch.log(torch.diagonal(chol, dim1=-2, dim2=-1) + 1e-8), dim=-1)
def batch_cholesky_inverse(matrix):
"""
Args: matrix, 4-D tensor, [N, C, M, M].
matrix must be a symmetric positive define matrix.
"""
chol_low = torch.cholesky(matrix, upper=False)
chol_low_inv = batch_low_tri_inv(chol_low)
return torch.matmul(chol_low_inv.transpose(-2, -1), chol_low_inv)
def batch_low_tri_inv(L):
"""
Batched inverse of lower triangular matrices
Args:
L : a lower triangular matrix
Ref:
https://www.pugetsystems.com/labs/hpc/PyTorch-for-Scientific-Computing
"""
n = L.shape[-1]
invL = torch.zeros_like(L)
for j in range(0, n):
invL[..., j, j] = 1.0 / L[..., j, j]
for i in range(j + 1, n):
S = 0.0
for k in range(0, i + 1):
S = S - L[..., i, k] * invL[..., k, j].clone()
invL[..., i, j] = S / L[..., i, i]
return invL
def log_det_by_cholesky_test():
"""
test for function log_det_by_cholesky()
"""
a = torch.randn(1, 4, 4)
a = torch.matmul(a, a.transpose(2, 1))
print(a)
res_1 = torch.logdet(torch.squeeze(a))
res_2 = log_det_by_cholesky(a)
print(res_1, res_2)
def batch_inv_test():
"""
test for function batch_cholesky_inverse()
"""
a = torch.randn(1, 1, 4, 4)
a = torch.matmul(a, a.transpose(-2, -1))
print(a)
res_1 = torch.inverse(a)
res_2 = batch_cholesky_inverse(a)
print(res_1, '\n', res_2)
def mean_var_test():
x = torch.randn(3, 4)
y = torch.randn(3, 4)
x_mean = x.mean(dim=1, keepdim=True)
x_sum = x.sum(dim=1, keepdim=True) / 2.0
y_mean = y.mean(dim=1, keepdim=True)
y_sum = y.sum(dim=1, keepdim=True) / 2.0
x_var_1 = torch.matmul(x - x_mean, (x - x_mean).t())
x_var_2 = torch.matmul(x, x.t()) - torch.matmul(x_sum, x_sum.t())
xy_cov = torch.matmul(x - x_mean, (y - y_mean).t())
xy_cov_1 = torch.matmul(x, y.t()) - x_sum.matmul(y_sum.t())
print(x_var_1)
print(x_var_2)
print(xy_cov, '\n', xy_cov_1)
if __name__ == '__main__':
batch_inv_test()
| semantic-segmentation-main | loss/rmi_utils.py |
"""
This code is adapted from: https://github.com/ZJULearning/RMI
The implementation of the paper:
Region Mutual Information Loss for Semantic Segmentation.
"""
# python 2.X, 3.X compatibility
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from loss import rmi_utils
from config import cfg
from apex import amp
_euler_num = 2.718281828 # euler number
_pi = 3.14159265 # pi
_ln_2_pi = 1.837877 # ln(2 * pi)
_CLIP_MIN = 1e-6 # min clip value after softmax or sigmoid operations
_CLIP_MAX = 1.0 # max clip value after softmax or sigmoid operations
_POS_ALPHA = 5e-4 # add this factor to ensure the AA^T is positive definite
_IS_SUM = 1 # sum the loss per channel
__all__ = ['RMILoss']
class RMILoss(nn.Module):
"""
region mutual information
I(A, B) = H(A) + H(B) - H(A, B)
    This version needs a lot of memory if you do not downsample.
"""
def __init__(self,
num_classes=21,
rmi_radius=3,
rmi_pool_way=1,
rmi_pool_size=4,
rmi_pool_stride=4,
loss_weight_lambda=0.5,
lambda_way=1,
ignore_index=255):
super(RMILoss, self).__init__()
self.num_classes = num_classes
# radius choices
assert rmi_radius in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.rmi_radius = rmi_radius
assert rmi_pool_way in [0, 1, 2, 3]
self.rmi_pool_way = rmi_pool_way
# set the pool_size = rmi_pool_stride
assert rmi_pool_size == rmi_pool_stride
self.rmi_pool_size = rmi_pool_size
self.rmi_pool_stride = rmi_pool_stride
self.weight_lambda = loss_weight_lambda
self.lambda_way = lambda_way
# dimension of the distribution
self.half_d = self.rmi_radius * self.rmi_radius
self.d = 2 * self.half_d
self.kernel_padding = self.rmi_pool_size // 2
# ignore class
self.ignore_index = ignore_index
def forward(self, logits_4D, labels_4D, do_rmi=True):
# explicitly disable fp16 mode because torch.cholesky and
# torch.inverse aren't supported by half
        logits_4D = logits_4D.float()
        labels_4D = labels_4D.float()
if cfg.TRAIN.FP16:
with amp.disable_casts():
loss = self.forward_sigmoid(logits_4D, labels_4D, do_rmi=do_rmi)
else:
loss = self.forward_sigmoid(logits_4D, labels_4D, do_rmi=do_rmi)
return loss
def forward_sigmoid(self, logits_4D, labels_4D, do_rmi=False):
"""
        Uses the sigmoid operation for both the BCE and RMI terms.
Args:
logits_4D : [N, C, H, W], dtype=float32
labels_4D : [N, H, W], dtype=long
do_rmi : bool
"""
# label mask -- [N, H, W, 1]
label_mask_3D = labels_4D < self.num_classes
# valid label
valid_onehot_labels_4D = \
F.one_hot(labels_4D.long() * label_mask_3D.long(),
num_classes=self.num_classes).float()
label_mask_3D = label_mask_3D.float()
label_mask_flat = label_mask_3D.view([-1, ])
valid_onehot_labels_4D = valid_onehot_labels_4D * \
label_mask_3D.unsqueeze(dim=3)
valid_onehot_labels_4D.requires_grad_(False)
# PART I -- calculate the sigmoid binary cross entropy loss
valid_onehot_label_flat = \
valid_onehot_labels_4D.view([-1, self.num_classes]).requires_grad_(False)
logits_flat = logits_4D.permute(0, 2, 3, 1).contiguous().view([-1, self.num_classes])
# binary loss, multiplied by the not_ignore_mask
valid_pixels = torch.sum(label_mask_flat)
binary_loss = F.binary_cross_entropy_with_logits(logits_flat,
target=valid_onehot_label_flat,
weight=label_mask_flat.unsqueeze(dim=1),
reduction='sum')
bce_loss = torch.div(binary_loss, valid_pixels + 1.0)
if not do_rmi:
return bce_loss
# PART II -- get rmi loss
# onehot_labels_4D -- [N, C, H, W]
probs_4D = logits_4D.sigmoid() * label_mask_3D.unsqueeze(dim=1) + _CLIP_MIN
valid_onehot_labels_4D = valid_onehot_labels_4D.permute(0, 3, 1, 2).requires_grad_(False)
# get region mutual information
rmi_loss = self.rmi_lower_bound(valid_onehot_labels_4D, probs_4D)
# add together
#logx.msg(f'lambda_way {self.lambda_way}')
#logx.msg(f'bce_loss {bce_loss} weight_lambda {self.weight_lambda} rmi_loss {rmi_loss}')
if self.lambda_way:
final_loss = self.weight_lambda * bce_loss + rmi_loss * (1 - self.weight_lambda)
else:
final_loss = bce_loss + rmi_loss * self.weight_lambda
return final_loss
def inverse(self, x):
return torch.inverse(x)
def rmi_lower_bound(self, labels_4D, probs_4D):
"""
calculate the lower bound of the region mutual information.
Args:
labels_4D : [N, C, H, W], dtype=float32
probs_4D : [N, C, H, W], dtype=float32
"""
assert labels_4D.size() == probs_4D.size()
p, s = self.rmi_pool_size, self.rmi_pool_stride
if self.rmi_pool_stride > 1:
if self.rmi_pool_way == 0:
labels_4D = F.max_pool2d(labels_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
probs_4D = F.max_pool2d(probs_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
elif self.rmi_pool_way == 1:
labels_4D = F.avg_pool2d(labels_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
probs_4D = F.avg_pool2d(probs_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
elif self.rmi_pool_way == 2:
# interpolation
shape = labels_4D.size()
new_h, new_w = shape[2] // s, shape[3] // s
labels_4D = F.interpolate(labels_4D, size=(new_h, new_w), mode='nearest')
probs_4D = F.interpolate(probs_4D, size=(new_h, new_w), mode='bilinear', align_corners=True)
else:
raise NotImplementedError("Pool way of RMI is not defined!")
# we do not need the gradient of label.
label_shape = labels_4D.size()
n, c = label_shape[0], label_shape[1]
# combine the high dimension points from label and probability map. new shape [N, C, radius * radius, H, W]
la_vectors, pr_vectors = rmi_utils.map_get_pairs(labels_4D, probs_4D, radius=self.rmi_radius, is_combine=0)
la_vectors = la_vectors.view([n, c, self.half_d, -1]).type(torch.cuda.DoubleTensor).requires_grad_(False)
pr_vectors = pr_vectors.view([n, c, self.half_d, -1]).type(torch.cuda.DoubleTensor)
# small diagonal matrix, shape = [1, 1, radius * radius, radius * radius]
diag_matrix = torch.eye(self.half_d).unsqueeze(dim=0).unsqueeze(dim=0)
# the mean and covariance of these high dimension points
# Var(X) = E(X^2) - E(X) E(X), N * Var(X) = X^2 - X E(X)
la_vectors = la_vectors - la_vectors.mean(dim=3, keepdim=True)
la_cov = torch.matmul(la_vectors, la_vectors.transpose(2, 3))
pr_vectors = pr_vectors - pr_vectors.mean(dim=3, keepdim=True)
pr_cov = torch.matmul(pr_vectors, pr_vectors.transpose(2, 3))
# https://github.com/pytorch/pytorch/issues/7500
# waiting for batched torch.cholesky_inverse()
# pr_cov_inv = torch.inverse(pr_cov + diag_matrix.type_as(pr_cov) * _POS_ALPHA)
pr_cov_inv = self.inverse(pr_cov + diag_matrix.type_as(pr_cov) * _POS_ALPHA)
# if the dimension of the point is less than 9, you can use the below function
# to acceleration computational speed.
#pr_cov_inv = utils.batch_cholesky_inverse(pr_cov + diag_matrix.type_as(pr_cov) * _POS_ALPHA)
la_pr_cov = torch.matmul(la_vectors, pr_vectors.transpose(2, 3))
        # the approximation of the variance, det(c A) = c^n det(A), A is in n x n shape;
# then log det(c A) = n log(c) + log det(A).
# appro_var = appro_var / n_points, we do not divide the appro_var by number of points here,
# and the purpose is to avoid underflow issue.
# If A = A^T, A^-1 = (A^-1)^T.
appro_var = la_cov - torch.matmul(la_pr_cov.matmul(pr_cov_inv), la_pr_cov.transpose(-2, -1))
#appro_var = la_cov - torch.chain_matmul(la_pr_cov, pr_cov_inv, la_pr_cov.transpose(-2, -1))
#appro_var = torch.div(appro_var, n_points.type_as(appro_var)) + diag_matrix.type_as(appro_var) * 1e-6
# The lower bound. If A is nonsingular, ln( det(A) ) = Tr( ln(A) ).
rmi_now = 0.5 * rmi_utils.log_det_by_cholesky(appro_var + diag_matrix.type_as(appro_var) * _POS_ALPHA)
#rmi_now = 0.5 * torch.logdet(appro_var + diag_matrix.type_as(appro_var) * _POS_ALPHA)
# mean over N samples. sum over classes.
rmi_per_class = rmi_now.view([-1, self.num_classes]).mean(dim=0).float()
#is_half = False
#if is_half:
# rmi_per_class = torch.div(rmi_per_class, float(self.half_d / 2.0))
#else:
rmi_per_class = torch.div(rmi_per_class, float(self.half_d))
rmi_loss = torch.sum(rmi_per_class) if _IS_SUM else torch.mean(rmi_per_class)
return rmi_loss
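# A minimal smoke test for RMILoss (an illustrative addition, not part of the
# original file). It assumes a CUDA device, since rmi_lower_bound casts the
# stacked point vectors to torch.cuda.DoubleTensor.
def _rmi_loss_smoke_test(num_classes=19):
    if not torch.cuda.is_available():
        return None
    criterion = RMILoss(num_classes=num_classes).cuda()
    logits = torch.randn(2, num_classes, 64, 64).cuda()
    labels = torch.randint(0, num_classes, (2, 64, 64)).cuda()
    loss = criterion(logits, labels)
    assert loss.dim() == 0  # BCE + RMI reduce to a single scalar
    return loss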
| semantic-segmentation-main | loss/rmi.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from runx.logx import logx
from config import cfg
from loss.rmi import RMILoss
def get_loss(args):
"""
Get the criterion based on the loss function
args: commandline arguments
return: criterion, criterion_val
"""
if args.rmi_loss:
criterion = RMILoss(
num_classes=cfg.DATASET.NUM_CLASSES,
ignore_index=cfg.DATASET.IGNORE_LABEL).cuda()
elif args.img_wt_loss:
criterion = ImageBasedCrossEntropyLoss2d(
classes=cfg.DATASET.NUM_CLASSES,
ignore_index=cfg.DATASET.IGNORE_LABEL,
upper_bound=args.wt_bound, fp16=args.fp16).cuda()
elif args.jointwtborder:
criterion = ImgWtLossSoftNLL(
classes=cfg.DATASET.NUM_CLASSES,
ignore_index=cfg.DATASET.IGNORE_LABEL,
upper_bound=args.wt_bound).cuda()
else:
criterion = CrossEntropyLoss2d(
ignore_index=cfg.DATASET.IGNORE_LABEL).cuda()
criterion_val = CrossEntropyLoss2d(
weight=None, ignore_index=cfg.DATASET.IGNORE_LABEL).cuda()
return criterion, criterion_val
class ImageBasedCrossEntropyLoss2d(nn.Module):
"""
Image Weighted Cross Entropy Loss
"""
def __init__(self, classes, weight=None, ignore_index=cfg.DATASET.IGNORE_LABEL,
norm=False, upper_bound=1.0, fp16=False):
super(ImageBasedCrossEntropyLoss2d, self).__init__()
logx.msg("Using Per Image based weighted loss")
self.num_classes = classes
self.nll_loss = nn.NLLLoss(weight, reduction='mean',
ignore_index=ignore_index)
self.norm = norm
self.upper_bound = upper_bound
self.batch_weights = cfg.BATCH_WEIGHTING
self.fp16 = fp16
def calculate_weights(self, target):
"""
Calculate weights of classes based on the training crop
"""
bins = torch.histc(target, bins=self.num_classes, min=0.0,
max=self.num_classes)
hist_norm = bins.float() / bins.sum()
if self.norm:
hist = ((bins != 0).float() * self.upper_bound *
(1 / hist_norm)) + 1.0
else:
hist = ((bins != 0).float() * self.upper_bound *
(1. - hist_norm)) + 1.0
return hist
def forward(self, inputs, targets, do_rmi=None):
if self.batch_weights:
weights = self.calculate_weights(targets)
self.nll_loss.weight = weights
loss = 0.0
for i in range(0, inputs.shape[0]):
if not self.batch_weights:
weights = self.calculate_weights(targets)
if self.fp16:
weights = weights.half()
self.nll_loss.weight = weights
loss += self.nll_loss(F.log_softmax(inputs[i].unsqueeze(0), dim=1),
targets[i].unsqueeze(0),)
return loss
class CrossEntropyLoss2d(nn.Module):
"""
Cross Entroply NLL Loss
"""
def __init__(self, weight=None, ignore_index=cfg.DATASET.IGNORE_LABEL,
reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
logx.msg("Using Cross Entropy Loss")
self.nll_loss = nn.NLLLoss(weight, reduction=reduction,
ignore_index=ignore_index)
def forward(self, inputs, targets, do_rmi=None):
return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
def customsoftmax(inp, multihotmask):
"""
Custom Softmax
"""
    soft = F.softmax(inp, dim=1)
    # For border pixels the multi-hot mask marks every admissible class: sum
    # the softmax mass over those classes, broadcast it back through the mask,
    # and take the element-wise max with the raw softmax so that predicting
    # any admissible class is not penalized
return torch.log(
torch.max(soft,
(multihotmask * (soft * multihotmask).sum(1, keepdim=True)))
)
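# A small numeric illustration of customsoftmax (an illustrative addition, not
# part of the original file): for a border pixel whose multi-hot mask admits
# classes 0 and 1, both admitted entries end up holding log(p0 + p1).
def _customsoftmax_example():
    inp = torch.tensor([[[[2.0]], [[1.0]], [[0.0]]]])   # [1, 3, 1, 1] logits
    mask = torch.tensor([[[[1.0]], [[1.0]], [[0.0]]]])  # classes 0 and 1 admitted
    # result along the class dim: [log(p0 + p1), log(p0 + p1), log(p2)]
    return customsoftmax(inp, mask)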
class ImgWtLossSoftNLL(nn.Module):
"""
Relax Loss
"""
def __init__(self, classes, ignore_index=cfg.DATASET.IGNORE_LABEL, weights=None,
upper_bound=1.0, norm=False):
super(ImgWtLossSoftNLL, self).__init__()
self.weights = weights
self.num_classes = classes
self.ignore_index = ignore_index
self.upper_bound = upper_bound
self.norm = norm
self.batch_weights = cfg.BATCH_WEIGHTING
self.fp16 = False
def calculate_weights(self, target):
"""
Calculate weights of the classes based on training crop
"""
if len(target.shape) == 3:
hist = np.sum(target, axis=(1, 2)) * 1.0 / target.sum()
else:
hist = np.sum(target, axis=(0, 2, 3)) * 1.0 / target.sum()
if self.norm:
hist = ((hist != 0) * self.upper_bound * (1 / hist)) + 1
else:
hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1
return hist[:-1]
def custom_nll(self, inputs, target, class_weights, border_weights, mask):
"""
NLL Relaxed Loss Implementation
"""
if cfg.REDUCE_BORDER_EPOCH != -1 and \
cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH:
border_weights = 1 / border_weights
target[target > 1] = 1
wts = class_weights.unsqueeze(0).unsqueeze(2).unsqueeze(3)
if self.fp16:
smax = customsoftmax(inputs, target[:, :-1, :, :].half())
loss_matrix = (-1 / border_weights *
(target[:, :-1, :, :].half() *
wts * smax).sum(1)) * (1. - mask.half())
else:
smax = customsoftmax(inputs, target[:, :-1, :, :].float())
loss_matrix = (-1 / border_weights *
(target[:, :-1, :, :].float() *
wts * smax).sum(1)) * (1. - mask.float())
loss = loss_matrix.sum()
# +1 to prevent division by 0
loss = loss / (target.shape[0] * target.shape[2] * target.shape[3] -
mask.sum().item() + 1)
return loss
def forward(self, inputs, target):
if self.fp16:
weights = target[:, :-1, :, :].sum(1).half()
else:
weights = target[:, :-1, :, :].sum(1).float()
ignore_mask = (weights == 0)
weights[ignore_mask] = 1
loss = 0
target_cpu = target.data.cpu().numpy()
if self.batch_weights:
class_weights = self.calculate_weights(target_cpu)
for i in range(0, inputs.shape[0]):
if not self.batch_weights:
class_weights = self.calculate_weights(target_cpu[i])
nll_loss = self.custom_nll(
inputs[i].unsqueeze(0),
target[i].unsqueeze(0),
class_weights=torch.Tensor(class_weights).cuda(),
border_weights=weights, mask=ignore_mask[i])
loss = loss + nll_loss
return loss
class MultiChannelBCEWithLogits(nn.Module):
def __init__(self, size_average=False, reduce=True, use_beta=True, divide_by_N=True,
ignore_label=cfg.DATASET.IGNORE_LABEL,
sum_by_non_zero_weights=False):
super(MultiChannelBCEWithLogits, self).__init__()
self.size_average = size_average
self.reduce = reduce
self.use_beta = use_beta
self.divide_by_N = divide_by_N
self.ignore_label = ignore_label
self._first_log = True
self.sum_by_non_zero_weights = sum_by_non_zero_weights
print('self.use_beta: ', use_beta)
print('self.divide_by_N: ', divide_by_N)
print('self.sum_by_non_zero_weights', self.sum_by_non_zero_weights)
def _assertNoGrad(self, variable):
assert not variable.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
def forward_simple(self, input, target, return_raw_cost=False):
self._assertNoGrad(target)
batch_size = target.shape[0]
# compute class agnostic beta
# class agnostic counting
class_agn_img = target.max(dim=1, keepdim=True)[0].view(batch_size, -1)
count_pos = (class_agn_img == 1.0).sum(dim=1).float()
count_neg = (class_agn_img == 0.0).sum(dim=1).float()
count_all = count_pos + count_neg
beta = count_neg / (count_all + 1e-8)
beta = beta.unsqueeze(1)
target = target.contiguous().view(batch_size, -1)
input = input.view(batch_size, -1)
mask = torch.ones_like(target).masked_fill(target == self.ignore_label, 0)
target = target.masked_fill(target == self.ignore_label, 0)
if not self.use_beta:
weights = 1.
else:
weights = 1. - beta + (2. * beta - 1.) * target
weights = weights * mask
if return_raw_cost:
cost = F.binary_cross_entropy_with_logits(input, target,
weight=weights,
size_average=False,
reduce=False)
return cost
if not self.sum_by_non_zero_weights:
cost = F.binary_cross_entropy_with_logits(input, target,
weight=weights,
size_average=self.size_average,
reduce=self.reduce)
else:
cost = F.binary_cross_entropy_with_logits(input, target,
weight=weights,
size_average=False,
reduce=False)
cost = cost.sum() / (torch.nonzero(weights).size(0) + 1e-8)
if not self.divide_by_N:
return cost
else:
return cost / batch_size
def forward(self, inputs, targets, inputs_weights):
#losses = []
losses = 0.0
for _input, _target, _weight in zip(inputs, targets, inputs_weights):
if _weight != 0.0:
loss = _weight * self.forward_simple(_input, _target)
#losses.append(loss)
losses += loss
return losses
class EdgeWeightedCrossEntropyLoss2d(nn.Module):
def __init__(self, classes, weight=None, size_average=False,
ignore_index=cfg.DATASET.IGNORE_LABEL,
norm=False, upper_bound=1.0):
super(EdgeWeightedCrossEntropyLoss2d, self).__init__()
logx.msg("Using Per Image based weighted loss")
self.num_classes = classes
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)
self.norm = norm
self.upper_bound = upper_bound
self.batch_weights = cfg.BATCH_WEIGHTING
def calculateWeights(self, target):
        hist = np.histogram(target.flatten(), range(
            self.num_classes + 1), density=True)[0]
if self.norm:
hist = ((hist != 0) * self.upper_bound * (1 / hist)) + 1
else:
hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1
return hist
def forward(self, inputs, targets, edges):
target_cpu = targets.data.cpu().numpy()
if self.batch_weights:
weights = self.calculateWeights(target_cpu)
self.nll_loss.weight = torch.Tensor(weights).cuda()
loss = 0.0
for i in range(0, inputs.shape[0]):
if not self.batch_weights:
weights = self.calculateWeights(target_cpu[i])
self.nll_loss.weight = torch.Tensor(weights).cuda()
            out = self.nll_loss(F.log_softmax(inputs[i].unsqueeze(0), dim=1),
targets[i].unsqueeze(0))
out = torch.mul(edges[i].unsqueeze(0), out)
loss += out.sum() / (800 * 800)
return loss
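# A hedged usage sketch of get_loss (an illustrative addition, not part of the
# original file). It exercises the plain and image-weighted cross-entropy
# criteria; the relaxed-border loss (--jointwtborder) instead expects multi-hot
# targets with an extra border channel, so it is not covered here.
def _get_loss_example(args):
    criterion, criterion_val = get_loss(args)  # both are moved to CUDA inside
    logits = torch.randn(2, cfg.DATASET.NUM_CLASSES, 32, 32).cuda()
    labels = torch.randint(0, cfg.DATASET.NUM_CLASSES, (2, 32, 32)).cuda()
    return criterion(logits, labels), criterion_val(logits, labels)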
| semantic-segmentation-main | loss/utils.py |
"""
This code adapted from: https://github.com/LiyuanLucasLiu/RAdam
From the paper: https://arxiv.org/abs/1908.03265
"""
import math
import torch
# pylint: disable=no-name-in-module
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""RAdam optimizer"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
"""
Init
:param params: parameters to optimize
:param lr: learning rate
:param betas: beta
:param eps: numerical precision
:param weight_decay: weight decay weight
"""
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for _ in range(10)]
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients'
)
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = (
state['exp_avg_sq'].type_as(p_data_fp32)
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = (
N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = (
group['lr'] *
math.sqrt(
(1 - beta2_t) * (N_sma - 4) /
(N_sma_max - 4) * (N_sma - 2) /
N_sma * N_sma_max / (N_sma_max - 2)
) / (1 - beta1 ** state['step'])
)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group['weight_decay'] * group['lr']
                    )
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
p.data.copy_(p_data_fp32)
return loss
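# A minimal usage sketch for RAdam (an illustrative addition, not part of the
# original file): it is a drop-in replacement for torch.optim.Adam.
def _radam_example():
    model = torch.nn.Linear(10, 2)
    opt = RAdam(model.parameters(), lr=1e-3, weight_decay=1e-4)
    x, y = torch.randn(4, 10), torch.randn(4, 2)
    loss = torch.nn.functional.mse_loss(model(x), y)
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()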
| semantic-segmentation-main | loss/radam.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# Optimizer and scheduler related tasks
import math
import torch
from torch import optim
from runx.logx import logx
from config import cfg
from loss.radam import RAdam
def get_optimizer(args, net):
"""
Decide Optimizer (Adam or SGD)
"""
param_groups = net.parameters()
if args.optimizer == 'sgd':
optimizer = optim.SGD(param_groups,
lr=args.lr,
weight_decay=args.weight_decay,
momentum=args.momentum,
nesterov=False)
elif args.optimizer == 'adam':
optimizer = optim.Adam(param_groups,
lr=args.lr,
weight_decay=args.weight_decay,
amsgrad=args.amsgrad)
elif args.optimizer == 'radam':
optimizer = RAdam(param_groups,
lr=args.lr,
weight_decay=args.weight_decay)
else:
raise ValueError('Not a valid optimizer')
def poly_schd(epoch):
return math.pow(1 - epoch / args.max_epoch, args.poly_exp)
def poly2_schd(epoch):
if epoch < args.poly_step:
poly_exp = args.poly_exp
else:
poly_exp = 2 * args.poly_exp
return math.pow(1 - epoch / args.max_epoch, poly_exp)
if args.lr_schedule == 'scl-poly':
if cfg.REDUCE_BORDER_EPOCH == -1:
raise ValueError('ERROR Cannot Do Scale Poly')
rescale_thresh = cfg.REDUCE_BORDER_EPOCH
scale_value = args.rescale
lambda1 = lambda epoch: \
math.pow(1 - epoch / args.max_epoch,
args.poly_exp) if epoch < rescale_thresh else scale_value * math.pow(
1 - (epoch - rescale_thresh) / (args.max_epoch - rescale_thresh),
args.repoly)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
elif args.lr_schedule == 'poly2':
scheduler = optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=poly2_schd)
elif args.lr_schedule == 'poly':
scheduler = optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=poly_schd)
else:
raise ValueError('unknown lr schedule {}'.format(args.lr_schedule))
return optimizer, scheduler
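# A short worked example of the 'poly' schedule (an illustrative addition, not
# part of the original file): the learning rate decays as
# lr * (1 - epoch / max_epoch) ** poly_exp and reaches zero at the last epoch.
# The numbers below assume the train.py defaults lr=0.002, max_epoch=180,
# poly_exp=1.0.
def _poly_schedule_example():
    lr, max_epoch, poly_exp = 0.002, 180, 1.0
    lrs = [lr * math.pow(1 - epoch / max_epoch, poly_exp)
           for epoch in (0, 45, 90, 135)]
    return lrs  # [0.002, 0.0015, 0.001, 0.0005]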
def load_weights(net, optimizer, snapshot_file, restore_optimizer_bool=False):
"""
Load weights from snapshot file
"""
logx.msg("Loading weights from model {}".format(snapshot_file))
net, optimizer = restore_snapshot(net, optimizer, snapshot_file, restore_optimizer_bool)
return net, optimizer
def restore_snapshot(net, optimizer, snapshot, restore_optimizer_bool):
"""
    Restore weights and optimizer (if needed) for resuming job.
"""
checkpoint = torch.load(snapshot, map_location=torch.device('cpu'))
logx.msg("Checkpoint Load Compelete")
if optimizer is not None and 'optimizer' in checkpoint and restore_optimizer_bool:
optimizer.load_state_dict(checkpoint['optimizer'])
if 'state_dict' in checkpoint:
net = forgiving_state_restore(net, checkpoint['state_dict'])
else:
net = forgiving_state_restore(net, checkpoint)
return net, optimizer
def restore_opt(optimizer, checkpoint):
    assert 'optimizer' in checkpoint, "can't find optimizer in checkpoint"
optimizer.load_state_dict(checkpoint['optimizer'])
def restore_net(net, checkpoint):
    assert 'state_dict' in checkpoint, "can't find state_dict in checkpoint"
forgiving_state_restore(net, checkpoint['state_dict'])
def forgiving_state_restore(net, loaded_dict):
"""
    Handle partial loading when some tensors don't match up in size,
    because we want to use models that were trained with a different
    number of classes.
"""
net_state_dict = net.state_dict()
new_loaded_dict = {}
for k in net_state_dict:
new_k = k
if new_k in loaded_dict and net_state_dict[k].size() == loaded_dict[new_k].size():
new_loaded_dict[k] = loaded_dict[new_k]
else:
logx.msg("Skipped loading parameter {}".format(k))
net_state_dict.update(new_loaded_dict)
net.load_state_dict(net_state_dict)
return net
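# A hedged usage sketch of forgiving_state_restore (an illustrative addition,
# not part of the original file): loading a snapshot trained with a different
# class count restores every shape-matching tensor and skips the rest. The
# snapshot_path argument is a hypothetical checkpoint file.
def _partial_restore_example(net, snapshot_path):
    checkpoint = torch.load(snapshot_path, map_location=torch.device('cpu'))
    state_dict = checkpoint.get('state_dict', checkpoint)
    return forgiving_state_restore(net, state_dict)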
| semantic-segmentation-main | loss/optimizer.py |