import json
import logging
import multiprocessing
import sys

from multiprocessing import Process
import os
import platform
import shutil
import subprocess
import threading

import time
import traceback
import urllib
import uuid
import zipfile
from urllib.parse import urlparse, urljoin

import requests

import yaml

import fedml
from fedml import mlops
from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject
from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager

from fedml.computing.scheduler.scheduler_core.compute_utils import ComputeUtils
from fedml.core.distributed.communication.s3.remote_storage import S3Storage
from .device_model_cache import FedMLModelCache
from ..comm_utils import sys_utils, security_utils

from ..comm_utils.container_utils import ContainerUtils

from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog

from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
from ..comm_utils.yaml_utils import load_yaml_config
from .device_client_constants import ClientConstants

from ....core.mlops.mlops_metrics import MLOpsMetrics

from ....core.mlops.mlops_configs import MLOpsConfigs
from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
from ....core.mlops.mlops_status import MLOpsStatus
from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
from .device_model_deployment import start_deployment, run_http_inference_with_curl_request
from .device_client_data_interface import FedMLClientDataInterface
from ....core.mlops.mlops_utils import MLOpsUtils
from ..comm_utils.job_utils import JobRunnerUtils
from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
from .device_mqtt_inference_protocol import FedMLMqttInference
from .device_model_db import FedMLModelDatabase
from ..comm_utils.constants import SchedulerConstants
from fedml.computing.scheduler.comm_utils.job_monitor import JobMonitor

from .device_replica_handler import FedMLDeviceReplicaHandler

from fedml.computing.scheduler.scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
import ssl


class RunnerError(Exception):
    """Raised when the runner receives a stop request before finishing."""


class RunnerCompletedError(Exception):
    """Raised when the runner observes the completed event and should wind down."""


class FedMLClientRunner:
    """Worker-side runner that handles model deployment requests (add / remove /
    update / rollback of serving replicas) received from the master node.
    """
    # Sentinel string a bootstrap script prints to signal successful completion.
    FEDML_BOOTSTRAP_RUN_OK = "[FedML]Bootstrap Finished"

    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0):
        """Initialize per-device runner state for one deployment request."""
        # --- deployment process / event bookkeeping ---
        self.local_api_process = None
        self.run_process_event = None
        self.run_process_event_map = {}
        self.run_process_completed_event = None
        self.run_process_completed_event_map = {}
        self.run_inference_event_map = {}
        self.run_inference_response_map = {}
        self.run_process_map = {}

        # --- device status and MQTT messaging state ---
        self.device_status = None
        self.current_training_status = None
        self.mqtt_mgr = None
        self.client_mqtt_mgr = None
        self.client_mqtt_is_connected = False
        self.client_mqtt_lock = None

        # --- identity and request context ---
        self.edge_id = edge_id
        self.run_id = run_id
        self.unique_device_id = None
        self.args = args
        self.request_json = request_json
        self.version = args.version
        self.device_id = args.device_id

        # Working directory: default to this module's directory, but prefer an
        # explicit override supplied by the caller via args.
        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
        if args.current_running_dir is not None:
            self.cur_dir = args.current_running_dir

        self.sudo_cmd = ""
        self.is_mac = platform.system() == "Darwin"

        # --- package / config directory layout ---
        self.agent_config = agent_config
        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
        self.fedml_data_dir = self.fedml_data_base_package_dir
        self.fedml_config_dir = os.path.join("/", "fedml", "conf")

        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {}

        # --- metrics, inference and redis connection defaults ---
        self.mlops_metrics = None
        self.client_active_list = {}
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"

        self.model_runner_mapping = {}
        self.ntp_offset = MLOpsUtils.get_ntp_offset()
        self.running_request_json = {}
        self.endpoint_inference_runners = {}
        self.mqtt_inference_obj = None

        self.subscribed_topics = []
        self.user_name = None

        # Set later once the replica diff from the master is known.
        self.replica_handler = None

    def unzip_file(self, zip_file, unzip_file_path) -> str:
        unziped_file_name = ""
        if zipfile.is_zipfile(zip_file):
            with zipfile.ZipFile(zip_file, "r") as zipf:
                zipf.extractall(unzip_file_path)
                unziped_file_name = zipf.namelist()[0]
        else:
            raise Exception("Invalid zip file {}".format(zip_file))

        return unziped_file_name

    def retrieve_and_unzip_package(self, package_name, package_url):
        """
        Download the package from the url and unzip it to the local package directory
        ~/.fedml/fedml-model-client/fedml/model_packages/${end_point_id}_${end_point_name}_${model_name}_${model_version}
        Under this folder, there should be the zipped file and the unzipped folder.
        the zipped file starts with fedml_run_${end_point_id}_${end_point_name}_${model_name}_${model_version}

        Returns:
            (unzip_package_full_path, model_bin_file): the unzipped package directory
            and the legacy binary model file path (deprecated).
        """
        # Models root directory
        local_package_path = ClientConstants.get_model_package_dir()
        os.makedirs(local_package_path, exist_ok=True)

        # Specify this model directory using ${end_point_id}_${end_point_name}_${model_name}_${model_version}
        run_id = self.request_json["end_point_id"]
        end_point_name = self.request_json["end_point_name"]
        model_config = self.request_json["model_config"]
        model_name = model_config["model_name"]
        model_version = model_config["model_version"]

        model_version = model_version.replace(" ", "-")     # Avoid using space for folder name
        model_version = model_version.replace(":", "-")     # Since docker mount will conflict with ":"

        this_run_model_dir = f"{run_id}_{end_point_name}_{model_name}_{model_version}"
        this_run_model_full_path = os.path.join(local_package_path, this_run_model_dir)
        os.makedirs(this_run_model_full_path, exist_ok=True)

        # Download the zipped package, overwrite it even if it exists
        filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url)
        local_package_file = os.path.join(this_run_model_full_path,
                                          f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
        if os.path.exists(local_package_file):
            os.remove(local_package_file)
        logging.info("Download from package_url {}".format(package_url))
        # SECURITY NOTE: this disables TLS certificate verification process-wide for
        # urllib downloads. Kept for backward compatibility, but it should be scoped
        # to this request (or made configurable) rather than applied globally.
        ssl._create_default_https_context = ssl._create_unverified_context
        urllib.request.urlretrieve(package_url, local_package_file,
                                   reporthook=self.package_download_progress)

        # Unzip the package in the same folder, overwrite the unzipped folder even if it exists.
        unzip_package_path = os.path.join(this_run_model_full_path,
                                          f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
        # ignore_errors=True already suppresses removal failures, so no extra
        # try/except wrapper is needed here.
        shutil.rmtree(unzip_package_path, ignore_errors=True)
        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)
        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
        model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin")        # Will deprecated
        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
            local_package_file, unzip_package_path, unzip_package_full_path))

        return unzip_package_full_path, model_bin_file

    def retrieve_binary_model_file(self, package_name, package_url):
        """Download a raw binary model file and copy it into the per-package model dir.

        Args:
            package_name: file name used both for the download target and the
                destination sub-directory / file name.
            package_url: URL to fetch the binary model from.

        Returns:
            (unzip_package_path, dst_model_file): the per-package model directory and
            the copied model file path inside it.
        """
        # makedirs(exist_ok=True) is idempotent; no need for a prior exists() check.
        local_package_path = ClientConstants.get_model_package_dir()
        os.makedirs(local_package_path, exist_ok=True)

        # Download the binary file, overwriting any previous copy.
        local_package_file = os.path.join(local_package_path, package_name)
        if os.path.exists(local_package_file):
            os.remove(local_package_file)
        urllib.request.urlretrieve(package_url, local_package_file,
                                   reporthook=self.package_download_progress)

        # Copy the downloaded file into its own model directory.
        unzip_package_path = os.path.join(ClientConstants.get_model_dir(), package_name)
        os.makedirs(unzip_package_path, exist_ok=True)
        dst_model_file = os.path.join(unzip_package_path, package_name)
        if os.path.exists(local_package_file):
            shutil.copy(local_package_file, dst_model_file)

        return unzip_package_path, dst_model_file

    def package_download_progress(self, count, blksize, filesize):
        self.check_runner_stop_event()

        downloaded = count * blksize
        downloaded = filesize if downloaded > filesize else downloaded
        progress = (downloaded / filesize * 100) if filesize != 0 else 0
        progress_int = int(progress)
        downloaded_kb = format(downloaded / 1024, '.2f')

        # since this hook function is stateless, we need a state to avoid printing progress repeatedly
        if count == 0:
            self.prev_download_progress = 0
        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
            self.prev_download_progress = progress_int
            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))

    def build_dynamic_constrain_variables(self, run_id, run_config):
        """Intentionally a no-op for the model worker runner (kept for interface parity)."""
        pass

    def update_local_fedml_config(self, run_id, model_config, model_config_parameters):
        """Fetch the model package and overwrite its local YAML config with the
        parameters supplied in the deployment request.

        Returns:
            (unzip_package_path, model_bin_file, package_conf_object)
        """
        model_name = model_config["model_name"]
        model_storage_url = model_config["model_storage_url"]

        # Retrieve model package or model binary file.
        unzip_package_path, model_bin_file = self.retrieve_and_unzip_package(model_name, model_storage_url)

        # Inject the config from UI (request parameters) into the package yaml,
        # replacing whatever config shipped inside the package.
        package_conf_object = model_config_parameters
        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
        with open(fedml_local_config_file, "w") as config_file:
            yaml.dump(package_conf_object, config_file)

        logging.info("The package_conf_object is {}".format(package_conf_object))

        return unzip_package_path, model_bin_file, package_conf_object

    def build_dynamic_args(self, run_config, package_conf_object, base_dir):
        """Intentionally a no-op for the model worker runner (kept for interface parity)."""
        pass

    def download_model_package(self, package_name, package_url):
        """Download and unzip the model package from *package_url*.

        NOTE(review): retrieve_and_unzip_package() returns a
        (unzip_package_full_path, model_bin_file) tuple, so despite the singular
        variable name this method returns that tuple unmodified — confirm callers
        expect a tuple before changing it.
        """
        # Copy config file from the client
        unzip_package_path = self.retrieve_and_unzip_package(
            package_name, package_url
        )

        return unzip_package_path

    def run(self, process_event, completed_event):
        """Entry point of the deployment worker process.

        Stores the stop/completed events, prepares the local model database and the
        client MQTT manager, then delegates to run_impl(). Each failure path reports
        the corresponding status, releases GPU ids where appropriate, and the
        finally-block always stops log processing and releases the MQTT manager.

        Args:
            process_event: multiprocessing event set when the runner must stop.
            completed_event: multiprocessing event set when the runner has completed.
        """
        # print(f"Model worker runner process id {os.getpid()}, run id {self.run_id}")

        if platform.system() != "Windows":
            os.setsid()  # detach into a new session so the process group can be signaled as a whole

        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        # NOTE(review): this setdefault is a no-op — the key was just assigned above.
        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')

        self.run_process_event = process_event
        self.run_process_completed_event = completed_event
        run_id = self.request_json.get("end_point_id")

        try:
            # Local DB used to persist per-replica deployment results.
            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
            FedMLModelDatabase.get_instance().create_table()

            MLOpsUtils.set_ntp_offset(self.ntp_offset)
            self.setup_client_mqtt_mgr()

            if not self.run_impl():
                logging.info(
                    f"[endpoint/device][{run_id}/{self.edge_id}] "
                    f"Failed to run the model deployment. run_impl return False.")

                # This if condition only happens when run_impl return False in a controllable way
                # Under this condition, the run_impl itself should have handled the cleanup
                # So no need to self.release_gpu_ids(run_id)
        except RunnerError:
            # Stop was requested (see check_runner_stop_event): report "killed".
            logging.error(
                f"[endpoint/device][{run_id}/{self.edge_id}] "
                f"Failed due to RunnerError {traceback.format_exc()}")
            self.release_gpu_ids(run_id)

            self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
        except RunnerCompletedError:
            # Completed event observed: release resources without a status reset.
            logging.error(
                f"[endpoint/device][{run_id}/{self.edge_id}] "
                f"Failed due to RunnerCompletedError {traceback.format_exc()}")
            self.release_gpu_ids(run_id)

            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
        except Exception as e:
            # Unexpected failure: clean up, notify the master, then exit the process.
            logging.error(
                f"[endpoint/device][{run_id}/{self.edge_id}] "
                f"Failed due to exception {traceback.format_exc()}")

            self.cleanup_run_when_starting_failed()
            self.mlops_metrics.client_send_exit_train_msg(
                run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)

            self.release_gpu_ids(run_id)

            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
            time.sleep(2)
            sys.exit(1)
        finally:
            # Always stop log processing / sys perf reporting and release MQTT.
            logging.info("[Worker] Release resources after deployment.")
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
            if self.mlops_metrics is not None:
                self.mlops_metrics.stop_sys_perf()
            time.sleep(3)
            self.release_client_mqtt_mgr()

    def release_gpu_ids(self, run_id):
        """Release all GPU ids this device holds for *run_id*."""
        job_runner_utils = JobRunnerUtils.get_instance()
        job_runner_utils.release_gpu_ids(run_id, self.edge_id)

    def check_runner_stop_event(self):
        if self.run_process_event.is_set():
            logging.info("Received stopping event.")
            raise RunnerError("Runner stopped")

        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
            logging.info("Received completed event.")
            raise RunnerCompletedError("Runner completed")

    def run_impl(self):
        # Get deployment params
        run_id = self.request_json["end_point_id"]
        end_point_name = self.request_json["end_point_name"]
        device_ids = self.request_json["device_ids"]
        master_ip = self.request_json["master_node_ip"]
        model_config = self.request_json["model_config"]
        model_name = model_config["model_name"]
        model_id = model_config["model_id"]
        model_version = model_config["model_version"]
        model_config_parameters = self.request_json["parameters"]
        inference_port = model_config_parameters.get("worker_internal_port",
                                                     ClientConstants.MODEL_INFERENCE_DEFAULT_PORT)
        inference_port_external = model_config_parameters.get("worker_external_port", inference_port)
        inference_engine = model_config_parameters.get("inference_engine",
                                                       ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
        inference_end_point_id = run_id

        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

        logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
        if self.replica_handler is not None:
            logging.info(f"=================Worker replica Handler ======================"
                         f"Reconcile with num diff {self.replica_handler.replica_num_diff} "
                         f"and version diff {self.replica_handler.replica_version_diff}."
                         f"=============================================================")
        else:
            logging.error(f"[Worker] Replica handler is None.")
            return False

        self.check_runner_stop_event()

        # Report the deployment status to mlops
        self.mlops_metrics.report_client_training_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
            is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id)
        self.mlops_metrics.report_client_training_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING,
            is_from_model=True, run_id=run_id)

        self.check_runner_stop_event()

        # Reconcile the replica number (op: add, remove)
        prev_rank, op, op_num = self.replica_handler.reconcile_num_replica()

        # Reconcile the replica version (op: update)
        replica_rank_to_update = []
        if not op:
            replica_rank_to_update, op = self.replica_handler.reconcile_replica_version()

        if not op:
            logging.info("[Worker] No need to reconcile.")
            return True

        logging.info(
            f"================Worker Reconcile Operations ======================\n"
            f" op: {op}; op num: {op_num}.\n"
            f"==================================================================\n")

        # If not rollback, download package from MLOps; otherwise, use the backup package
        if op != "rollback":
            logging.info("Download and unzip model to local...")
            unzip_package_path, _, _ = \
                self.update_local_fedml_config(run_id, model_config, model_config_parameters)
            if unzip_package_path is None:
                logging.info("Failed to update local fedml config.")
                self.check_runner_stop_event()
                self.cleanup_run_when_starting_failed()
                self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
                                                              ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
                return False

            if not os.path.exists(unzip_package_path):
                logging.info("Failed to unzip file.")
                self.check_runner_stop_event()
                self.cleanup_run_when_starting_failed()
                self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
                                                              ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
                return False
        else:
            logging.info("Try to use backup package to rollback...")
            # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \
            # /${end_point_id}_${end_point_name}_${model_name}_${model_version}"
            backup_folder_full_path = None
            models_root_dir = ClientConstants.get_model_package_dir()

            # Find the version (notified by master) to rollback
            version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)]
            version_rollback_to = None
            for replica_no, rollback_ops in version_diff_dict.items():
                version_rollback_to = rollback_ops["new_version"]     # Note that new_version is the version to rollback
                break
            if version_rollback_to is None:
                logging.error(f"No old version found for run_id: {self.run_id} "
                              f"edge_id: {self.edge_id}, rollback failed. No old version found in request_json.")
                return False
            model_version = version_rollback_to

            # Format the version to match the folder name
            model_version_formatted = version_rollback_to.replace(" ", "-")
            model_version_formatted = model_version_formatted.replace(":", "-")

            last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
            for folder in os.listdir(models_root_dir):
                if last_run_folder_sub_fd in folder:
                    backup_folder_full_path = os.path.join(models_root_dir, folder)
                    break
            if backup_folder_full_path is None:
                logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} "
                              f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.")
                return False

            # Inside backup folder, find unzipped package with prefix unzip_fedml_run
            unzip_package_path_parent = None
            for folder in os.listdir(backup_folder_full_path):
                if folder.startswith("unzip_fedml_run"):
                    unzip_package_path_parent = os.path.join(backup_folder_full_path, folder)
                    break

            # Inside unzip folder, find the unzipped package, should be the only one
            unzip_package_path = None
            for folder in os.listdir(unzip_package_path_parent):
                if os.path.isdir(os.path.join(unzip_package_path_parent, folder)):
                    unzip_package_path = os.path.join(unzip_package_path_parent, folder)
                    break

            if unzip_package_path is None:
                logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} "
                              f"under {backup_folder_full_path}, rollback failed.")
                return False

        self.check_runner_stop_event()

        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
            "", "", model_version, {}, {}

        if op == "add":
            worker_ip = self.get_ip_address(self.request_json)
            for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
                try:
                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                        start_deployment(
                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
                            model_version=model_version, model_storage_local_path=unzip_package_path,
                            inference_model_name=model_name, inference_engine=inference_engine,
                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
                            master_device_id=device_ids[0], replica_rank=rank,
                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                        )
                except Exception as e:
                    inference_output_url = ""
                    logging.error(f"[Worker] Exception at deployment: {traceback.format_exc()}")

                if inference_output_url == "":
                    logging.error("[Worker] Failed to deploy the model.")

                    # Release the gpu occupancy
                    FedMLModelCache.get_instance().set_redis_params()
                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
                    logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for "
                                 f"failed deployment of replica no {rank + 1}.")

                    if replica_occupied_gpu_ids_str is not None:
                        replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
                        JobRunnerUtils.get_instance().release_partial_job_gpu(run_id,
                                                                              self.edge_id, replica_occupied_gpu_ids)

                    # Send failed result back to master
                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
                        inference_engine, model_metadata, model_config)

                    self.mlops_metrics.run_id = self.run_id
                    self.mlops_metrics.broadcast_client_training_status(
                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                        is_from_model=True, run_id=self.run_id)

                    self.mlops_metrics.client_send_exit_train_msg(
                        run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)

                    return False
                else:
                    # Send failed successful result back to master
                    logging.info("Finished deployment, continue to send results to master...")
                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                        model_id, model_name, inference_output_url, model_version, inference_port_external,
                        inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    if inference_port_external != inference_port:
                        # Save internal port to local db
                        logging.info("inference_port_external {} != inference_port {}".format(
                            inference_port_external, inference_port))
                        result_payload = self.construct_deployment_results(
                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                            model_id, model_name, inference_output_url, model_version, inference_port,
                            inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    FedMLModelDatabase.get_instance().set_deployment_result(
                        run_id, end_point_name, model_name, model_version, self.edge_id,
                        json.dumps(result_payload), replica_no=rank + 1)

                    logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.")
                    time.sleep(5)

            time.sleep(1)
            self.mlops_metrics.run_id = self.run_id
            self.mlops_metrics.broadcast_client_training_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                is_from_model=True, run_id=self.run_id)
            return True
        elif op == "remove":
            for rank_to_delete in range(prev_rank, prev_rank - op_num, -1):
                self.replica_handler.remove_replica(rank_to_delete)

                FedMLModelCache.get_instance().set_redis_params()
                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete + 1)

                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)

                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)

                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete)

                # Report the deletion msg to master
                result_payload = self.send_deployment_results(
                    end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED,
                    model_id, model_name, inference_output_url, model_version, inference_port_external,
                    inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)

                time.sleep(1)
                self.mlops_metrics.run_id = self.run_id
                self.mlops_metrics.broadcast_client_training_status(
                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                    is_from_model=True, run_id=self.run_id)

                # TODO: If delete all replica, then delete the job and related resources
                if rank_to_delete == 0:
                    pass
            return True
        elif op == "update" or op == "rollback":
            # Update is combine of delete and add
            worker_ip = self.get_ip_address(self.request_json)
            for rank in replica_rank_to_update:
                # Delete a replica (container) if exists
                self.replica_handler.remove_replica(rank)

                FedMLModelCache.get_instance().set_redis_params()
                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                    run_id, end_point_name, model_name, self.edge_id, rank + 1)

                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
                logging.info(f"Release gpu ids {replica_occupied_gpu_ids} for update / rollback.")

                # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency:
                try:
                    JobRunnerUtils.get_instance().release_partial_job_gpu(
                        run_id, self.edge_id, replica_occupied_gpu_ids)
                except Exception as e:
                    if op == "rollback":
                        pass
                    else:
                        logging.error(f"Failed to release gpu ids {replica_occupied_gpu_ids} for update.")
                        return False

                # Delete the deployment result from local db
                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                    run_id, end_point_name, model_name, self.edge_id, rank)

                logging.info(f"Delete replica with no {rank + 1} successfully.")
                time.sleep(1)

                # Add a replica (container)
                # TODO: Reduce the duplicated code
                logging.info(f"Start to deploy the model with replica no {rank + 1} ...")
                try:
                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                        start_deployment(
                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
                            model_version=model_version, model_storage_local_path=unzip_package_path,
                            inference_model_name=model_name, inference_engine=inference_engine,
                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
                            master_device_id=device_ids[0], replica_rank=rank,
                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                        )
                except Exception as e:
                    inference_output_url = ""
                    logging.error(f"Exception at deployment: {traceback.format_exc()}")

                if inference_output_url == "":
                    logging.error("Failed to deploy the model...")

                    # If update failed, should release this replica's gpu
                    FedMLModelCache.get_instance().set_redis_params()
                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                        run_id, end_point_name, model_name, self.edge_id, rank + 1)

                    replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)

                    JobRunnerUtils.get_instance().release_partial_job_gpu(
                        run_id, self.edge_id, replica_occupied_gpu_ids)

                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
                        inference_engine, model_metadata, model_config)

                    self.mlops_metrics.run_id = self.run_id
                    self.mlops_metrics.broadcast_client_training_status(
                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                        is_from_model=True, run_id=self.run_id)

                    self.mlops_metrics.client_send_exit_train_msg(
                        run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)

                    return False
                else:
                    logging.info("Finished deployment, continue to send results to master...")
                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                        model_id, model_name, inference_output_url, model_version, inference_port_external,
                        inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    if inference_port_external != inference_port:  # Save internal port to local db
                        logging.info("inference_port_external {} != inference_port {}".format(
                            inference_port_external, inference_port))
                        result_payload = self.construct_deployment_results(
                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                            model_id, model_name, inference_output_url, model_version, inference_port,
                            inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    FedMLModelDatabase.get_instance().set_deployment_result(
                        run_id, end_point_name, model_name, model_version, self.edge_id,
                        json.dumps(result_payload), replica_no=rank + 1)

                    logging.info(f"Update replica with no {rank + 1}  successfully. Op num {op_num}")
                    time.sleep(5)
            time.sleep(1)
            self.mlops_metrics.run_id = self.run_id
            self.mlops_metrics.broadcast_client_training_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                is_from_model=True, run_id=self.run_id)
            return True

        else:
            # The delete op will be handled by callback_delete_deployment
            logging.error(f"Unsupported op {op} with op num {op_num}")
            return False

    def construct_deployment_results(self, end_point_name, device_id, model_status,
                                     model_id, model_name, model_inference_url,
                                     model_version, inference_port, inference_engine,
                                     model_metadata, model_config, replica_no=1):
        """Build the deployment-result payload dict reported to the master.

        Note: ``device_id`` is accepted for signature symmetry with the
        status payload but is not included in this payload.
        """
        payload = dict()
        payload["end_point_id"] = self.run_id
        payload["end_point_name"] = end_point_name
        payload["model_id"] = model_id
        payload["model_name"] = model_name
        payload["model_url"] = model_inference_url
        payload["model_version"] = model_version
        payload["port"] = inference_port
        payload["inference_engine"] = inference_engine
        payload["model_metadata"] = model_metadata
        payload["model_config"] = model_config
        payload["model_status"] = model_status
        payload["inference_port"] = inference_port
        payload["replica_no"] = replica_no
        return payload

    def construct_deployment_status(self, end_point_name, device_id,
                                    model_id, model_name, model_version,
                                    model_inference_url, model_status,
                                    inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
                                    replica_no=1,     # start from 1
                                    ):
        """Build the per-replica deployment-status payload dict."""
        payload = dict()
        payload["end_point_id"] = self.run_id
        payload["end_point_name"] = end_point_name
        payload["device_id"] = device_id
        payload["model_id"] = model_id
        payload["model_name"] = model_name
        payload["model_version"] = model_version
        payload["model_url"] = model_inference_url
        payload["model_status"] = model_status
        payload["inference_port"] = inference_port
        payload["replica_no"] = replica_no
        return payload

    def send_deployment_results(self, end_point_name, device_id, model_status,
                                model_id, model_name, model_inference_url,
                                model_version, inference_port, inference_engine,
                                model_metadata, model_config, replica_no=1):
        """Publish one replica's deployment result to the master over MQTT.

        Returns the payload dict that was published.
        """
        topic = "model_device/model_device/return_deployment_result/{}/{}".format(
            self.run_id, device_id)

        payload = self.construct_deployment_results(
            end_point_name, device_id, model_status,
            model_id, model_name, model_inference_url,
            model_version, inference_port, inference_engine,
            model_metadata, model_config, replica_no=replica_no)

        logging.info("[client] send_deployment_results: topic {}, payload {}.".format(topic, payload))
        self.client_mqtt_mgr.send_message_json(topic, json.dumps(payload))
        return payload

    def send_deployment_status(self, end_point_name, device_id,
                               model_id, model_name, model_version,
                               model_inference_url, model_status,
                               inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
                               replica_no=1,     # start from 1
                               ):
        """Deprecated: status reporting happens elsewhere; kept only for interface compatibility."""
        return None

    def reset_devices_status(self, edge_id, status):
        """Broadcast *status* for *edge_id* under the current run via MLOps metrics."""
        metrics = self.mlops_metrics
        metrics.run_id = self.run_id
        metrics.edge_id = edge_id
        metrics.broadcast_client_training_status(
            edge_id, status, is_from_model=True, run_id=self.run_id)

    def cleanup_run_when_starting_failed(self):
        """Mark this device FAILED for the run and stop sys-perf reporting (best effort)."""
        logging.info("Cleanup run successfully when starting failed.")

        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
        time.sleep(2)

        # stop_sys_perf may fail if metrics were never started; ignore.
        try:
            self.mlops_metrics.stop_sys_perf()
        except Exception:
            pass
        time.sleep(1)

    def cleanup_run_when_finished(self):
        """Mark this device FINISHED for the run and stop sys-perf reporting (best effort)."""
        logging.info("Cleanup run successfully when finished.")

        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED)
        time.sleep(2)

        # stop_sys_perf may fail if metrics were never started; ignore.
        try:
            self.mlops_metrics.stop_sys_perf()
        except Exception:
            pass
        time.sleep(1)

    def on_client_mqtt_disconnected(self, mqtt_client_object):
        """MQTT disconnect callback: clear the connected flag under the client lock."""
        if self.client_mqtt_lock is None:
            self.client_mqtt_lock = threading.Lock()

        with self.client_mqtt_lock:
            self.client_mqtt_is_connected = False

    def on_client_mqtt_connected(self, mqtt_client_object):
        """MQTT connect callback: bind metrics to this connection and set the connected flag."""
        if self.mlops_metrics is None:
            self.mlops_metrics = MLOpsMetrics()
        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
        self.mlops_metrics.run_id = self.run_id

        if self.client_mqtt_lock is None:
            self.client_mqtt_lock = threading.Lock()

        with self.client_mqtt_lock:
            self.client_mqtt_is_connected = True

    def setup_client_mqtt_mgr(self):
        """Create, wire up and connect the client-side MQTT manager (idempotent).

        Also ensures an MLOpsMetrics instance is bound to the new connection.
        """
        if self.client_mqtt_mgr is not None:
            return

        if self.client_mqtt_lock is None:
            self.client_mqtt_lock = threading.Lock()

        mqtt_config = self.agent_config["mqtt_config"]
        client_id = "FedML_ModelClientAgent_Metrics_@{}@_{}_{}_{}".format(
            self.user_name, self.args.current_device_id, str(os.getpid()), str(uuid.uuid4()))
        self.client_mqtt_mgr = MqttManager(
            mqtt_config["BROKER_HOST"],
            mqtt_config["BROKER_PORT"],
            mqtt_config["MQTT_USER"],
            mqtt_config["MQTT_PWD"],
            mqtt_config["MQTT_KEEPALIVE"],
            client_id
        )

        self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected)
        self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected)
        self.client_mqtt_mgr.connect()
        self.client_mqtt_mgr.loop_start()

        if self.mlops_metrics is None:
            self.mlops_metrics = MLOpsMetrics()
        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
        self.mlops_metrics.run_id = self.run_id

    def release_client_mqtt_mgr(self):
        """Stop, disconnect and drop the client MQTT manager; any error is swallowed."""
        try:
            mgr = self.client_mqtt_mgr
            if mgr is not None:
                mgr.loop_stop()
                mgr.disconnect()

            self.client_mqtt_lock.acquire()
            if self.client_mqtt_mgr is not None:
                self.client_mqtt_is_connected = False
                self.client_mqtt_mgr = None
            self.client_mqtt_lock.release()
        except Exception:
            # Best-effort teardown; never raise from cleanup.
            pass

    def ota_upgrade(self, payload, request_json):
        """Decide whether this agent should OTA-upgrade fedml and, if so, perform
        the upgrade, persist the job as UPGRADING, and raise to force a restart.

        Returns silently when no upgrade is needed or the version check fails.
        """
        run_id = request_json["end_point_id"]
        force_ota = False
        ota_version = None

        # "parameters" / "common_args" may be absent or None; fall back to defaults.
        try:
            common_args = request_json.get("parameters", None).get("common_args", None)
            force_ota = common_args.get("force_ota", False)
            ota_version = common_args.get("ota_version", None)
        except Exception:
            pass

        if force_ota and ota_version is not None:
            # Explicitly requested version: upgrade iff it differs from ours.
            should_upgrade = ota_version != fedml.__version__
            upgrade_version = ota_version
        else:
            try:
                fedml_is_latest_version, _, remote_ver = \
                    sys_utils.check_fedml_is_latest_version(self.version)
            except Exception:
                # Version check failed (e.g. offline): skip upgrading.
                return
            should_upgrade = not fedml_is_latest_version
            upgrade_version = remote_ver

        if not should_upgrade:
            return

        # Persist the job in UPGRADING state so it can be recovered after restart.
        FedMLClientDataInterface.get_instance(). \
            save_started_job(run_id, self.edge_id, time.time(),
                             ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
                             ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
                             payload)

        logging.info(f"Upgrade to version {upgrade_version} ...")
        sys_utils.do_upgrade(self.version, upgrade_version)
        raise Exception("Restarting after upgraded...")

    def callback_start_deployment(self, topic, payload):
        """Handle a start_deployment MQTT message.

        Parses the request, starts the per-run log processor, then launches the
        deployment in a dedicated child process driven by a new FedMLClientRunner.

        Args:
            topic: MQTT topic the message arrived on (not used here).
            payload: JSON string; must contain "end_point_id".
        """
        # Get deployment params
        request_json = json.loads(payload)
        run_id = request_json["end_point_id"]
        inference_end_point_id = run_id

        # Best-effort refresh of MLOps configs before starting.
        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception as e:
            pass

        # Start log processor for current run
        run_id = inference_end_point_id
        self.args.run_id = run_id
        self.args.edge_id = self.edge_id
        MLOpsRuntimeLog(args=self.args).init_logs()
        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
            ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)

        # self.ota_upgrade(payload, request_json)

        # Start client with multiprocessing mode
        request_json["run_id"] = run_id
        run_id_str = str(run_id)
        self.request_json = request_json
        self.running_request_json[run_id_str] = request_json
        client_runner = FedMLClientRunner(
            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
        )
        client_runner.infer_host = self.get_ip_address(request_json)
        # Events used to signal stop / completion to the child runner process.
        self.run_process_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_event_map[run_id_str].clear()
        client_runner.run_process_event = self.run_process_event_map[run_id_str]
        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_completed_event_map[run_id_str].clear()
        client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
        self.model_runner_mapping[run_id_str] = client_runner

        # Replica Handler will be init for every deployment
        replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)
        client_runner.replica_handler = replica_handler

        self.run_id = run_id
        self.run_process_map[run_id_str] = Process(target=client_runner.run, args=(
            self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str]
        ))

        self.run_process_map[run_id_str].start()
        # Persist the child pid and runner info so the run can be found/cleaned up later.
        ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
        ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)

    def set_runner_stopped_event(self, run_id):
        """Signal the runner for *run_id* to stop and drop it from the mapping."""
        key = str(run_id)
        runner = self.model_runner_mapping.get(key, None)
        if runner is None:
            return
        if runner.run_process_event is not None:
            runner.run_process_event.set()
        self.model_runner_mapping.pop(key)

    def set_runner_completed_event(self, run_id):
        """Signal the runner for *run_id* that the run completed and drop it from the mapping."""
        key = str(run_id)
        runner = self.model_runner_mapping.get(key, None)
        if runner is None:
            return
        if runner.run_process_completed_event is not None:
            runner.run_process_completed_event.set()
        self.model_runner_mapping.pop(key)

    def callback_delete_deployment(self, topic, payload):
        """Handle a delete_deployment MQTT message.

        Tears down all replicas for the endpoint on this device, stops the runner
        process, releases GPU resources, and purges all related state from the
        local DB and the redis/local caches.
        """
        logging.info("[Worker] callback_delete_deployment")

        # Parse payload as the model message object.
        model_msg_object = FedMLModelMsgObject(topic, payload)

        # Delete all replicas on this device
        try:
            ClientConstants.remove_deployment(
                model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version,
                model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id)
        except Exception as e:
            # Best-effort: continue tearing down state even if container removal failed.
            logging.info(f"Exception when removing deployment {traceback.format_exc()}")
            pass

        # Tell the runner process for this endpoint to stop.
        self.set_runner_stopped_event(model_msg_object.run_id)

        logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] "
                     f"Release gpu resource when the worker deployment deleted.")
        JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id)

        # Forget the original start-deployment request for this run, if tracked.
        if self.running_request_json.get(str(model_msg_object.run_id)) is not None:
            try:
                self.running_request_json.pop(str(model_msg_object.run_id))
            except Exception as e:
                logging.error(f"Error when removing running_request_json: {traceback.format_exc()}")
                pass

        FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
        FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
            self.edge_id)

        # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db
        ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id))

        # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db
        ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id))

        # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db
        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id),
                                                                               str(model_msg_object.run_id))

        # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db
        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id),
                                                                                str(model_msg_object.run_id))

        # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-*
        FedMLModelCache.get_instance().set_redis_params()
        FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id,
                                                                  model_msg_object.end_point_name,
                                                                  model_msg_object.model_name, self.edge_id)

    def exit_run_with_exception_entry(self):
        """Set up MQTT, report the failed exit, and always release the MQTT manager.

        Exits the process with status 1 if setup/reporting itself raises.
        """
        try:
            self.setup_client_mqtt_mgr()
            self.exit_run_with_exception()
        except Exception as e:
            # The finally clause below runs even on sys.exit, so the extra
            # release_client_mqtt_mgr() call that used to live here was redundant.
            sys.exit(1)
        finally:
            self.release_client_mqtt_mgr()

    def exit_run_with_exception(self):
        """Clean up the run's processes and report FAILED status for this edge."""
        logging.info("Exit run successfully.")

        # Kill the learning process and the runner process bound to this run id.
        ClientConstants.cleanup_learning_process(self.run_id)
        ClientConstants.cleanup_run_process(self.run_id)

        self.mlops_metrics.report_client_id_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
            is_from_model=True, run_id=self.run_id)

        # Give the status message a moment to flush before the caller tears down MQTT.
        time.sleep(1)

    def callback_exit_train_with_exception(self, topic, payload):
        """Handle an exit-with-exception message by spawning a process that
        reports the failure for the referenced run.

        Messages flagged "is_retain" or lacking any run id are ignored.
        """
        request_json = json.loads(payload)
        if request_json.get("is_retain", False):
            return

        # The run id may arrive under several historical key names.
        run_id = None
        for key in ("runId", "run_id", "id"):
            run_id = request_json.get(key, None)
            if run_id is not None:
                break
        if run_id is None:
            return

        # Stop client with multiprocessing mode
        self.request_json = request_json
        client_runner = FedMLClientRunner(
            self.args, edge_id=self.edge_id, request_json=request_json,
            agent_config=self.agent_config, run_id=run_id)
        try:
            Process(target=client_runner.exit_run_with_exception_entry).start()
        except Exception:
            pass

    def cleanup_client_with_status(self):
        """Run the cleanup path matching the current device status over a fresh MQTT connection."""
        self.setup_client_mqtt_mgr()

        status = self.device_status
        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
            self.cleanup_run_when_finished()
        elif status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
            self.cleanup_run_when_starting_failed()

        self.release_client_mqtt_mgr()

    def callback_runner_id_status(self, topic, payload):
        """Handle a client status message; on FINISHED/FAILED run the cleanup
        path in a short-lived child process and stop the run's log processor."""
        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))

        request_json = json.loads(payload)
        run_id = request_json["run_id"]
        edge_id = request_json["edge_id"]
        status = request_json["status"]

        # Persist the latest status for this edge.
        self.save_training_status(edge_id, status)

        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
            # Stop client with multiprocessing mode
            self.request_json = request_json
            client_runner = FedMLClientRunner(
                self.args,
                edge_id=self.edge_id,
                request_json=request_json,
                agent_config=self.agent_config,
                run_id=run_id,
            )
            client_runner.device_status = status
            # Cleanup runs in its own process; wait at most 15s for it to finish.
            status_process = Process(target=client_runner.cleanup_client_with_status)
            status_process.start()
            status_process.join(15)

            # Stop log processor for current run
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)

    def callback_report_current_status(self, topic, payload):
        """Respond to a report-device-status request by re-sending the agent active message."""
        self.send_agent_active_msg()

    @staticmethod
    def process_ota_upgrade_msg():
        # Upgrade the fedml package in place; the caller raises afterwards to
        # force the agent process to restart on the new version.
        os.system("pip install -U fedml")

    def callback_client_ota_msg(self, topic, payload):
        """Dispatch an OTA command (upgrade or restart) sent from MLOps.

        Raises to make the surrounding agent loop restart the runner.
        """
        request_json = json.loads(payload)
        cmd = request_json["cmd"]

        if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE:
            FedMLClientRunner.process_ota_upgrade_msg()
            raise Exception("After upgraded, restart runner...")
        if cmd == ClientConstants.FEDML_OTA_CMD_RESTART:
            raise Exception("Restart runner...")

    def save_training_status(self, edge_id, training_status):
        """Cache the latest training status in memory and persist it for this edge."""
        self.current_training_status = training_status
        ClientConstants.save_training_infos(edge_id, training_status)

    @staticmethod
    def get_device_id():
        """Return a unique id for this device, cached in <data_dir>/devices.id.

        Resolution order: cached file, then a platform-specific hardware id
        (macOS serial number, Windows WMI uuid, docker/posix helper, HAL),
        then a random fallback. The resolved id is written back to the cache.
        """
        device_file_path = os.path.join(ClientConstants.get_data_dir(),
                                        ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
        file_for_device_id = os.path.join(device_file_path, "devices.id")
        if not os.path.exists(device_file_path):
            os.makedirs(device_file_path)
        elif os.path.exists(file_for_device_id):
            with open(file_for_device_id, 'r', encoding='utf-8') as f:
                device_id_from_file = f.readline()
                if device_id_from_file is not None and device_id_from_file != "":
                    return device_id_from_file

        if platform.system() == "Darwin":
            # Use the hardware serial number; fall back to the MAC address.
            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
                                 "|awk -F':' '{print $2}' "
            device_id = os.popen(cmd_get_serial_num).read()
            device_id = device_id.replace('\n', '').replace(' ', '')
            if device_id is None or device_id == "":
                device_id = hex(uuid.getnode())
            else:
                device_id = "0x" + device_id
        else:
            if "nt" in os.name:

                def get_uuid():
                    # Query the Windows product uuid via WMI; empty string on failure.
                    guid = ""
                    try:
                        cmd = "wmic csproduct get uuid"
                        guid = str(subprocess.check_output(cmd))
                        pos1 = guid.find("\\n") + 2
                        guid = guid[pos1:-15]
                    except Exception:
                        pass
                    return str(guid)

                device_id = str(get_uuid())
                logging.info(device_id)
            elif "posix" in os.name:
                device_id = sys_utils.get_device_id_in_docker()
                if device_id is None:
                    device_id = hex(uuid.getnode())
            else:
                device_id = sys_utils.run_subprocess_open(
                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
                )
                device_id = hex(device_id)

        if device_id is None or device_id == "":
            # Bug fix: the previous fallback called hex(uuid.uuid4()), which raises
            # TypeError because a UUID is not an integer. Use the hex digest instead.
            device_id = uuid.uuid4().hex
        with open(file_for_device_id, 'w', encoding='utf-8') as f:
            f.write(device_id)

        return device_id

    def get_ip_address(self, request_json):
        """Resolve the IP this worker should advertise: the local ip by default,
        the auto-detected public ip when requested, overridden by a user-set
        infer host when one is configured."""
        # OPTION 1: Use local ip
        ip = ClientConstants.get_local_ip()

        # OPTION 2: Auto detect public ip
        params = request_json["parameters"] if "parameters" in request_json else {}
        if params.get(ClientConstants.AUTO_DETECT_PUBLIC_IP):
            ip = ClientConstants.get_public_ip()
            logging.info("Auto detect public ip for worker: " + ip)

        # OPTION 3: Use user indicated ip
        host = self.infer_host
        if host is not None and host != "127.0.0.1" and host != "localhost":
            ip = host

        return ip

    def bind_account_and_device_id(self, url, account_id, device_id, os_name, role="md.on_premise_device"):
        """Register this device with MLOps and return its binding identity.

        Collects system/GPU information, POSTs it to *url*, and returns a tuple
        ``(edge_id, user_name, extra_url)``; ``(-1, None, None)`` on failure.

        Raises:
            SystemExit: when the backend reports the account does not exist.
        """
        ip = requests.get('https://checkip.amazonaws.com').text.strip()
        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
        host_name = sys_utils.get_host_name()
        json_params = {
            "accountid": account_id,
            "deviceid": device_id,
            "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
            "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
            "type": os_name,
            "processor": cpu_info,
            "core_type": cpu_info,
            "network": "",
            "role": role,
            "os_ver": os_ver,
            "memory": total_mem,
            "ip": ip,
            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
                            "available_mem": available_mem, "total_mem": total_mem,
                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
        }
        if gpu_count > 0:
            if gpu_total_mem is not None:
                # Bug fix: the conditional must be parenthesized, otherwise the
                # ", Total GPU Memory: ..." suffix bound only to the else-branch
                # empty string and was never appended when gpu_info existed.
                json_params["gpu"] = \
                    (gpu_info if gpu_info is not None else "") + ", Total GPU Memory: " + gpu_total_mem
            else:
                json_params["gpu"] = gpu_info if gpu_info is not None else ""
            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
            if gpu_available_mem is not None:
                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
            if gpu_total_mem is not None:
                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem

            json_params["extra_infos"]["gpu_count"] = gpu_count
            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name

            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
            gpu_list = sys_utils.get_gpu_list()
            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
            json_params["extra_infos"]["gpu_list"] = gpu_list
        else:
            json_params["gpu"] = "None"
            json_params["extra_infos"]["gpu_available_count"] = 0
            json_params["extra_infos"]["gpu_available_id_list"] = []
            json_params["extra_infos"]["gpu_list"] = []

        _, cert_path = MLOpsConfigs.get_request_params()
        if cert_path is not None:
            try:
                # NOTE(review): requests.session() creates a throwaway session, so
                # setting .verify on it has no effect on the requests.post below —
                # verification comes from verify=True; confirm intent.
                requests.session().verify = cert_path
                response = requests.post(
                    url, json=json_params, verify=True,
                    headers={"content-type": "application/json", "Connection": "close"}
                )
            except requests.exceptions.SSLError as err:
                # On SSL failure, install the root CA and retry once.
                MLOpsConfigs.install_root_ca_file()
                response = requests.post(
                    url, json=json_params, verify=True,
                    headers={"content-type": "application/json", "Connection": "close"}
                )
        else:
            response = requests.post(url, json=json_params, headers={"Connection": "close"})
        edge_id = -1
        user_name = None
        extra_url = None
        if response.status_code != 200:
            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                  f"response.content: {response.content}")
            pass
        else:
            # print("url = {}, response = {}".format(url, response))
            status_code = response.json().get("code")
            if status_code == "SUCCESS":
                edge_id = response.json().get("data").get("id")
                user_name = response.json().get("data").get("userName", None)
                extra_url = response.json().get("data").get("url", None)
                if edge_id is None or edge_id <= 0:
                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                          f"response.content: {response.content}")
            else:
                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                      f"response.content: {response.content}")
                return -1, None, None
        return edge_id, user_name, extra_url

    def fetch_configs(self):
        # Pull the latest MLOps configuration bundle from the backend.
        return MLOpsConfigs.fetch_all_configs()

    def send_agent_active_msg(self):
        """Publish this agent's active/idle status on the flclient_agent/active topic.

        Only sends when the cached agent status is unset, OFFLINE, or IDLE;
        any other cached status means a run is active and nothing is sent.
        """
        active_topic = "flclient_agent/active"
        status = MLOpsStatus.get_instance().get_client_agent_status(self.edge_id)
        if (
                status is not None
                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
        ):
            return

        # Derive the status to report from the current job, if one exists.
        try:
            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id)
        except Exception as e:
            current_job = None
        if current_job is None:
            # No job: only flip OFFLINE back to IDLE; otherwise stay silent.
            if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
                status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
            else:
                return
        else:
            status = ClientConstants.get_device_state_from_run_edge_state(current_job.status)
        active_msg = {"ID": self.edge_id, "status": status}
        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, status)
        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))

    def recover_start_deployment_msg_after_upgrading(self):
        """After an OTA upgrade, replay the start-deployment message for a job
        that was interrupted while in the UPGRADING state."""
        try:
            current_job = FedMLClientDataInterface.get_instance().get_current_job()
            if current_job is None:
                return
            if current_job.status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
                return
            logging.info("start deployment after upgrading.")
            topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
            self.callback_start_deployment(topic_start_deployment, current_job.running_json)
        except Exception as e:
            logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc()))

    def on_agent_mqtt_connected(self, mqtt_client_object):
        """
        Wire up topic listeners and subscriptions once the agent's MQTT session
        is established, then announce this device as active.

        The MQTT message topic format is: <sender>/<receiver>/<action>.
        """
        edge_id_str = str(self.edge_id)

        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(edge_id_str)
        topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(edge_id_str)
        topic_exit_train_with_exception = "flserver_agent/" + edge_id_str + "/exit_train_with_exception"
        topic_client_status = "fl_client/flclient_agent_" + edge_id_str + "/status"
        topic_report_status = "mlops/report_device_status"
        topic_ota_msg = "mlops/flclient_agent_" + edge_id_str + "/ota"

        # Table-driven listener registration: (topic, handler) pairs.
        handlers = [
            (topic_start_deployment, self.callback_start_deployment),
            (topic_delete_deployment, self.callback_delete_deployment),
            (topic_exit_train_with_exception, self.callback_exit_train_with_exception),
            (topic_client_status, self.callback_runner_id_status),
            (topic_report_status, self.callback_report_current_status),
            (topic_ota_msg, self.callback_client_ota_msg),
        ]
        for topic, handler in handlers:
            self.mqtt_mgr.add_message_listener(topic, handler)

        # Lazily create the MQTT inference helper and hook the inference request topic.
        if self.mqtt_inference_obj is None:
            self.mqtt_inference_obj = FedMLMqttInference(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr)
        self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id)

        # Subscribe (QoS 2) and remember each topic so stop_agent() can unsubscribe later.
        self.subscribed_topics.clear()
        for topic in (topic_start_deployment, topic_delete_deployment, topic_client_status,
                      topic_report_status, topic_exit_train_with_exception, topic_ota_msg):
            mqtt_client_object.subscribe(topic, qos=2)
            self.subscribed_topics.append(topic)

        # Broadcast the first active message.
        self.send_agent_active_msg()

        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

    def on_agent_mqtt_disconnected(self, mqtt_client_object):
        """Mark this edge OFFLINE and detach the inference listener on MQTT disconnect."""
        MLOpsStatus.get_instance().set_client_agent_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
        )

        if self.mqtt_inference_obj is None:
            return
        try:
            self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id)
        except Exception:
            # Best effort: the connection is already gone, so cleanup failures are ignored.
            pass

    def setup_agent_mqtt_connection(self, service_config):
        """
        Initialize the agent-side MQTT session, the local databases and the
        local inference API service, then report this edge as IDLE.

        service_config: dict holding at least the "mqtt_config" section
        fetched from the MLOps backend.
        """
        mqtt_config = service_config["mqtt_config"]
        mqtt_client_id = "FedML_ModelClientAgent_Daemon_@" + self.user_name + "@_" + \
            self.args.current_device_id + str(uuid.uuid4())
        last_will_payload = json.dumps(
            {"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE})
        self.mqtt_mgr = MqttManager(
            mqtt_config["BROKER_HOST"],
            mqtt_config["BROKER_PORT"],
            mqtt_config["MQTT_USER"],
            mqtt_config["MQTT_PWD"],
            mqtt_config["MQTT_KEEPALIVE"],
            mqtt_client_id,
            "flclient_agent/last_will_msg",
            last_will_payload
        )
        self.agent_config = service_config

        # Initialize the local job table; the model database setup is best-effort.
        FedMLClientDataInterface.get_instance().create_job_table()
        try:
            model_db = FedMLModelDatabase.get_instance()
            model_db.set_database_base_dir(ClientConstants.get_database_dir())
            model_db.create_table()
        except Exception:
            pass

        # Spawn the local uvicorn-based client API service if not already running.
        client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
        existing_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
        if not existing_api_pids:
            this_dir = os.path.dirname(__file__)
            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_dir)))
            self.local_api_process = ClientConstants.exec_console_with_script(
                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                "--log-level critical".format(
                    get_python_program(), client_api_cmd,
                    ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir
                ),
                should_capture_stdout=False,
                should_capture_stderr=False
            )

        # Connect; on_agent_mqtt_connected/_disconnected drive the session lifecycle.
        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
        self.mqtt_mgr.connect()

        self.setup_client_mqtt_mgr()
        self.mlops_metrics.report_client_training_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, is_from_model=True)
        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)

        self.recover_start_deployment_msg_after_upgrading()

    def stop_agent(self):
        """Signal the runner process to stop and tear down the agent-side MQTT session."""
        if self.run_process_event is not None:
            self.run_process_event.set()

        if self.mqtt_mgr is not None:
            try:
                # Drop every subscription registered in on_agent_mqtt_connected().
                for subscribed_topic in list(self.subscribed_topics):
                    self.mqtt_mgr.unsubscribe_msg(subscribed_topic)
            except Exception:
                # Unsubscribing is best-effort during shutdown.
                pass
            self.mqtt_mgr.loop_stop()
            self.mqtt_mgr.disconnect()

        self.release_client_mqtt_mgr()

    def start_agent_mqtt_loop(self, should_exit_sys=False):
        """
        Block on the MQTT network loop until it terminates, then shut the agent
        down; optionally terminate the whole process afterwards.
        """
        try:
            self.mqtt_mgr.loop_forever()
        except Exception as e:
            # The upgrade path aborts the loop with a sentinel exception message.
            is_upgrade_restart = (str(e) == "Restarting after upgraded...")
            if is_upgrade_restart:
                logging.info("Restarting after upgraded...")
            else:
                logging.info("Client tracing: {}".format(traceback.format_exc()))
        finally:
            self.stop_agent()
            if should_exit_sys:
                # Give in-flight uploads a moment to drain before exiting.
                time.sleep(5)
                sys.exit(1)
