@inproceedings{10.1145/3489517.3530394,
  author    = {Chandrasekaran, Rishikanth and Ergun, Kazim and Lee, Jihyun and Nanjunda, Dhanush and Kang, Jaeyoung and Rosing, Tajana},
  title     = {{FHDnn}: Communication Efficient and Robust Federated Learning for {AIoT} Networks},
  year      = {2022},
  isbn      = {9781450391429},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3489517.3530394},
  doi       = {10.1145/3489517.3530394},
  abstract  = {The advent of IoT and advances in edge computing inspired federated learning, a distributed algorithm to enable on device learning. Transmission costs, unreliable networks and limited compute power all of which are typical characteristics of IoT networks pose a severe bottleneck for federated learning. In this work we propose FHDnn, a synergetic federated learning framework that combines the salient aspects of CNNs and Hyperdimensional Computing. FHDnn performs hyperdimensional learning on features extracted from a self-supervised contrastive learning framework to accelerate training, lower communication costs, and increase robustness to network errors by avoiding the transmission of the CNN and training only the hyperdimensional component. Compared to CNNs, we show through experiments that FHDnn reduces communication costs by 66X, local client compute and energy consumption by 1.5 - 6X, while being highly robust to network errors with minimal loss in accuracy.},
  booktitle = {Proceedings of the 59th {ACM/IEEE} Design Automation Conference},
  pages     = {37--42},
  numpages  = {6},
  keywords  = {federated learning, hyperdimensional computing},
  location  = {San Francisco, California},
  series    = {DAC '22}
}

@inproceedings{10.1145/3649329.3655917,
  author    = {Jia, Chentao and Hu, Ming and Chen, Zekai and Yang, Yanxin and Xie, Xiaofei and Liu, Yang and Chen, Mingsong},
  title     = {{AdaptiveFL}: Adaptive Heterogeneous Federated Learning for Resource-Constrained {AIoT} Systems},
  year      = {2024},
  isbn      = {9798400706011},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3649329.3655917},
  doi       = {10.1145/3649329.3655917},
  abstract  = {Although Federated Learning (FL) is promising to enable collaborative learning among Artificial Intelligence of Things (AIoT) devices, it suffers from the problem of low classification performance due to various heterogeneity factors (e.g., computing capacity, memory size) of devices and uncertain operating environments. To address these issues, this paper introduces an effective FL approach named AdaptiveFL based on a novel fine-grained width-wise model pruning mechanism, which can generate various heterogeneous local models for heterogeneous AIoT devices. By using our proposed reinforcement learning-based device selection strategy, AdaptiveFL can adaptively dispatch suitable heterogeneous models to corresponding AIoT devices based on their available resources for local training. Experimental results show that, compared to state-of-the-art methods, AdaptiveFL can achieve up to 8.94\% inference improvements for both IID and non-IID scenarios.},
  booktitle = {Proceedings of the 61st {ACM/IEEE} Design Automation Conference},
  articleno = {84},
  numpages  = {6},
  location  = {San Francisco, CA, USA},
  series    = {DAC '24}
}

@inproceedings{10.1145/3649329.3655934,
  author    = {Xiao, Xiong and Duan, Mingxing and Song, Yingjie and Tang, Zhuo and Yang, Wenjing},
  title     = {Fake Node-Based Perception Poisoning Attacks against Federated Object Detection Learning in Mobile Computing Networks},
  booktitle = {Proceedings of the 61st ACM/IEEE Design Automation Conference},
  series    = {DAC '24},
  year      = {2024},
  isbn      = {9798400706011},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {San Francisco, CA, USA},
  url       = {https://doi.org/10.1145/3649329.3655934},
  doi       = {10.1145/3649329.3655934},
  articleno = {165},
  numpages  = {6},
  keywords  = {federated learning, object detection, mobile computing, perception poisoning attacks},
  abstract  = {Federated learning (FL) supports massive edge devices to collaboratively train object detection models in mobile computing scenarios. However, the distributed nature of FL exposes significant security vulnerabilities. Existing attack methods either require considerable costs to compromise the majority of participants, or suffer from poor attack success rates. Inspired by this, we devise an efficient fake node-based perception poisoning attacks strategy (FNPPA) to target such weaknesses. In particular, FNPPA poisons local data and injects multiple fake nodes to participate in aggregation, aiming to make the local poisoning model more likely to overwrite clean updates. Moreover, it can achieve greater malicious influence on target objects at a lower cost without affecting the normal detection of other objects. We demonstrate through exhaustive experiments that FNPPA exhibits superior attack impact than the state-of-the-art in terms of average precision and aggregation effect.}
}

@inproceedings{10631004,
  author    = {Qi, Senmao and Ma, Hao and Zou, Yifei and Yuan, Yuan and Li, Peng and Yu, Dongxiao},
  booktitle = {2024 IEEE 44th International Conference on Distributed Computing Systems (ICDCS)},
  title     = {{Fed-MS}: Fault Tolerant Federated Edge Learning with Multiple {Byzantine} Servers},
  year      = {2024},
  pages     = {982--992},
  keywords  = {Training;Fault tolerance;Federated learning;Fault tolerant systems;Filtering algorithms;Fasteners;Numerical models;Federated Learning;Edge Networks;Byzantine Fault Tolerance},
  doi       = {10.1109/ICDCS60910.2024.00095}
}


@inproceedings{10631028,
  author    = {Chen, Tianxiang and Wang, Feng and Qiu, Wangjie and Zhang, Qinnan and Xiong, Zehui and Zheng, Zhiming},
  booktitle = {2024 IEEE 44th International Conference on Distributed Computing Systems (ICDCS)},
  title     = {Toward Free-Riding Attack on Cross-Silo Federated Learning Through Evolutionary Game},
  year      = {2024},
  pages     = {869--880},
  keywords  = {Training;Economics;Federated learning;Prevention and mitigation;Computational modeling;Ecosystems;Process control;Cross-silo F1;free-riding attacks;incentive mechanism;evolutionary game theory;public goods},
  doi       = {10.1109/ICDCS60910.2024.00085}
}


@inproceedings{10630980,
  author    = {Zhang, Lan and Tang, Chen and Liu, Huiqi and Yu, Haikuo and Zhuang, Xirong and Zhao, Qi and Wang, Lei and Fang, Wenjing and Li, Xiang-Yang},
  booktitle = {2024 IEEE 44th International Conference on Distributed Computing Systems (ICDCS)},
  title     = {{FedMark}: Large-Capacity and Robust Watermarking in Federated Learning},
  year      = {2024},
  pages     = {821--832},
  keywords  = {Training;Accuracy;Federated learning;Computational modeling;Distributed databases;Watermarking;Intellectual property;federated learning;watermarking;bloom filter},
  doi       = {10.1109/ICDCS60910.2024.00081}
}


@inproceedings{10631003,
  author    = {Jeter, Tre' R. and Nguyen, Truc and Alharbi, Raed and Thai, My T.},
  booktitle = {2024 IEEE 44th International Conference on Distributed Computing Systems (ICDCS)},
  title     = {{OASIS}: Offsetting Active Reconstruction Attacks in Federated Learning},
  year      = {2024},
  pages     = {1004--1015},
  keywords  = {Training;Threat modeling;Privacy;Protocols;Federated learning;Medical services;Rendering (computer graphics);Federated Learning;Privacy;Deep Neural Networks;Reconstruction Attack;Dishonest Servers},
  doi       = {10.1109/ICDCS60910.2024.00097}
}


@inproceedings{10631001,
  author    = {Lewis, Cody and Varadharajan, Vijay and Noman, Nasimul and Tupakula, Uday and Li, Nan},
  booktitle = {2024 IEEE 44th International Conference on Distributed Computing Systems (ICDCS)},
  title     = {Mitigation of Gradient Inversion Attacks in Federated Learning with Private Adaptive Optimization},
  year      = {2024},
  pages     = {833--845},
  keywords  = {Training;Data privacy;Privacy;Interpolation;Federated learning;Prevention and mitigation;Neural networks;federated learning;privacy;adaptive optimization;secure aggregation},
  doi       = {10.1109/ICDCS60910.2024.00082}
}


@inproceedings{10.1145/3627703.3650082,
  author    = {Cheng, Pau-Chen and Eykholt, Kevin and Gu, Zhongshu and Jamjoom, Hani and Jayaram, K. R. and Valdez, Enriquillo and Verma, Ashish},
  title     = {{DeTA}: Minimizing Data Leaks in Federated Learning via Decentralized and Trustworthy Aggregation},
  year      = {2024},
  isbn      = {9798400704376},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3627703.3650082},
  doi       = {10.1145/3627703.3650082},
  abstract  = {Federated learning (FL) relies on a central authority to oversee and aggregate model updates contributed by multiple participating parties in the training process. This centralization of sensitive model updates naturally raises concerns about the trustworthiness of the central aggregation server, as well as the potential risks associated with server failures or breaches, which could result in loss and leaks of model updates. Moreover, recent attacks have demonstrated that, by obtaining the leaked model updates, malicious actors can even reconstruct substantial amounts of private data belonging to training participants. This underscores the critical necessity to rethink the existing FL system architecture to mitigate emerging attacks in the evolving threat landscape. One straightforward approach is to fortify the central aggregator with confidential computing (CC), which offers hardware-assisted protection for runtime computation and can be remotely verified for execution integrity. However, a growing number of security vulnerabilities have surfaced in tandem with the adoption of CC, indicating that depending solely on this singular defense may not provide the requisite resilience to thwart data leaks. To address the security challenges inherent in the centralized aggregation paradigm and enhance system resilience, we introduce DeTA, an FL system architecture that employs a decentralized and trustworthy aggregation strategy with a defense-in-depth design. In DeTA, FL parties locally divide and shuffle their model updates at the parameter level, creating random partitions designated for multiple aggregators, all of which are shielded within CC execution environments. Moreover, to accommodate the multi-aggregator FL ecosystem, we have implemented a two-phase authentication protocol that enables new parties to verify all CC-protected aggregators and establish secure channels to upstream their model updates. With DeTA, model aggregation algorithms can function without any alterations. However, each aggregator is now oblivious to model architectures, possessing only a fragmented and shuffled view of each model update. This approach effectively mitigates attacks aimed at tampering with the aggregation process or exploiting leaked model updates, while also preserving training accuracy and minimizing performance overheads.},
  booktitle = {Proceedings of the Nineteenth European Conference on Computer Systems},
  pages     = {219--235},
  numpages  = {17},
  keywords  = {Decentralized Aggregation, Federated Learning, Parameter Shuffling, Trusted Aggregation},
  location  = {Athens, Greece},
  series    = {EuroSys '24}
}

@inproceedings{10.1145/3627703.3629575,
  author    = {Ching, Cheng-Wei and Chen, Xin and Kim, Taehwan and Ji, Bo and Wang, Qingyang and Da Silva, Dilma and Hu, Liting},
  title     = {{Totoro}: A Scalable Federated Learning Engine for the Edge},
  year      = {2024},
  isbn      = {9798400704376},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3627703.3629575},
  doi       = {10.1145/3627703.3629575},
  abstract  = {Federated Learning (FL) is an emerging distributed machine learning (ML) technique that enables in-situ model training and inference on decentralized edge devices. We propose Totoro, a novel scalable FL engine, that enables massive FL applications to run simultaneously on edge networks. The key insight is to explore a distributed hash table (DHT)-based peer-to-peer (P2P) model to re-architect the centralized FL system design into a fully decentralized one. In contrast to previous studies where many FL applications shared one centralized parameter server, Totoro assigns a dedicated parameter server to each individual application. Any edge node can act as any application's coordinator, aggregator, client selector, worker (participant device), or any combination of the above, thereby radically improving scalability and adaptivity. Totoro introduces three innovations to realize its design: a locality-aware P2P multi-ring structure, a publish/subscribe-based forest abstraction, and a bandit-based exploitation-exploration path planning model. Real-world experiments on 500 Amazon EC2 servers show that Totoro scales gracefully with the number of FL applications and N edge nodes, speeds up the total training time by 1.2\texttimes{}--14.0\texttimes{}, achieves $O(\log N)$ hops for model dissemination and gradient aggregation with millions of nodes, and efficiently adapts to the practical edge networks and churns.},
  booktitle = {Proceedings of the Nineteenth European Conference on Computer Systems},
  pages     = {182--199},
  numpages  = {18},
  keywords  = {Distributed and parallel systems for machine learning, edge computing, federated learning},
  location  = {Athens, Greece},
  series    = {EuroSys '24}
}

@inproceedings{10.1145/3627703.3650081,
  author    = {Khan, Ahmad Faraz and Khan, Azal Ahmad and Abdelmoniem, Ahmed M. and Fountain, Samuel and Butt, Ali R. and Anwar, Ali},
  title     = {{FLOAT}: Federated Learning Optimizations with Automated Tuning},
  year      = {2024},
  isbn      = {9798400704376},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3627703.3650081},
  doi       = {10.1145/3627703.3650081},
  abstract  = {Federated Learning (FL) has emerged as a powerful approach that enables collaborative distributed model training without the need for data sharing. However, FL grapples with inherent heterogeneity challenges leading to issues such as stragglers, dropouts, and performance variations. Selection of clients to run an FL instance is crucial, but existing strategies introduce biases and participation issues and do not consider resource efficiency. Communication and training acceleration solutions proposed to increase client participation also fall short due to the dynamic nature of system resources. We address these challenges in this paper by designing FLOAT, a novel framework designed to boost FL client resource awareness. FLOAT optimizes resource utilization dynamically for meeting training deadlines, and mitigates stragglers and dropouts through various optimization techniques; leading to enhanced model convergence and improved performance. FLOAT leverages multi-objective Reinforcement Learning with Human Feedback (RLHF) to automate the selection of the optimization techniques and their configurations, tailoring them to individual client resource conditions. Moreover, FLOAT seamlessly integrates into existing FL systems, maintaining non-intrusiveness and versatility for both asynchronous and synchronous FL settings. As per our evaluations, FLOAT increases accuracy by up to 53\%, reduces client dropouts by up to 78\texttimes{}, and improves communication, computation, and memory utilization by up to 81\texttimes{}, 44\texttimes{}, and 20\texttimes{} respectively.},
  booktitle = {Proceedings of the Nineteenth European Conference on Computer Systems},
  pages     = {200--218},
  numpages  = {19},
  keywords  = {Federated Learning, Machine Learning Systems, Resource Management},
  location  = {Athens, Greece},
  series    = {EuroSys '24}
}

@inproceedings{10.1145/3627703.3629559,
  author    = {Jiang, Zhifeng and Wang, Wei and Chen, Ruichuan},
  title     = {{Dordis}: Efficient Federated Learning with Dropout-Resilient Differential Privacy},
  year      = {2024},
  isbn      = {9798400704376},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3627703.3629559},
  doi       = {10.1145/3627703.3629559},
  abstract  = {Federated learning (FL) is increasingly deployed among multiple clients to train a shared model over decentralized data. To address privacy concerns, FL systems need to safeguard the clients' data from disclosure during training and control data leakage through trained models when exposed to untrusted domains. Distributed differential privacy (DP) offers an appealing solution in this regard as it achieves a balanced tradeoff between privacy and utility without a trusted server. However, existing distributed DP mechanisms are impractical in the presence of client dropout, resulting in poor privacy guarantees or degraded training accuracy. In addition, these mechanisms suffer from severe efficiency issues. We present Dordis, a distributed differentially private FL framework that is highly efficient and resilient to client dropout. Specifically, we develop a novel 'add-then-remove' scheme that enforces a required noise level precisely in each training round, even if some sampled clients drop out. This ensures that the privacy budget is utilized prudently, despite unpredictable client dynamics. To boost performance, Dordis operates as a distributed parallel architecture via encapsulating the communication and computation operations into stages. It automatically divides the global model aggregation into several chunk-aggregation tasks and pipelines them for optimal speedup. Large-scale deployment evaluations demonstrate that Dordis efficiently handles client dropout in various realistic FL scenarios, achieving the optimal privacy-utility tradeoff and accelerating training by up to 2.4\texttimes{} compared to existing solutions.},
  booktitle = {Proceedings of the Nineteenth European Conference on Computer Systems},
  pages     = {472--488},
  numpages  = {17},
  keywords  = {Client Dropout, Distributed Differential Privacy, Federated Learning, Pipeline, Secure Aggregation},
  location  = {Athens, Greece},
  series    = {EuroSys '24}
}

@inproceedings{10.1145/3552326.3567485,
  author    = {Abdelmoniem, Ahmed M. and Sahu, Atal Narayan and Canini, Marco and Fahmy, Suhaib A.},
  title     = {{REFL}: Resource-Efficient Federated Learning},
  year      = {2023},
  isbn      = {9781450394871},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3552326.3567485},
  doi       = {10.1145/3552326.3567485},
  abstract  = {Federated Learning (FL) enables distributed training by learners using local data, thereby enhancing privacy and reducing communication. However, it presents numerous challenges relating to the heterogeneity of the data distribution, device capabilities, and participant availability as deployments scale, which can impact both model convergence and bias. Existing FL schemes use random participant selection to improve the fairness of the selection process; however, this can result in inefficient use of resources and lower quality training. In this work, we systematically address the question of resource efficiency in FL, showing the benefits of intelligent participant selection, and incorporation of updates from straggling participants. We demonstrate how these factors enable resource efficiency while also improving trained model quality.},
  booktitle = {Proceedings of the Eighteenth European Conference on Computer Systems},
  pages     = {215--232},
  numpages  = {18},
  location  = {Rome, Italy},
  series    = {EuroSys '23}
}

@inproceedings{10046104,
  author    = {Yu, Sixing and Nguyen, Phuong and Abebe, Waqwoya and Qian, Wei and Anwar, Ali and Jannesari, Ali},
  booktitle = {SC22: International Conference for High Performance Computing, Networking, Storage and Analysis},
  title     = {{SPATL}: Salient Parameter Aggregation and Transfer Learning for Heterogeneous Federated Learning},
  year      = {2022},
  pages     = {1--14},
  keywords  = {Training;Data privacy;Costs;Federated learning;High performance computing;Transfer learning;Process control;Federated Learning;Heterogeneous System;Machine Learning;ML;FL},
  doi       = {10.1109/SC41404.2022.00041}
}


@inproceedings{9923843,
  author    = {Tian, Chunlin and Li, Li and Shi, Zhan and Wang, Jun and Xu, ChengZhong},
  booktitle = {2022 55th IEEE/ACM International Symposium on Microarchitecture (MICRO)},
  title     = {{HARMONY}: Heterogeneity-Aware Hierarchical Management for Federated Learning System},
  year      = {2022},
  pages     = {631--645},
  keywords  = {Training;Performance evaluation;Runtime;Microarchitecture;Federated learning;Estimation;Training data;Federated learning;heterogeneous systems;mobile device},
  doi       = {10.1109/MICRO56248.2022.00049}
}


