# Federated-learning server: K-means clustering of client user embeddings
# plus cluster-personalized parameter broadcasting.
import torch
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
class FederatedServer:
    def __init__(self, global_model, device=None, n_clusters=5, alpha=0.6):
        """
        Federated-learning server with cluster-personalized user embeddings.

        :param global_model: global model (nn.Module). Its first parameter /
            first state_dict entry is assumed to be the user-embedding table
            (see broadcast_model_params and aggregate_parameters_and_gradients).
        :param device: training device, e.g. "cpu" or "cuda". Defaults to the
            device the model's parameters already live on.
        :param n_clusters: requested number of K-means clusters (K). The
            effective K is clamped to the number of reporting clients.
        :param alpha: weight in [0, 1] of the cluster-aggregated user
            embedding when broadcasting personalized parameters; the remaining
            (1 - alpha) comes from the globally aggregated embedding.
        """
        self.global_model = global_model
        if device is None:
            self.device = next(self.global_model.parameters()).device
        else:
            self.device = device
        self.global_model.to(self.device)
        self.n_clusters = n_clusters
        self.alpha = alpha

        # Per-cluster aggregated user embeddings (param[0] only) and the most
        # recent K-means label of every client. Both stay None until the first
        # call to aggregate_parameters_and_gradients.
        self.cluster_user_emb_dict = None
        self.cluster_labels = None

        # Globally (FedAvg) aggregated network parameters, excluding the
        # user-embedding table.
        self.global_net_params = None

        print("[SERVER] FederatedServer initialized on device:", self.device)

    def broadcast_model_params(self, client_cluster_label=None):
        """
        Build the state_dict to send to a client.

        - If client_cluster_label is None, or clustering has not happened yet,
          return the plain global model parameters.
        - Otherwise return personalized parameters: the user embedding becomes
          alpha * (cluster aggregate) + (1 - alpha) * (global embedding),
          while every other entry stays at the global value.
        - If the requested cluster has no aggregate (it was empty during
          aggregation), fall back to the plain global parameters instead of
          raising KeyError as the previous implementation did.

        :param client_cluster_label: cluster id assigned to the client, or None.
        :return: dict mapping state_dict keys to CPU tensors.
        """
        global_state = self.global_model.state_dict()
        if client_cluster_label is None or self.cluster_user_emb_dict is None:
            return {k: v.cpu() for k, v in global_state.items()}

        # Empty clusters are skipped during aggregation, so their labels are
        # absent from cluster_user_emb_dict; serve the global model instead.
        cluster_emb = self.cluster_user_emb_dict.get(client_cluster_label)
        if cluster_emb is None:
            return {k: v.cpu() for k, v in global_state.items()}

        keys = list(global_state.keys())
        # By convention the first state_dict entry is the user-embedding table.
        user_emb_key = keys[0]
        # Personalized embedding: mix of the cluster aggregate and the global
        # average (the global model's embedding IS the global average after
        # aggregation).
        personalized_user_emb = (
            self.alpha * cluster_emb.to(self.device)
            + (1 - self.alpha) * global_state[user_emb_key].to(self.device)
        )
        new_state = {user_emb_key: personalized_user_emb.cpu()}
        # Every other parameter keeps its global value.
        for k in keys[1:]:
            new_state[k] = global_state[k].cpu()
        return new_state

    def aggregate_parameters_and_gradients(self, all_client_params, all_client_gradients):
        """
        Aggregation pipeline:
        1. K-means-cluster clients on their user embeddings (param[0]);
        2. Average the user embedding within each cluster (param[0] only);
        3. FedAvg all network parameters (params[1:]) across every client;
        4. Update the global model: embedding = global mean over all client
           embeddings, network parameters = global FedAvg result;
        5. Gradients: embedding gradients are averaged within each cluster and
           then combined with cluster-size weights; network gradients are
           averaged globally.

        :param all_client_params: list, one entry per client, each a list of
            parameter tensors where index 0 is the user embedding.
        :param all_client_gradients: list with the same layout, holding the
            clients' gradients.
        """
        if not all_client_params or not all_client_gradients:
            print("[SERVER] No client parameters or gradients to aggregate.")
            return

        num_clients = len(all_client_params)
        print(f"[SERVER] Aggregating parameters and gradients from {num_clients} clients...")

        # 1. Clustering features: each client's flattened user embedding.
        client_features = np.array(
            [p[0].detach().cpu().numpy().flatten() for p in all_client_params]
        )

        # 2. Standardize features.
        client_features = StandardScaler().fit_transform(client_features)

        # 3. PCA. n_components must not exceed min(n_samples, n_features);
        #    the previous hard-coded 20 crashed with fewer than 20 clients.
        n_components = min(20, client_features.shape[0], client_features.shape[1])
        client_features = PCA(n_components=n_components).fit_transform(client_features)

        # 4. K-means; clamp K to the client count (KMeans raises when
        #    n_clusters > n_samples).
        k = min(self.n_clusters, num_clients)
        kmeans = KMeans(n_clusters=k, random_state=42)
        self.cluster_labels = kmeans.fit_predict(client_features)
        print("[SERVER] Client cluster labels:", self.cluster_labels)

        # 5. Aggregate the user embedding (param[0] only) inside each cluster.
        self.cluster_user_emb_dict = {}
        for cluster in range(k):
            cluster_indices = [i for i, label in enumerate(self.cluster_labels) if label == cluster]
            if not cluster_indices:
                continue
            print(f"[SERVER] Aggregating user embedding for cluster {cluster} with {len(cluster_indices)} clients.")
            cluster_user_embs = [all_client_params[i][0].to(self.device) for i in cluster_indices]
            self.cluster_user_emb_dict[cluster] = torch.stack(cluster_user_embs, dim=0).mean(dim=0)

        # 6. FedAvg the network parameters (params[1:]) across every client.
        self.global_net_params = [
            torch.stack([cp[idx].to(self.device) for cp in all_client_params]).mean(dim=0)
            for idx in range(1, len(all_client_params[0]))
        ]

        # 7. Global user embedding: plain mean over all client embeddings.
        all_user_embs = torch.stack([cp[0].to(self.device) for cp in all_client_params])
        global_user_emb = all_user_embs.mean(dim=0)

        # 8. Write the aggregates back into the global model: first parameter
        #    is the user embedding, the rest are network parameters.
        combined_global_params = [global_user_emb] + self.global_net_params
        for param, agg_param in zip(self.global_model.parameters(), combined_global_params):
            param.data.copy_(agg_param)

        # 9. Gradient aggregation.
        # User-embedding gradient: mean within each cluster, then a
        # cluster-size-weighted average over clusters (mathematically equal to
        # the plain mean over all clients).
        aggregated_user_grad = None
        for cluster in range(k):
            cluster_indices = [i for i, label in enumerate(self.cluster_labels) if label == cluster]
            if not cluster_indices:
                continue
            cluster_user_grads = [all_client_gradients[i][0].to(self.device) for i in cluster_indices]
            agg_grad = torch.stack(cluster_user_grads, dim=0).mean(dim=0)
            if aggregated_user_grad is None:
                aggregated_user_grad = torch.zeros_like(agg_grad)
            aggregated_user_grad += len(cluster_indices) * agg_grad
        if aggregated_user_grad is not None:
            aggregated_user_grad /= num_clients

        # Network gradients: global mean over all clients for each tensor.
        global_net_grads = [
            torch.stack([cg[idx].to(self.device) for cg in all_client_gradients]).mean(dim=0)
            for idx in range(1, len(all_client_gradients[0]))
        ]

        # Install the aggregated gradients on the global model, allocating
        # .grad buffers the first time around.
        for idx, param in enumerate(self.global_model.parameters()):
            agg = aggregated_user_grad if idx == 0 else global_net_grads[idx - 1]
            if param.grad is None:
                param.grad = torch.zeros_like(agg)
            param.grad.data.copy_(agg)
        print("[SERVER] Global model updated with clustered aggregation.")



