import copy
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.preprocessing import label_binarize
from sklearn import metrics

from flcore.clients.fedcp_wrapper import wrap
from utils.data_utils import build_irg_dataloader


class clientCP:
    """FedCP client.

    Owns an independent copy of the backbone wrapped into an ``Ensemble``
    (trainable backbone + frozen global head + frozen feature-extractor copy
    + conditional gate), plus this client's data indices, loss and optimizer.
    """

    def __init__(self, args, id, train_idx, test_idx, **kwargs):
        """
        args               : argparse.Namespace with model / device / hyper-params
        id                 : client index
        train_idx/test_idx : sample indices owned by this client (server split)
        **kwargs           : may carry sub-networks such as ConditionalSelection
        """
        self.args = args
        # ---------- 1. Model preparation ----------
        # a. If a model *class* (not an instance) was passed in, instantiate
        #    it with the real arguments first.
        if isinstance(args.model, type):
            # Convention: model __init__(args, **model_kwargs)
            model_kwargs = getattr(args, "model_kwargs", {})  # may be filled server-side
            args.model = args.model(args, **model_kwargs)

        # b. Wrap the model (only once) when it lacks head/feature_extractor.
        if not (hasattr(args.model, "head") and hasattr(args.model, "feature_extractor")):
            args.model = wrap(args.model)  # fedcp_wrapper.wrap()

        # c. Deep-copy an independent set of weights for this client.
        backbone = copy.deepcopy(args.model).to(args.device)

        # --- split out the components ---
        feature_extractor = getattr(backbone, "feature_extractor", backbone)

        # Second safety net: fall back to the backbone itself when the
        # attribute is not actually an nn.Module.
        if not isinstance(feature_extractor, nn.Module):
            feature_extractor = backbone

        head_g = getattr(backbone, "head", None)
        cs_net = copy.deepcopy(kwargs.get("ConditionalSelection"))

        # (3) Assemble the federated-side network (Ensemble).
        self.model = Ensemble(
            model=backbone,               # full backbone
            cs=cs_net,                    # Conditional-Selection sub-network
            head_g=head_g,                # global classification head
            feature_extractor=feature_extractor
        ).to(args.device)

        # ---------- 2. Basic attributes ----------
        self.id = id
        self.dataset = args.dataset
        self.device = args.device
        self.num_classes = args.num_labels
        self.train_idx = train_idx
        self.test_idx = test_idx
        self.batch_size = args.batch_size
        self.learning_rate = args.local_learning_rate
        self.local_steps = args.local_steps
        self.lamda = args.lamda

        # ---------- 3. Loss & optimizer ----------
        self.loss = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.learning_rate)
        self.opt = self.optimizer  # short alias used by the training loop

        # ---------- 4. Remaining runtime state ----------
        # Context width: the global head's input dimension when it exists,
        # otherwise fall back to the first model parameter's leading dim.
        in_dim = (list(head_g.parameters())[0].shape[1]
                  if head_g is not None
                  else list(self.model.parameters())[0].shape[0])
        self.context = torch.rand(1, in_dim).to(self.device)
        self.pm_train, self.pm_test = [], []

    # ----- data loading -----
    def load_train_data(self, batch_size=None):
        """Build the train DataLoader over this client's train indices.

        NOTE(review): ``batch_size`` is accepted for API compatibility but
        ignored — build_irg_dataloader reads it from self.args; confirm.
        """
        return build_irg_dataloader(self.args, "train", self.train_idx)

    def load_test_data(self, batch_size=None):
        """Build the test DataLoader over this client's test indices.

        NOTE(review): ``batch_size`` is accepted for API compatibility but
        ignored — build_irg_dataloader reads it from self.args; confirm.
        """
        return build_irg_dataloader(self.args, "test", self.test_idx)

    def set_parameters(self, feature_extractor):
        """Copy global feature-extractor weights into BOTH local copies:
        the trainable one inside the backbone and the frozen one kept by the
        Ensemble."""
        for new_param, old_param in zip(feature_extractor.parameters(),
                                        self.model.model.feature_extractor.parameters()):
            old_param.data = new_param.data.clone()

        for new_param, old_param in zip(feature_extractor.parameters(),
                                        self.model.feature_extractor.parameters()):
            old_param.data = new_param.data.clone()

    def set_head_g(self, head):
        """Refresh the context vector from the local head's effective weight
        matrix, then copy the incoming global head into the frozen head_g."""
        headw_ps = []
        for name, mat in self.model.model.head.named_parameters():
            if 'weight' in name:
                headw_ps.append(mat.data)
        # Collapse stacked linear layers into one effective matrix
        # (multiply right-to-left, starting from the last layer).
        headw_p = headw_ps[-1]
        for mat in headw_ps[-2::-1]:
            headw_p = torch.matmul(headw_p, mat)
        headw_p.detach_()
        self.context = torch.sum(headw_p, dim=0, keepdim=True)

        for new_param, old_param in zip(head.parameters(), self.model.head_g.parameters()):
            old_param.data = new_param.data.clone()

    def set_cs(self, cs):
        """Copy server-side ConditionalSelection weights into the gate."""
        for new_param, old_param in zip(cs.parameters(), self.model.gate.cs.parameters()):
            old_param.data = new_param.data.clone()

    def save_con_items(self, items, tag='', item_path=None):
        """Persist collected masks plus any extra items.

        NOTE(review): relies on ``self.save_item``, which is not defined in
        this class — presumably inherited/mixed-in elsewhere; confirm before
        calling, otherwise this raises AttributeError.
        """
        self.save_item(self.pm_train, 'pm_train' + '_' + tag, item_path)
        self.save_item(self.pm_test, 'pm_test' + '_' + tag, item_path)
        for idx, it in enumerate(items):
            self.save_item(it, 'item_' + str(idx) + '_' + tag, item_path)

    def generate_upload_head(self):
        """Blend the personalized head into the global head (50/50), in place."""
        for pp, pg in zip(self.model.model.head.parameters(),
                          self.model.head_g.parameters()):
            # Use .data on both sides so no autograd graph is captured
            # (the original mixed Parameter arithmetic into a .data write,
            # and shadowed the module alias `np` with a loop variable).
            pg.data = pp.data * 0.5 + pg.data * 0.5

    def test_metrics(self):
        """Evaluate on the local test split.

        Returns
        -------
        (test_acc, test_num, auc) : correct-prediction count, sample count,
                                    micro-averaged ROC-AUC.
        """
        testloader = self.load_test_data()
        self.model.eval()

        test_acc = 0
        test_num = 0
        y_prob = []
        y_true = []
        # Reset inference-time mask buffers before collecting new ones.
        self.model.gate.pm_ = []
        self.model.gate.gm_ = []
        self.pm_test = []

        with torch.no_grad():
            for batch in testloader:
                # Split the batch tuple: everything but the last element is
                # input, the last element is the label.
                inputs = batch[:-1]
                y = batch[-1]

                # Move inputs to the device (handle list/tuple of tensors).
                if isinstance(inputs, (list, tuple)):
                    inputs = [t.to(self.device) if torch.is_tensor(t) else t for t in inputs]
                else:
                    inputs = inputs.to(self.device)
                y = y.to(self.device)

                # Forward pass with the packed inputs.
                output = self.model(inputs, is_rep=False, context=self.context)

                test_acc += (torch.sum(torch.argmax(output, dim=1) == y)).item()
                test_num += y.shape[0]

                # Explicit dim=1: the implicit softmax dim is deprecated.
                y_prob.append(F.softmax(output, dim=1).detach().cpu().numpy())
                nc = self.num_classes
                if self.num_classes == 2:
                    nc += 1  # label_binarize needs >= 3 classes to emit 2 columns
                lb = label_binarize(y.detach().cpu().numpy(), classes=np.arange(nc))
                if self.num_classes == 2:
                    lb = lb[:, :2]
                y_true.append(lb)

        y_prob = np.concatenate(y_prob, axis=0)
        y_true = np.concatenate(y_true, axis=0)

        auc = metrics.roc_auc_score(y_true, y_prob, average='micro')

        self.pm_test.extend(self.model.gate.pm_)

        return test_acc, test_num, auc

    def train_cs_model(self):
        """Run local training: CE loss + lamda * MMD(rep, rep_base)."""
        trainloader = self.load_train_data()
        self.model.train()  # switch to training mode

        for step in range(self.local_steps):  # number of local iterations
            if step % 10 == 0:
                print(f"    step {step}/{self.local_steps}")
            # Reset the gate's training-time mask buffers for this step.
            self.model.gate.pm = []
            self.model.gate.gm = []
            self.pm_train = []
            for i, batch in enumerate(trainloader):
                inputs = batch[:-1]  # all input tensors
                y = batch[-1]        # label tensor

                # Move inputs to the device.
                if isinstance(inputs, (list, tuple)):
                    inputs = [t.to(self.device) if torch.is_tensor(t) else t for t in inputs]
                else:
                    inputs = inputs.to(self.device)
                y = y.to(self.device)

                # output   : final prediction
                # rep      : this client's (personalized) representation
                # rep_base : base representation (global information)
                output, rep, rep_base = self.model(inputs, is_rep=True, context=self.context)
                # Supervised loss against the true labels ...
                loss = self.loss(output, y)
                # ... plus the MMD regularizer aligning rep with rep_base.
                loss += MMD(rep, rep_base, 'rbf', self.device) * self.lamda
                self.opt.zero_grad()
                loss.backward()
                self.opt.step()

        self.pm_train.extend(self.model.gate.pm)
        scores = [torch.mean(pm).item() for pm in self.pm_train]
        print(np.mean(scores), np.std(scores))

def MMD(x, y, kernel, device='cpu'):
    """Empirical maximum mean discrepancy between two samples.

    The lower the result, the more evidence that the two distributions are
    the same. Generalized so that x and y may contain different numbers of
    samples (the original square-matrix expansion required n == m; for
    n == m the returned value is unchanged by linearity of the mean).

    Args:
        x: first sample of shape (n, d), distribution P
        y: second sample of shape (m, d), distribution Q
        kernel: kernel type, "multiscale" or "rbf"
        device: kept for backward compatibility; accumulators now follow
            x's device via zeros_like, avoiding a latent device mismatch.

    Returns:
        Scalar tensor with the (biased) MMD estimate.
    """
    xx = torch.mm(x, x.t())  # (n, n) Gram matrix of x
    yy = torch.mm(y, y.t())  # (m, m) Gram matrix of y
    zz = torch.mm(x, y.t())  # (n, m) cross Gram matrix

    # Squared pairwise distances via ||a||^2 + ||b||^2 - 2<a, b>,
    # built with broadcastable norm vectors instead of square expansions.
    x_sq = xx.diag().unsqueeze(1)  # (n, 1)
    y_sq = yy.diag().unsqueeze(0)  # (1, m)
    dxx = x_sq + x_sq.t() - 2. * xx  # (n, n)
    dyy = y_sq.t() + y_sq - 2. * yy  # (m, m)
    dxy = x_sq + y_sq - 2. * zz      # (n, m), valid for any n, m

    XX = torch.zeros_like(xx)
    YY = torch.zeros_like(yy)
    XY = torch.zeros_like(zz)

    if kernel == "multiscale":
        bandwidth_range = [0.2, 0.5, 0.9, 1.3]
        for a in bandwidth_range:
            XX += a**2 * (a**2 + dxx)**-1
            YY += a**2 * (a**2 + dyy)**-1
            XY += a**2 * (a**2 + dxy)**-1

    if kernel == "rbf":
        bandwidth_range = [10, 15, 20, 50]
        for a in bandwidth_range:
            XX += torch.exp(-0.5*dxx/a)
            YY += torch.exp(-0.5*dyy/a)
            XY += torch.exp(-0.5*dxy/a)

    # Equals torch.mean(XX + YY - 2*XY) when the shapes coincide.
    return torch.mean(XX) + torch.mean(YY) - 2. * torch.mean(XY)

# Federated-side model: trainable backbone + frozen global head + gate.
class Ensemble(nn.Module):
    """Combines the local backbone with a frozen global head (head_g), a
    frozen copy of the feature extractor, and a Gate that splits the
    representation into personalized / global parts."""

    def __init__(self, model, cs, head_g, feature_extractor) -> None:
        """
        model             : full backbone (must expose .feature_extractor and .head)
        cs                : ConditionalSelection sub-network handed to the Gate
        head_g            : global classification head (frozen here)
        feature_extractor : frozen copy of the feature extractor
        """
        super().__init__()

        self.model = model
        self.head_g = head_g
        self.feature_extractor = feature_extractor

        # Freeze the global head and the feature-extractor copy; only the
        # backbone (self.model) remains trainable in this module.
        # NOTE(review): raises AttributeError if head_g is None — callers are
        # expected to pass a wrapped model that always has a head; confirm.
        for param in self.head_g.parameters():
            param.requires_grad = False
        for param in self.feature_extractor.parameters():
            param.requires_grad = False
        # flag: 0 = mixed, 1 = personalized only, 2 = global only
        self.flag = 0
        self.tau = 1        # temperature passed to the gate's selector
        self.hard = False   # hard (discrete) vs soft gating
        self.context = None

        self.gate = Gate(cs)

    def forward(self, x, is_rep=False, context=None):
        """Extract features, gate them, and classify per self.flag.

        x       : tensor, or list/tuple from the collate function (fixed order)
        is_rep  : when True also return (rep, rep_base) for the MMD regularizer
        context : optional (1, d) context vector modulating the gate input
        """
        # ---------- 1. Extract features ----------
        if isinstance(x, (list, tuple)):
            # Unpack in the collate function's fixed order.
            # NOTE(review): assumes exactly this 8-tuple layout — confirm it
            # matches the dataloader's collate_fn.
            x_ts, x_ts_mask, ts_tt, reg_ts, \
            input_ids, attn_mask, note_time, note_time_mask = x

            rep = self.model.feature_extractor(
                x_ts, x_ts_mask, ts_tt,
                input_ids, attn_mask,
                note_time, note_time_mask,
                reg_ts=reg_ts,                # deliberately passed by keyword
                return_features=True
            )
        else:
            rep = self.model.feature_extractor(x,return_features=True)

        # ---------- 2. Build the gate input ----------
        gate_in = rep
        if context is not None:
            context = F.normalize(context, p=2, dim=1)
            # broadcast over the batch dimension
            gate_in = rep * context.expand_as(rep)

        # ---------- 3. Combine personalized / global representations ----------
        if self.flag == 0:              # mixed
            rep_p, rep_g = self.gate(rep, self.tau, self.hard, gate_in, self.flag)
            output = self.model.head(rep_p) + self.head_g(rep_g)
        elif self.flag == 1:            # personalized only
            rep_p = self.gate(rep, self.tau, self.hard, gate_in, self.flag)
            output = self.model.head(rep_p)
        else:                           # global only
            rep_g = self.gate(rep, self.tau, self.hard, gate_in, self.flag)
            output = self.head_g(rep_g)

        # ---------- 4. Return ----------
        if is_rep:
            # NOTE(review): fea_base aliases rep — the "base" representation is
            # the SAME tensor, so MMD(rep, fea_base) is identically zero in the
            # caller. Likely it should come from the frozen
            # self.feature_extractor instead; confirm with the authors.
            fea_base = rep                      # feature_extractor output
            return output, rep, fea_base
        else:
            return output

class Gate(nn.Module):
    """Mask-based representation splitter.

    A context selector ``cs`` produces a personalized mask (pm) and a global
    mask (gm) from a context vector; the input representation is multiplied
    by one or both masks depending on ``flag``. Masks seen during training
    are accumulated in ``pm``/``gm``, masks seen during evaluation in
    ``pm_``/``gm_``.
    """

    def __init__(self, cs) -> None:
        super().__init__()
        self.cs = cs    # conditional-selection policy network (produces the masks)
        self.pm = []    # personalized masks collected while training
        self.gm = []    # global masks collected while training
        self.pm_ = []   # personalized masks collected while evaluating
        self.gm_ = []   # global masks collected while evaluating

    def forward(self, rep, tau=1, hard=False, context=None, flag=0):
        """Apply the context-derived masks to ``rep``.

        flag=0 returns (personalized, global); flag=1 returns only the
        personalized part; any other value returns only the global part.
        """
        mask_p, mask_g = self.cs(context, tau=tau, hard=hard)

        # Record the per-sample masks in the buffers matching the mode.
        buf_p, buf_g = (self.pm, self.gm) if self.training else (self.pm_, self.gm_)
        buf_p.extend(mask_p)
        buf_g.extend(mask_g)

        if flag == 0:
            # Mixed mode: hand back both masked representations.
            return rep * mask_p, rep * mask_g
        if flag == 1:
            # Personalized-only mode.
            return rep * mask_p
        # Global-only mode.
        return rep * mask_g