import torch
import torch.nn as nn


# Meta-model input: the vector of per-sub-model losses [L1, L2, ..., Ln]
# Meta-model output: weight parameters p_i forming a probability distribution
class Meta_model(nn.Module):
    """Meta model that maps per-sub-model losses to ensemble weights.

    Given a 1-D tensor of losses [L1, ..., Ln] (one entry per sub-model),
    `forward` produces a weight distribution p_i with sum(p_i) == 1 that is
    used to combine the sub-models' outputs.
    """

    def __init__(self, models):
        # Initialize nn.Module FIRST: assigning attributes before
        # super().__init__() is unsafe on nn.Module subclasses.
        super().__init__()
        # Deliberately a plain list, NOT nn.ModuleList: the sub-models'
        # parameters must stay unregistered so that named_parameters()
        # exposes only the weighting head (the meta update relies on this).
        self.models = models
        n_models = len(models)
        self.fc = nn.Sequential(
            nn.Linear(n_models, 32),
            nn.ReLU(),
            nn.Linear(32, n_models),
            nn.Softmax(dim=0),  # expects a 1-D loss vector; ensures sum(p_i) == 1
        )

    def forward(self, losses):
        """Map a 1-D tensor of n_models loss values to ensemble weights p_i."""
        return self.fc(losses)

    def predict(self, x):
        """Ensemble prediction for input x, keeping the computation graph.

        Uses each sub-model's mean output as a label-free proxy "loss" to
        drive the weight network (no ground-truth labels are needed here).
        """
        outputs = [model(x) for model in self.models]
        # out.mean() == out.sum() / out.numel(); cheap label-free proxy signal.
        proxies = torch.stack([out.mean() for out in outputs])

        # Route through forward() (hooks etc.) rather than self.fc directly.
        p = self(proxies)

        # Weighted combination; gradients flow through both p and the outputs.
        return sum(weight * output for weight, output in zip(p, outputs))




def ensemble_predict(x, models, p):
    """Weighted-sum ensemble of the sub-models' outputs.

    The sub-model forward passes run under no_grad, so gradients flow only
    through the weights p, never into the sub-models.
    """
    with torch.no_grad():
        outputs = [model(x) for model in models]
    # Weighted logits; weight * output keeps p attached to the graph.
    return sum(weight * out for weight, out in zip(p, outputs))


def meta_batch_update(datas, labels, meta_model, ξ=0.1, ε=0.0002):
    """Run one gradient-descent meta-update on the weighting network.

    The sub-models are treated as frozen: their outputs are computed under
    no_grad, and only meta_model's own registered parameters (the weighting
    head) are updated.

    Args:
        datas: input batch fed to every sub-model (shape assumed compatible
            with the sub-models — TODO confirm at the caller).
        labels: binary targets for BCEWithLogitsLoss; must match the
            sub-model output shape.
        meta_model: module exposing `.models` (the frozen sub-models) and
            mapping a 1-D loss vector to ensemble weights p_i.
        ξ: perturbation scale — currently unused; reserved for the
            perturbation/fast-adaptation variant this step was derived from.
        ε: step size of the meta-parameter update.

    Returns:
        dict with "ensemble_loss" (float) and "p" (weight tensor, still
        attached to the computation graph).
    """
    criterion = nn.BCEWithLogitsLoss()

    # Forward each frozen sub-model exactly once and reuse the outputs for
    # both the per-model losses and the ensemble (the original version ran
    # every forward pass twice).
    with torch.no_grad():
        outputs = [model(datas) for model in meta_model.models]
        losses = [criterion(out, labels) for out in outputs]

    # The weighting network maps the (detached) loss vector to weights p_i.
    meta_model.zero_grad()
    p = meta_model(torch.stack(losses))

    # Ensemble loss; gradients flow only through p into the weighting head.
    ensemble_logits = sum(weight * out for weight, out in zip(p, outputs))
    ensemble_loss = criterion(ensemble_logits, labels)
    ensemble_loss.backward()

    # Gradient DESCENT on the meta parameters. The original added the
    # gradient (param += ε·grad), which ascends the ensemble loss.
    with torch.no_grad():
        for param in meta_model.parameters():
            if param.grad is not None:
                param -= ε * param.grad

    return {
        "ensemble_loss": ensemble_loss.item(),
        "p": p,
    }
