import torch
import numpy as np
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F


def csi_temporal_contrastive_loss(csi_features, pseudo_labels, temperature=0.5):
    """Temporal InfoNCE contrastive loss over a batch of CSI features.

    Positive pairs are samples that share a pseudo label AND are adjacent in
    the batch (the batch is assumed to preserve the original frame order).
    All other non-self pairs form the denominator of the InfoNCE ratio.

    :param csi_features: (B, D) float tensor of features.
    :param pseudo_labels: (B,) tensor of integer pseudo labels.
    :param temperature: softmax temperature; lower values sharpen similarities.
    :return: scalar loss tensor; 0.0 when the batch has <= 1 sample or when
        no positive pair exists at all.
    """
    batch_size = csi_features.shape[0]
    device = csi_features.device
    if batch_size <= 1:
        # A single sample has nothing to contrast against.
        return torch.tensor(0.0, device=device)

    # Cosine similarity matrix scaled by temperature.
    norm_features = F.normalize(csi_features, dim=1)
    sim_matrix = torch.matmul(norm_features, norm_features.T) / temperature  # (B, B)

    # Positive mask: same pseudo label AND temporally adjacent frames.
    # The adjacency mask is just the two off-diagonals, built without a loop.
    same_label_mask = (pseudo_labels.unsqueeze(0) == pseudo_labels.unsqueeze(1)).float()
    off_diag = torch.ones(batch_size - 1, device=device)
    adjacent_mask = torch.diag(off_diag, diagonal=1) + torch.diag(off_diag, diagonal=-1)
    positive_mask = same_label_mask * adjacent_mask

    # InfoNCE: per-anchor sum of positive similarities over all non-self
    # similarities.
    exp_sim = torch.exp(sim_matrix)
    positive_sum = (positive_mask * exp_sim).sum(dim=1)
    total_sum = (exp_sim * (1 - torch.eye(batch_size, device=device))).sum(dim=1)
    total_sum = torch.clamp(total_sum, min=1e-8)  # guard against division by zero

    # Average only over anchors that actually have a positive pair; an anchor
    # without positives would otherwise contribute -log(1e-8) ~= 18.4 and
    # dominate the loss.
    has_positive = positive_mask.sum(dim=1) > 0
    if not has_positive.any():
        return torch.tensor(0.0, device=device)
    ratio = positive_sum[has_positive] / total_sum[has_positive]
    loss = -torch.log(ratio + 1e-8).mean()
    return loss


def split_labeled_data_by_ratio(X_test, y_test, label_ratio=0.05, min_label_per_class=2,
                                random_state=None):
    """Split target-domain data into a labeled and an unlabeled subset.

    For every class, ``max(int(n_cls * label_ratio), min_label_per_class)``
    samples (capped at the class size) are drawn without replacement as the
    labeled pool; all remaining samples become the unlabeled pool.

    :param X_test: target-domain features (array-like, first axis = samples).
    :param y_test: target-domain labels (array-like of ints).
    :param label_ratio: fraction of each class to label (e.g. 0.05 = 5%).
    :param min_label_per_class: lower bound of labeled samples per class, so
        no class ends up completely unlabeled.
    :param random_state: optional seed for a reproducible split; None keeps
        the split random (previous behavior).
    :return: (X_labeled, y_labeled, X_unlabeled)
    """
    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)
    rng = np.random.default_rng(random_state)
    labeled_indices = []

    # Draw labels per class so the labeled pool is stratified.
    for cls in np.unique(y_test):
        cls_indices = np.where(y_test == cls)[0]  # all samples of this device
        cls_total = len(cls_indices)

        # Target count: ratio-based, but at least `min_label_per_class`,
        # and never more than the class actually has.
        label_num = max(int(cls_total * label_ratio), min_label_per_class)
        label_num = min(label_num, cls_total)

        selected_idx = rng.choice(cls_indices, label_num, replace=False)
        labeled_indices.extend(selected_idx)

    # Build a boolean mask and partition the data.
    labeled_mask = np.zeros(len(y_test), dtype=bool)
    labeled_mask[labeled_indices] = True
    X_labeled = X_test[labeled_mask]
    y_labeled = y_test[labeled_mask]
    X_unlabeled = X_test[~labeled_mask]

    return X_labeled, y_labeled, X_unlabeled


def semi_supervised_finetune(model, X_labeled, y_labeled, X_unlabeled,
                             epochs=1, lr=3e-5, conf_thresh=0.6, temp_loss_weight=0.1):
    """Semi-supervised online fine-tuning with a small labeled ratio.

    1. Freezes all parameters except those whose name contains "classifier"
       (adjust the name filter to the actual model structure).
    2. Labeled data: supervised cross-entropy with ground-truth labels.
    3. Unlabeled data: confidence-filtered pseudo labels (>= ``conf_thresh``)
       drive a confidence-weighted classification loss plus a temporal
       contrastive loss on adapted features.

    :param model: torch module; classifier parameter names must contain
        "classifier" for anything to train.
    :param X_labeled: labeled features (array-like).
    :param y_labeled: integer labels for ``X_labeled``.
    :param X_unlabeled: unlabeled features (array-like); assumed to be in
        original temporal order (the unlabeled loader does not shuffle).
    :param epochs: number of fine-tuning passes.
    :param lr: Adam learning rate for the trainable parameters.
    :param conf_thresh: minimum softmax confidence for a pseudo label.
    :param temp_loss_weight: weight of the temporal contrastive term.
    :return: the fine-tuned model, switched to eval mode.
    """
    device = next(model.parameters()).device
    model.train()

    # ---- 1. Freeze the feature extractor; only the classifier trains ----
    for name, param in model.named_parameters():
        param.requires_grad = "classifier" in name

    # ---- 2. Data loaders ----
    # Labeled data: small shuffled batches with ground-truth labels.
    X_labeled_tensor = torch.tensor(X_labeled, dtype=torch.float32).to(device)
    y_labeled_tensor = torch.tensor(y_labeled, dtype=torch.long).to(device)
    labeled_loader = DataLoader(TensorDataset(X_labeled_tensor, y_labeled_tensor),
                                batch_size=8, shuffle=True, drop_last=True)

    # Unlabeled data: NOT shuffled, so the temporal contrastive loss can rely
    # on adjacent rows being temporally adjacent frames.
    X_unlabeled_tensor = torch.tensor(X_unlabeled, dtype=torch.float32).to(device)
    unlabeled_loader = DataLoader(TensorDataset(X_unlabeled_tensor),
                                  batch_size=16, shuffle=False, drop_last=True)

    # ---- 3. Optimizer and feature adapter ----
    # Only optimize the trainable (classifier) parameters.
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(trainable_params, lr=lr)

    # Adapter projecting the model's output (class logits) to a fixed
    # contrastive feature dimension; trained jointly, used only for the
    # contrastive loss below.
    with torch.no_grad():
        sample_output = model(X_labeled_tensor[:1])
        output_dim = sample_output.shape[1]  # model output width (num classes)
    target_feat_dim = 128
    feat_adapter = nn.Linear(output_dim, target_feat_dim).to(device)
    optimizer.add_param_group({'params': feat_adapter.parameters()})

    # ---- 4. Fine-tuning loop ----
    for epoch in range(epochs):
        total_loss = 0.0
        valid_batch_count = 0

        # zip() already stops at the shorter loader, so iterate both loaders
        # directly instead of materializing them into lists each epoch.
        for labeled_batch, unlabeled_batch in zip(labeled_loader, unlabeled_loader):
            # -- Supervised loss: ground-truth labels correct hard devices --
            x_lab, y_lab = labeled_batch
            outputs_lab = model(x_lab)
            labeled_loss = F.cross_entropy(outputs_lab, y_lab)

            # -- Pseudo-label the unlabeled batch. torch.max returns both the
            # confidence and the predicted class, so no extra gather/squeeze
            # is needed (the old squeeze() broke on size-1 batches). --
            x_unlab = unlabeled_batch[0]
            outputs_unlab = model(x_unlab)
            probs_unlab = F.softmax(outputs_unlab, dim=1)
            confidences, y_pseudo = torch.max(probs_unlab, 1)

            # Keep only confident pseudo labels; skip the batch when fewer
            # than 2 remain (the contrastive loss needs at least one pair).
            valid_mask = confidences >= conf_thresh
            if valid_mask.sum() < 2:
                continue

            outputs_valid = outputs_unlab[valid_mask]
            y_pseudo_valid = y_pseudo[valid_mask]
            conf_valid = confidences[valid_mask]

            # Confidence-based weighting: lower-confidence samples get larger
            # weights, mean-normalized to keep the overall loss scale stable.
            sigma = 1.0 - conf_valid + 0.1
            sigma = sigma / sigma.mean()
            unlab_cls_loss = (sigma * F.cross_entropy(outputs_valid, y_pseudo_valid,
                                                      reduction='none')).mean()

            # Temporal contrastive loss on adapter-projected features.
            feats_valid = feat_adapter(outputs_valid)
            unlab_temp_loss = csi_temporal_contrastive_loss(feats_valid, y_pseudo_valid)

            unlabeled_loss = unlab_cls_loss + temp_loss_weight * unlab_temp_loss

            # -- Mixed loss: labeled term weighted higher (0.7) so that real
            # supervision dominates the pseudo-label signal. --
            total_batch_loss = 0.7 * labeled_loss + 0.3 * unlabeled_loss

            optimizer.zero_grad()
            total_batch_loss.backward()
            optimizer.step()

            total_loss += total_batch_loss.item()
            valid_batch_count += 1

        # Per-epoch progress report.
        if valid_batch_count > 0:
            avg_loss = total_loss / valid_batch_count
            print(f"半监督微调轮次 {epoch + 1}/{epochs}，平均损失：{avg_loss:.4f}")
        else:
            print(f"半监督微调轮次 {epoch + 1}/{epochs}，无有效批次（低置信样本过多）")

    model.eval()
    return model


def evaluate_detailed_classification(model, X_test, y_test, device_list, batch_size=32):
    """Evaluate per-device and overall classification accuracy.

    :param model: trained torch module; its argmax over dim 1 is the class.
    :param X_test: test features (numpy array).
    :param y_test: integer labels (numpy array or list); class id i maps to
        ``device_list[i]``.
    :param device_list: device names; position i corresponds to class id i.
    :param batch_size: evaluation batch size.
    :return: (class_acc, overall_acc, error_details) — a list of per-device
        stat dicts, the rounded overall accuracy percentage, and a dict
        describing the misclassified samples.
    """
    # Input sanity checks. NOTE: asserts are stripped under `python -O`.
    assert len(X_test) == len(y_test), f"特征与标签长度不匹配：X={len(X_test)}, y={len(y_test)}"
    assert isinstance(X_test, np.ndarray) and isinstance(y_test, (np.ndarray, list)), "输入数据格式错误"

    # Normalize labels to a numpy array.
    y_test = np.array(y_test) if isinstance(y_test, list) else y_test
    device = next(model.parameters()).device
    model.eval()

    # Batched, shuffle-free evaluation so predictions align with y_test.
    X_tensor = torch.tensor(X_test, dtype=torch.float32).to(device)
    y_tensor = torch.tensor(y_test, dtype=torch.long).to(device)
    dataset = TensorDataset(X_tensor, y_tensor)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

    # Collect predictions for the whole test set.
    all_preds = []
    with torch.no_grad():
        for x, _ in loader:
            outputs = model(x)
            _, preds = torch.max(outputs, 1)
            all_preds.extend(preds.cpu().numpy().flatten())
    all_preds_np = np.array(all_preds)

    overall_total = len(y_test)
    # Single source of truth for correctness: compare the full prediction
    # vector once, so overall_acc and error_rate can never disagree (the old
    # per-class accumulation silently dropped classes absent from
    # device_list).
    correct_mask = (all_preds_np == y_test)
    overall_correct = int(np.sum(correct_mask))

    # Per-device accuracy breakdown (int() casts keep the dicts
    # JSON-serializable).
    class_acc = []
    for idx, device_name in enumerate(device_list):
        device_mask = (y_test == idx)
        device_total = int(np.sum(device_mask))
        if device_total == 0:
            class_acc.append({
                "device_name": device_name,
                "total_samples": 0,
                "correct_samples": 0,
                "classification_acc": 0.0
            })
            continue

        device_correct = int(np.sum(all_preds_np[device_mask] == idx))
        device_acc = 100 * device_correct / device_total

        class_acc.append({
            "device_name": device_name,
            "total_samples": device_total,
            "correct_samples": device_correct,
            "classification_acc": round(device_acc, 2)
        })

    # Overall accuracy and error analysis.
    overall_acc = 100 * overall_correct / overall_total if overall_total > 0 else 0.0
    error_mask = ~correct_mask
    error_count = int(np.sum(error_mask))
    error_details = {
        "misclassified_indices": np.where(error_mask)[0],
        "true_labels": y_test[error_mask],
        "predicted_labels": all_preds_np[error_mask],
        "error_count": error_count,
        "error_rate": 100 * error_count / overall_total if overall_total > 0 else 0.0
    }

    return class_acc, round(overall_acc, 2), error_details