import torch
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader


@torch.no_grad()
def compute_confidence(estimator, dataset, device, batch_size=64, num_workers=4, pin_memory=True):
    """Estimate per-sample confidence with the prior estimator.

    Iterates the dataset batch-wise and records, for every sample index,
    the maximum softmax probability (confidence) and the ground-truth label.

    Returns:
        (conf_by_idx, label_by_idx): dicts mapping sample index -> confidence
        (float) and sample index -> label (int).
    """
    estimator.eval()
    eval_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,  # keep dataset order so indices stay aligned
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    confidences, labels_out = {}, {}
    for batch_x, batch_y, batch_idx in eval_loader:
        batch_x = batch_x.to(device, non_blocking=True)
        batch_probs = F.softmax(estimator(batch_x), dim=1)
        top_probs = batch_probs.max(dim=1).values.cpu().tolist()
        for sample_id, conf, lbl in zip(batch_idx.tolist(), top_probs, batch_y.tolist()):
            confidences[sample_id] = float(conf)
            labels_out[sample_id] = int(lbl)
    return confidences, labels_out


def lca_split(conf_by_idx):
    """Split sample indices into hard/easy groups by mean confidence.

    Indices whose confidence falls below the mean are "hard"; all others
    (including those exactly at the mean) are "easy". An empty input uses a
    0.5 fallback threshold and yields two empty lists.

    Returns:
        (hard_idx, easy_idx): two lists of sample indices.
    """
    threshold = float(np.mean(list(conf_by_idx.values()))) if conf_by_idx else 0.5
    hard, easy = [], []
    for idx, conf in conf_by_idx.items():
        (hard if conf < threshold else easy).append(idx)
    return hard, easy


@torch.no_grad()
def mcc_filter(estimator, base_dataset, heavy_indices, heavy_aug_num, device,
               base_transform, heavy_transform, batch_size=64, num_workers=4, pin_memory=True):
    """Score heavy augmentations of hard samples batch-wise and keep only
    those whose confidence does not exceed the (mean + std) threshold.

    Each hard index is expanded into `heavy_aug_num` heavy-augmented copies;
    copies the estimator is over-confident about are discarded.

    Returns:
        List of (index, 'heavy') tuples that passed the filter (empty when
        there are no hard samples or `heavy_aug_num` is 0).
    """
    estimator.eval()
    # One entry per heavy augmentation of every hard sample.
    heavy_samples = [(i, 'heavy') for i in heavy_indices for _ in range(heavy_aug_num)]
    if not heavy_samples:
        return []  # no hard samples to process

    from .mydataset import AdaptAugDataset
    heavy_ds = AdaptAugDataset(base_dataset, heavy_samples, base_transform, heavy_transform)
    heavy_loader = DataLoader(heavy_ds, batch_size=batch_size, shuffle=False,
                              num_workers=num_workers, pin_memory=pin_memory)

    conf_list = []
    for x, _ in heavy_loader:  # labels are not needed for confidence
        x = x.to(device, non_blocking=True)
        probs = F.softmax(estimator(x), dim=1)
        conf_list.extend(probs.max(dim=1)[0].cpu().tolist())

    conf_arr = np.asarray(conf_list, dtype=np.float32)
    # conf_arr is non-empty here (early return above), so no fallback needed.
    thr = float(conf_arr.mean() + conf_arr.std())

    # shuffle=False guarantees conf_arr follows heavy_samples order, so pair
    # them directly instead of tracking a running pointer.
    passed = [sample for sample, c in zip(heavy_samples, conf_arr) if c <= thr]

    print(f"[MCC] 难样本增强数: {len(heavy_samples)} → 清洗后: {len(passed)}")
    return passed


def build_augmented_loader(prior_estimator, base_dataset, device, args, base_transform, heavy_transform):
    """LCA -> MCC -> merge; build the final training DataLoader.

    Pipeline: score every sample's confidence with the prior estimator,
    split into hard/easy (LCA), clean heavy augmentations of the hard
    samples (MCC), then combine all originals (light path), the kept heavy
    augmentations, and one light augmentation per easy sample into one
    shuffled loader.
    """
    # 1) Per-sample confidence from the prior estimator.
    conf_by_idx, _ = compute_confidence(
        prior_estimator, base_dataset, device,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
    )

    # 2) LCA: split indices by mean-confidence threshold.
    hard_idx, easy_idx = lca_split(conf_by_idx)

    # 3) MCC: clean heavy augmentations of the hard samples
    # (half-size batches, floored at 8, since heavy augs are costlier).
    kept_heavy = mcc_filter(
        prior_estimator, base_dataset, hard_idx, args.heavy_aug_num,
        device, base_transform, heavy_transform,
        batch_size=max(8, args.batch_size // 2),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
    )

    # 4) Every original sample through the light path, then the cleaned
    # heavy augs, then one extra light augmentation per easy sample.
    final_samples = (
        [(i, 'light') for i in range(len(base_dataset))]
        + kept_heavy
        + [(i, 'light') for i in easy_idx]
    )

    from .mydataset import AdaptAugDataset
    ds = AdaptAugDataset(base_dataset, final_samples, base_transform, heavy_transform)

    return DataLoader(
        ds, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=args.pin_mem,
        drop_last=True,  # drop a possible trailing size-1 batch that would break BatchNorm
    )