from data import *
import time
import torch
import torch.nn as nn
import numpy as np

def APM_init_update(train_iter, feature_extractor, classifier_t, threshold):
    """Build the initial Adaptive Prototype Memory (APM) for a binary task.

    Runs the feature extractor and classifier over every batch of
    ``train_iter``, assigns pseudo labels, ranks samples per class by
    normalized prediction entropy, and keeps the lowest-entropy features
    as class prototypes. Each class is tiled/truncated to the same
    prototype count so the memory is rectangular.

    Args:
        train_iter: iterable of ``(inputs, labels)`` batches; inputs are
            moved to CUDA before the forward pass (labels are ignored).
        feature_extractor: module mapping inputs to features
            (assumed shape ``(B, 768)`` — width is hard-coded downstream;
            TODO confirm against the model definition).
        classifier_t: module mapping features to class scores ``(B, 2)``;
            outputs are treated as already softmax-normalized probabilities.
        threshold: if not ``None``, a sample is pseudo-labeled 1 when its
            class-1 probability exceeds this value; otherwise ``argmax``
            is used (equivalent to a 0.5 threshold).

    Returns:
        ``(prototype_memory, num_prototype_, prototype_memory_dict)``:
        ``prototype_memory`` is a ``(2 * num_prototype_, feat_dim)`` numpy
        array holding class-0 prototypes followed by class-1 prototypes,
        ``num_prototype_`` is the per-class prototype count, and
        ``prototype_memory_dict`` maps class index -> its
        ``(num_prototype_, feat_dim)`` block.
    """
    start_time = time.time()
    num_classes = 2  # binary match / no-match task

    h_dict = {cls: [] for cls in range(num_classes)}     # per-class entropies
    feat_dict = {cls: [] for cls in range(num_classes)}  # per-class features
    pseudo_label_list = []  # pseudo labels of every batch, in iteration order
    available_cls = []      # classes seen at least once in some batch
    missing_cls = []        # classes absent from a batch (diagnostic only)

    for (target_lbcorr, _label_target_lbcorr) in train_iter:
        target_lbcorr = target_lbcorr.cuda()
        fc1_lbcorr = feature_extractor.forward(target_lbcorr)  # (B, feat_dim)
        after_softmax = classifier_t.forward(fc1_lbcorr)       # (B, 2)

        # Pseudo labels: thresholded class-1 probability, or plain argmax.
        if threshold is not None:
            probs = after_softmax[:, 1]  # probability of the "match" class
            pseudo_label = (probs > threshold).long()
        else:
            # Default case: argmax, i.e. a 0.5 decision boundary.
            pseudo_label = torch.argmax(after_softmax, dim=1)
        pseudo_label = pseudo_label.cpu()
        pseudo_label_list.append(pseudo_label.numpy())

        # Normalized Shannon entropy in [0, 1]; epsilon avoids log(0).
        entropy = torch.sum(-after_softmax * torch.log(after_softmax + 1e-10),
                            dim=1, keepdim=True)
        entropy_norm = entropy / np.log(after_softmax.size(1))
        entropy_norm = entropy_norm.squeeze(1).cpu()

        for cls in range(num_classes):
            # Collect entropies/features of samples pseudo-labeled `cls`.
            list_loc = torch.where(pseudo_label == cls)[0]
            if len(list_loc) == 0:
                missing_cls.append(cls)
                continue
            available_cls.append(cls)
            filtered_ent = torch.index_select(entropy_norm, 0, list_loc)
            filtered_feat = torch.index_select(fc1_lbcorr.cpu(), 0, list_loc)

            h_dict[cls].append(filtered_ent.data.numpy())
            feat_dict[cls].append(filtered_feat.data.numpy())

    available_cls = np.unique(available_cls)  # normally [0, 1]

    # Reference bound: the largest per-class minimum entropy, widened by alpha.
    max_top1_ent = 0
    for cls in available_cls:
        ents_np = np.concatenate(h_dict[cls], axis=0)
        top1_ent = ents_np[np.argsort(ents_np)[0]]  # lowest entropy in class
        if max_top1_ent < top1_ent:
            max_top1_ent = top1_ent
    alpha = 2  # heuristic widening factor (no theoretical basis — see paper)
    max_top1_ent = max_top1_ent * alpha

    class_protypeNum_dict = {}
    # Matched class (1): number of samples under the entropy bound.
    ents_np = np.concatenate(h_dict[1], axis=0)
    class_protypeNum_dict[1] = (ents_np <= max_top1_ent).sum()

    # Unmatched class (0): scale class-1's count by the pseudo-label ratio.
    pseudo_label_dict = np.concatenate(pseudo_label_list, axis=0)
    match_0 = int((pseudo_label_dict == 0).sum())  # count of "no match"
    match_1 = int((pseudo_label_dict == 1).sum())  # count of "match"
    # max(match_1, 1) guards against ZeroDivisionError when nothing matched.
    class_protypeNum_dict[0] = class_protypeNum_dict[1] * (
        int(match_0 / max(match_1, 1) + 0.5) + 1)

    # Every class is padded up to the (larger) class-0 count.
    max_prototype = class_protypeNum_dict[0]

    prototype_memory = []
    prototype_memory_dict = {}
    for cls in range(num_classes):
        ents_np = np.concatenate(h_dict[cls], axis=0)
        feats_np = np.concatenate(feat_dict[cls], axis=0)
        ent_idxs = np.argsort(ents_np)

        # Lowest-entropy (most confident) samples become prototypes.
        truncated_feat = feats_np[ent_idxs[:class_protypeNum_dict[cls]]]
        # Tile so the count is >= max_prototype, then truncate so every
        # class ends up with exactly max_prototype prototypes.
        reps = int(max_prototype / truncated_feat.shape[0]) + 1
        fit_to_max_prototype = np.concatenate([truncated_feat] * reps, axis=0)
        fit_to_max_prototype = fit_to_max_prototype[:max_prototype, :]

        prototype_memory.append(fit_to_max_prototype)
        prototype_memory_dict[cls] = fit_to_max_prototype

    print("** APM update... time:", time.time() - start_time)
    prototype_memory = np.concatenate(prototype_memory, axis=0)
    num_prototype_ = int(max_prototype)

    return prototype_memory, num_prototype_, prototype_memory_dict

