# Source-free domain adaptation (SFDA), method 2: SHOT
# Adapts the target model using the Information Maximization (IM) loss plus self-supervised pseudo-labeling
# Reference: "Do We Really Need to Access the Source Data? Source Hypothesis Transfer for Unsupervised Domain Adaptation" (Liang et al., ICML 2020)
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
import sklearn.metrics as metrics
import argparse
from sfda.lib import  *

from easydl import OptimizerManager, OptimWithSheduler, TrainingModeManager, inverseDecaySheduler

import ditto_light
from ditto_light.dataset import DittoDataset
from torch.utils import data
from transformers import AutoModel, AdamW, get_linear_schedule_with_warmup
from tensorboardX import SummaryWriter
from apex import amp
from tool import evaluate
from tool import calculate
def shot_train(trainset, validset, testset,hp,threshold,tragetNet,netF, netC):
    print("======SHOT方法进行SFDA========")
    padder = trainset.pad
    # create the DataLoaders
    train_iter = data.DataLoader(dataset=trainset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 collate_fn=padder)
    valid_iter = data.DataLoader(dataset=validset,
                                 batch_size=hp.batch_size * 16,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=padder)
    test_iter = data.DataLoader(dataset=testset,
                                batch_size=hp.batch_size * 16,
                                shuffle=False,
                                num_workers=0,
                                collate_fn=padder)

    befor_sfda_f1 = evaluate.evaluateF1(tragetNet, test_iter, threshold=threshold)
    print('>>>>>>>>>>>初始F1>>>>>>>>>>>>>>>>.')
    print(befor_sfda_f1)

    iter_num = 0

    while iter_num < 100:
        # 目前只考虑IM损失 不考虑伪标签的自监督训练 看效果是否有提升
        for i, (x, y) in enumerate(test_iter):
            x = x.cuda()
            classifier_loss = torch.tensor(0.0).cuda()
            if hp.ent:
                outputs_test = tragetNet(x)
                softmax_out = nn.Softmax(dim=1)(outputs_test)# torch.Size([64, 10]) 对分类器的输出进行softmax
                # torch.mean(loss.Entropy(softmax_out)) 表示L_ent
                entropy_loss = torch.mean(calculate.Entropy().Entropy(softmax_out)) # 计算样本熵值的平均
                if hp.gent:# 如果考虑多样性损失
                    msoftmax = softmax_out.mean(dim=0) # torch.Size([10]) 样本输出的平均
                    # torch.sum(-msoftmax * torch.log(msoftmax + 1e-5)) 表示L_div
                    entropy_loss -= torch.sum(-msoftmax * torch.log(msoftmax + 1e-5)) # 减掉输出平均的熵值
                #IM的计算为：输出平均的熵 - 输出的熵的平均 IM越大越好
                #IM_Loss的计算为：输出的熵的平均 - 输出平均的熵 IM越小越好
                im_loss = entropy_loss
                classifier_loss += im_loss
