# Source-free domain adaptation, method 1: PrDA
# Exploits high-confidence samples via the APM and a pseudo-label filtering strategy
# Reference paper: "Progressive Domain Adaptation from a Source Pre-trained Model"
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
import sklearn.metrics as metrics
import argparse
from sfda.lib import  *

from easydl import OptimizerManager, OptimWithSheduler, TrainingModeManager, inverseDecaySheduler

import ditto_light
from ditto_light.dataset import DittoDataset
from torch.utils import data
from transformers import AutoModel, AdamW, get_linear_schedule_with_warmup
from tensorboardX import SummaryWriter
from apex import amp
from sfda.APM_update import APM_init_update
from tool import evaluate
# Restrict training to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"]='0'
# Shorthand name -> HuggingFace model identifier for the supported language models.
lm_mp = {'roberta': 'roberta-base',
         'distilbert': 'distilbert-base-uncased'}
#import global_data
class DittoModel(nn.Module):
    """A baseline model for EM."""

    def __init__(self, device='cuda', lm='roberta', alpha_aug=0.8):
        super().__init__()
        # Resolve shorthand names (e.g. 'roberta') through lm_mp; anything
        # else is handed to transformers verbatim (path or hub id).
        self.bert = AutoModel.from_pretrained(lm_mp.get(lm, lm))

        self.device = device
        self.alpha_aug = alpha_aug

        # Binary classification head on top of the [CLS] embedding
        # (hidden_size is e.g. 768 for BERT-base models).
        self.fc = torch.nn.Linear(self.bert.config.hidden_size, 2)


    def forward(self, x1, x2=None):
        """Encode the left, right, and the concatenation of left+right.

        Args:
            x1 (LongTensor): a batch of ID's
            x2 (LongTensor, optional): a batch of ID's (augmented)

        Returns:
            Tensor: binary prediction logits, shape (batch_size, 2)
        """
        x1 = x1.to(self.device)  # (batch_size, seq_len)
        if x2 is None:
            # Plain encoding: take the [CLS] token embedding.
            cls_vec = self.bert(x1)[0][:, 0, :]
        else:
            # MixDA: encode the original and augmented sequences in a single
            # forward pass, then interpolate their [CLS] embeddings.
            x2 = x2.to(self.device)  # (batch_size, seq_len)
            combined = self.bert(torch.cat((x1, x2)))[0][:, 0, :]
            n = len(x1)
            lam = np.random.beta(self.alpha_aug, self.alpha_aug)
            cls_vec = combined[:n] * lam + combined[n:] * (1.0 - lam)

        return self.fc(cls_vec)

def train_step(train_iter, model, optimizer, scheduler, hp):
    """Perform a single training epoch over ``train_iter``.

    Args:
        train_iter (Iterator): the train data loader
        model (DMModel): the model
        optimizer (Optimizer): the optimizer (Adam or AdamW)
        scheduler (LRScheduler): learning rate scheduler
        hp (Namespace): other hyper-parameters (e.g., fp16)

    Returns:
        None
    """
    criterion = nn.CrossEntropyLoss()
    for i, batch in enumerate(train_iter):
        optimizer.zero_grad()
        # Move every tensor in the batch to the model's device once.
        batch = [t.to(model.device) for t in batch]

        if len(batch) == 2:
            x, y = batch
            prediction = model(x)
        else:
            # 3-element batch: original + MixDA-augmented sequences.
            x1, x2, y = batch
            prediction = model(x1, x2)

        # y is already on model.device; the original code transferred it a
        # second time here, which was redundant.
        loss = criterion(prediction, y)

        if hp.fp16:
            # apex mixed precision: scale the loss to avoid fp16 underflow.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        scheduler.step()
        if i % 10 == 0: # monitoring
            print(f"step: {i}, loss: {loss.item()}")
        del loss

def train(trainset, validset, testset, run_tag, hp):
    """Train and evaluate the model

    Args:
        trainset (DittoDataset): the training set
        validset (DittoDataset): the validation set
        testset (DittoDataset): the test set
        run_tag (str): the tag of the run
        hp (Namespace): Hyper-parameters (e.g., batch_size,
                        learning rate, fp16)

    Returns:
        None
    """
    padder = trainset.pad
    # create the DataLoaders
    train_iter = data.DataLoader(dataset=trainset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 collate_fn=padder)
    valid_iter = data.DataLoader(dataset=validset,
                                 batch_size=hp.batch_size*16,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=padder)
    test_iter = data.DataLoader(dataset=testset,
                                batch_size=hp.batch_size*16,
                                shuffle=False,
                                num_workers=0,
                                collate_fn=padder)

    # initialize model, optimizer, and LR scheduler
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = DittoModel(device=device,
                       lm=hp.lm,
                       alpha_aug=hp.alpha_aug)
    # BUG FIX: was `model.cuda()`, which crashes on CPU-only machines even
    # though `device` is detected above; honor the detected device instead.
    model = model.to(device)
    optimizer = AdamW(model.parameters(), lr=hp.lr)

    if hp.fp16:
        # apex mixed precision (opt level O2); requires a CUDA device.
        model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
    num_steps = (len(trainset) // hp.batch_size) * hp.n_epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=num_steps)

    # logging with tensorboardX
    writer = SummaryWriter(log_dir=hp.logdir)

    best_dev_f1 = best_test_f1 = 0.0
    for epoch in range(1, hp.n_epochs+1):
        # train
        model.train()
        train_step(train_iter, model, optimizer, scheduler, hp)

        # eval: pick the decision threshold on dev, apply it to test
        model.eval()
        dev_f1, th = evaluate.evaluateF1(model, valid_iter)
        test_f1 = evaluate.evaluateF1(model, test_iter, threshold=th)

        if dev_f1 > best_dev_f1:
            best_dev_f1 = dev_f1
            best_test_f1 = test_f1
            if hp.save_model:
                # save checkpoints under <logdir>/<task>/model.pt,
                # creating the directory if needed
                directory = os.path.join(hp.logdir, hp.task)
                os.makedirs(directory, exist_ok=True)

                # save the checkpoints for each component
                ckpt_path = os.path.join(directory, 'model.pt')
                ckpt = {'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'scheduler': scheduler.state_dict(),
                        'epoch': epoch}
                torch.save(ckpt, ckpt_path)

        print(f"epoch {epoch}: dev_f1={dev_f1}, f1={test_f1}, best_f1={best_test_f1}")

        # logging
        scalars = {'f1': dev_f1,
                   't_f1': test_f1}
        writer.add_scalars(run_tag, scalars, epoch)

    writer.close()

    # append the best test F1 of this run for offline aggregation
    with open('output.txt','a') as f:

        f.write(str(best_test_f1))
        f.write('\n')

def train_step_ft(train_iter, model, optimizer, scheduler, hp):
    """Perform a single fine-tuning epoch over ``train_iter``.

    The original body was a line-for-line copy of :func:`train_step`;
    delegating keeps the two training loops from drifting apart.

    Args:
        train_iter (Iterator): the train data loader
        model (DMModel): the model
        optimizer (Optimizer): the optimizer (Adam or AdamW)
        scheduler (LRScheduler): learning rate scheduler
        hp (Namespace): other hyper-parameters (e.g., fp16)

    Returns:
        None
    """
    train_step(train_iter, model, optimizer, scheduler, hp)

def fine_tune(trainset, validset, testset, sourcemodel, hp):
    """Fine-tune a pre-trained source model on target data and log the best F1.

    Args:
        trainset (DittoDataset): the training set
        validset (DittoDataset): the validation set
        testset (DittoDataset): the test set
        sourcemodel: the pre-trained model to continue training from
        hp (Namespace): hyper-parameters (batch_size, lr, n_epochs, fp16, ...)

    Returns:
        None
    """
    padder = trainset.pad

    def _loader(ds, bs, shuffle):
        # All loaders share the dataset's own pad() collator.
        return data.DataLoader(dataset=ds,
                               batch_size=bs,
                               shuffle=shuffle,
                               num_workers=0,
                               collate_fn=padder)

    train_iter = _loader(trainset, hp.batch_size, True)
    valid_iter = _loader(validset, hp.batch_size * 16, False)
    test_iter = _loader(testset, hp.batch_size * 16, False)

    # Start from the given source model instead of a fresh initialization.
    model = sourcemodel
    optimizer = AdamW(model.parameters(), lr=hp.lr)
    if hp.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O2')

    total_steps = (len(trainset) // hp.batch_size) * hp.n_epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=total_steps)

    best_dev_f1 = best_test_f1 = 0.0
    for epoch in range(1, hp.n_epochs + 1):
        # train
        model.train()
        train_step_ft(train_iter, model, optimizer, scheduler, hp)

        # eval: threshold chosen on dev, reused on test
        model.eval()
        dev_f1, th = evaluate.evaluateF1(model, valid_iter)
        test_f1 = evaluate.evaluateF1(model, test_iter, threshold=th)

        # Track the test F1 achieved at the best validation epoch.
        if dev_f1 > best_dev_f1:
            best_dev_f1, best_test_f1 = dev_f1, test_f1

        print(f"epoch {epoch}: dev_f1={dev_f1}, f1={test_f1}, best_f1={best_test_f1}")

    with open('fine-tune.txt', 'a') as f:
        f.write(str(best_test_f1))
        f.write('\n')

# PrDA: source-free domain adaptation with pseudo-label weighting and filtering
def sfda_train(trainset, validset, testset,hp,threshold,
               save_model,# only used for testing
               fixed_sourceNet,
               trainable_tragetNet):
    """Adapt a source-pretrained network to the target domain without source data.

    Two kinds of pseudo labels supervise the target network:
      1. predictions of the frozen source network (trains classifier_s2t), and
      2. prototype-similarity labels from the APM (adaptive prototype memory),
         filtered by a confidence mask (trains classifier_t).
    Their losses are blended with a schedule that shifts weight from (1) to (2)
    as training progresses.

    Args:
        trainset/validset/testset (DittoDataset): target-domain data splits
        hp (Namespace): hyper-parameters (batch_size, ...)
        threshold (float or None): decision threshold for the "match" class;
            None falls back to argmax (i.e. an implicit 0.5 threshold)
        save_model: unused during adaptation; kept for testing
        fixed_sourceNet: frozen source network (feature_extractor + classifier)
        trainable_tragetNet: target network exposing feature_extractor,
            classifier_s2t and classifier_t

    Returns:
        None (prints the running and best test F1)
    """
    padder = trainset.pad
    # create the DataLoaders
    train_iter = data.DataLoader(dataset=trainset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 collate_fn=padder)
    valid_iter = data.DataLoader(dataset=validset,
                                 batch_size=hp.batch_size*16,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=padder)
    test_iter = data.DataLoader(dataset=testset,
                                 batch_size=hp.batch_size*16,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=padder)
    # =====core coding=====
    # Training related hyperparameters
    update_frequncy = 100   # refresh the APM every 100 steps
    min_step = 5000
    weight_decay = 0.0005
    train_lr = 0.001
    momentum=0.9

    test_interval = 100
    # frozen source-network components
    fixed_feature_extractor_s =  (fixed_sourceNet.feature_extractor).cuda()
    fixed_classifier_s = (fixed_sourceNet.classifier).cuda()

    # trainable target-network components
    feature_extractor_t = (trainable_tragetNet.feature_extractor).cuda()
    classifier_s2t = (trainable_tragetNet.classifier_s2t).cuda()
    classifier_t = (trainable_tragetNet.classifier_t).cuda()

    scheduler = lambda step, initial_lr: inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75,
                                                              max_iter=(min_step))

    # 1) optimizer for the target feature extractor (lower LR for fine-tuning)
    optimizer_1 = optim.SGD(feature_extractor_t.parameters(), lr=train_lr / 10.0, weight_decay=weight_decay,
                          momentum=momentum, nesterov=True)
    optimizer_finetune = OptimWithSheduler(optimizer_1,scheduler)

    # 2) optimizer for the source-to-target classifier head
    optimizer_2 = optim.SGD(classifier_s2t.parameters(), lr=train_lr, weight_decay=weight_decay,
                  momentum=momentum, nesterov=True)
    optimizer_classifier_s2t = OptimWithSheduler(optimizer_2,scheduler)

    # 3) optimizer for the target classifier head
    optimizer_3 = optim.SGD(classifier_t.parameters(), lr=train_lr, weight_decay=weight_decay,
                  momentum=momentum, nesterov=True)
    optimizer_classifier_t = OptimWithSheduler(
        optimizer_3,
        scheduler)

    global_step = 0
    epoch_id = 0
    best_f1 = 0
    # start in eval mode; switched to train mode right before each update
    save_model.eval()
    fixed_sourceNet.eval()
    trainable_tragetNet.eval()

    # test F1 before SFDA starts (baseline)
    test_f1 = evaluate.evaluateF1(trainable_tragetNet, test_iter, threshold=threshold)
    print('>>>>>>>>>>>初始F1>>>>>>>>>>>>>>>>.')
    print(test_f1)


    # NOTE(review): the loop bound and evaluation interval below are debug
    # values; the intended settings are `global_step < min_step` and
    # `global_step % test_interval == 0`.
    while global_step < 20:
        epoch_id += 1
        for i, (x,y) in enumerate(test_iter):  # test_iter drives both adaptation and evaluation
            # print(y)  # labels are only needed for evaluation
            # Refresh the APM on the first step and then every `update_frequncy` steps.
            if (global_step) % update_frequncy == 0:
                prototype_memory, num_prototype_, prototype_memory_dict = APM_init_update(train_iter,feature_extractor_t,classifier_t,threshold)
            x = x.cuda()
            # forward pass through the frozen source-pretrained network
            # (fixed_sourceNet components use their own forward functions)
            fixed_fc1_s = fixed_feature_extractor_s(x)
            logit_s = fixed_classifier_s(fixed_fc1_s)  # already softmax-normalized
            probs = logit_s[:, 1]  # probability of the "match" class
            # source-model pseudo labels
            if threshold is not None:
                pseudo_label_s = [1 if p > threshold else 0 for p in probs]
                pseudo_label_s = torch.tensor(pseudo_label_s).cuda()
            else:
                # default case: argmax, i.e. a 0.5 threshold
                pseudo_label_s = torch.argmax(logit_s, dim=1)
            # forward pass through the target network
            fc1_t = feature_extractor_t(x)
            logit_s2t= classifier_s2t(fc1_t)
            logit_t = classifier_t(fc1_t)
            # APM-based pseudo labels via prototype similarity
            proto_feat_tensor = torch.Tensor(prototype_memory)  # (B * 2048)
            feature_embed_tensor = fc1_t.cpu()
            proto_feat_tensor = tensor_l2normalization(proto_feat_tensor)
            batch_feat_tensor = tensor_l2normalization(feature_embed_tensor)

            sim_mat = torch.mm(batch_feat_tensor, proto_feat_tensor.permute(1, 0))
            # average the similarities of each class's prototypes
            sim_mat = F.avg_pool1d(sim_mat.unsqueeze(0), kernel_size=num_prototype_, stride=num_prototype_).squeeze(
                0)  # (B, #class)
            pseudo_label_t = torch.argmax(sim_mat, dim=1).cuda()
            # confidence-based filtering
            arg_idxs = torch.argsort(sim_mat, dim=1, descending=True)  # (B, #class)

            first_group_idx = arg_idxs[:, 0]
            second_group_idx = arg_idxs[:, 1]

            # NOTE: comprehension variable renamed from `x` to `idx` so it no
            # longer shadows the input batch inside the comprehension scope.
            first_group_feat = [prototype_memory_dict[int(idx.data.numpy())] for idx in first_group_idx]
            first_group_feat_tensor = torch.tensor(np.concatenate(first_group_feat, axis=0))  # (B*P, 2048)
            first_group_feat_tensor = tensor_l2normalization(first_group_feat_tensor)

            second_group_feat = [prototype_memory_dict[int(idx.data.numpy())] for idx in second_group_idx]
            second_group_feat_tensor = torch.tensor(np.concatenate(second_group_feat, axis=0))  # (B*P, 2048)
            second_group_feat_tensor = tensor_l2normalization(second_group_feat_tensor)

            feature_embed_tensor_repeat = torch.Tensor(
                np.repeat(feature_embed_tensor.cpu().data.numpy(), repeats=num_prototype_, axis=0))
            feature_embed_tensor_repeat = tensor_l2normalization(feature_embed_tensor_repeat)

            first_dist_mat = 1 - torch.mm(first_group_feat_tensor,
                                          feature_embed_tensor_repeat.permute(1, 0))  # distance = 1 - similarity
            second_dist_mat = 1 - torch.mm(second_group_feat_tensor, feature_embed_tensor_repeat.permute(1, 0))

            # worst-case distance to the top-1 class vs best-case distance to
            # the runner-up class
            first_dist_mat = F.max_pool2d(first_dist_mat.permute(1, 0).unsqueeze(0).unsqueeze(0),
                                          kernel_size=num_prototype_, stride=num_prototype_).squeeze(0).squeeze(
                0)  # (B, #class)
            second_dist_mat = -1 * F.max_pool2d(-1 * second_dist_mat.permute(1, 0).unsqueeze(0).unsqueeze(0),
                                                kernel_size=num_prototype_, stride=num_prototype_).squeeze(0).squeeze(
                0)  # (B, #class)

            first_dist_vec = torch.diag(first_dist_mat)  # (B)
            second_dist_vec = torch.diag(second_dist_mat)  # (B)

            # keep only samples whose top-1 class is strictly closer
            confidence_mask = ((first_dist_vec - second_dist_vec) < 0).cuda()


            # optimize the target network using the two kinds of pseudo labels
            # Loss1: classifier_s2t output vs source-model pseudo labels
            ce_from_s2t = nn.CrossEntropyLoss()(logit_s2t, pseudo_label_s)

            # Loss2: classifier_t output vs APM pseudo labels,
            # filtered per-sample by the confidence mask
            ce_from_t = nn.CrossEntropyLoss(reduction='none')(logit_t, pseudo_label_t).view(-1, 1).squeeze(1)
            ce_from_t = torch.mean(ce_from_t * confidence_mask, dim=0, keepdim=True)

            # BUG FIX: np.float was removed from NumPy (deprecated in 1.20,
            # removed in 1.24); the builtin float does the same cast here.
            alpha = float(2.0 / (1.0 + np.exp(-10 * global_step / float(min_step // 2))) - 1.0)
            # blend: weight shifts from the source-label loss to the APM loss
            ce_total = (1 - alpha) * ce_from_s2t + alpha * ce_from_t

            fixed_sourceNet.train()
            trainable_tragetNet.train()
            with OptimizerManager([optimizer_finetune, optimizer_classifier_s2t, optimizer_classifier_t]):
                loss = ce_total
                loss.backward()

            global_step += 1
            # evaluation during training
            if global_step % 2 == 0:  # NOTE(review): debug interval; intended value is test_interval
                fixed_sourceNet.eval()
                trainable_tragetNet.eval()
                test_f1 = evaluate.evaluateF1(trainable_tragetNet, test_iter, threshold=threshold)
                print('>>>>>>>>>>>f1>>>>>>>>>>>>>>>>.')
                print(test_f1)
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.')
                if test_f1 > best_f1:
                    best_f1 = test_f1
    print('>>>>>>>>>>>best_f1>>>>>>>>>>>>>>>>.')
    print(best_f1)
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.')