import os

os.environ['MKL_NUM_THREADS'] = '1'
import time
import random
import datetime
import argparse
import warnings
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import data_loader_tst
import numpy as np
import torch.nn as nn
from collections import defaultdict
from models_tst import Prototypical, Discriminator
from loss_tst import classification_loss_func, explicit_semantic_alignment_loss_func, knowledge_distillation_loss_func, \
    get_prototype_label, compute_cluster_center, pred_u_label, cal_one_loss, cal_st_center_loss, l2_feature, \
    cal_git_loss, label_smooth_ce_loss, get_na_label, get_decrease_label, knn_get_label, simpleshot, orth_loss, \
    three_alignment_loss, three_alignment_loss_mse, k_means
from utils_tst import write_log_record, seed_everything, make_dirs

warnings.filterwarnings('ignore')

# ----------------------- command-line configuration -----------------------
parser = argparse.ArgumentParser(
    description='Simultaneous Semantic Alignment Network for Heterogeneous Domain Adaptation')
parser.add_argument('--source', type=str, default='caltech', help='Source domain',
                    choices=['amazon_surf', 'amazon_decaf', 'amazon_resnet', 'webcam_surf', 'webcam_decaf',
                             'webcam_resnet', 'caltech_surf', 'caltech_decaf', 'caltech_resnet', 'dslr_surf',
                             'dslr_decaf', 'dslr_resnet', 'nustag', 'imagenet',
                             'english', 'french', 'german', 'italian'])
parser.add_argument('--target', type=str, default='amazon', help='Target domain',
                    choices=['amazon_surf', 'amazon_decaf', 'amazon_resnet', 'webcam_surf', 'webcam_decaf',
                             'webcam_resnet', 'caltech_surf', 'caltech_decaf', 'caltech_resnet', 'dslr_surf',
                             'dslr_decaf', 'dslr_resnet', 'nustag', 'imagenet',
                             'spanish5', 'spanish10', 'spanish15', 'spanish20'])
parser.add_argument('--cuda', type=str, default='0', help='Cuda index number')
parser.add_argument('--nepoch', type=int, default=3000, help='Epoch amount')
parser.add_argument('--partition', type=int, default=5, help='Number of partition')
# NOTE(review): --kk reuses the help text of --partition and is never used in
# this file; its real meaning could not be confirmed — verify upstream.
parser.add_argument('--kk', type=int, default=5, help='Number of partition')
parser.add_argument('--prototype', type=str, default='three', choices=['two', 'three'],
                    help='how many prototypes used for domain and general alignment loss')
parser.add_argument('--layer', type=str, default='double', choices=['single', 'double'],
                    help='Structure of the projector network, single layer or double layers projector')
parser.add_argument('--d_common', type=int, default=256, help='Dimension of the common representation')
parser.add_argument('--optimizer', type=str, default='mSGD', choices=['SGD', 'mSGD', 'Adam'], help='optimizer options')
parser.add_argument('--lr', type=float, default=0.1, help='Learning rate')
parser.add_argument('--temperature', type=float, default=5.0, help='source softmax temperature')
parser.add_argument('--alpha', type=float, default=0.1,
                    help='Trade-off parameter in front of L_soft, set to 0.0 to turn it off'
                         'Weight the (1 - alpha) * hard CE loss and alpha * soft CE loss')
parser.add_argument('--beta', type=float, default=0.1, help='Trade-off parameter of L_ESA, set to 0 to turn off')
# gamma weights the domain adversarial loss (L_D) inside train().
parser.add_argument('--gamma', type=float, default=0.1, help='Trade-off parameter of L_D, set to 0 to turn off')
# NOTE(review): --sss and --neg_sss repeat --beta's help text verbatim and are
# unused in this file; their actual semantics are unverified — confirm before
# relying on the help output.
parser.add_argument('--sss', type=float, default=0.1, help='Trade-off parameter of L_ESA, set to 0 to turn off')
parser.add_argument('--neg_sss', type=float, default=1.0, help='Trade-off parameter of L_ESA, set to 0 to turn off')
parser.add_argument('--combine_pred', type=str, default='Cosine',
                    choices=['Euclidean', 'Cosine', 'Euclidean_threshold', 'Cosine_threshold', 'None'],
                    help='the way of prototype predictions Euclidean, Cosine, None(not use)')
parser.add_argument('--checkpoint_path', type=str, default='checkpoint', help='All records save path')
parser.add_argument('--seed', type=int, default=2020, help='seed for everything')

args = parser.parse_args()
# Timestamp used for record/checkpoint naming.
args.time_string = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H-%M-%S')

if torch.cuda.is_available():
    # NOTE(review): CUDA_VISIBLE_DEVICES is set after `import torch` and after
    # torch.cuda.is_available() has touched the driver; this usually still
    # works because contexts initialize lazily, but it can be flaky — confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    # Single-digit index: also pin the default device explicitly.
    if len(args.cuda) == 1:
        torch.cuda.set_device(int(args.cuda))

# seed for everything
seed_everything(args)
# make dirs
# make_dirs(args)
print(str(args))


def test(model, configuration, srctar):
    model.eval()
    if srctar == 'source':
        loader = configuration['source_data']
        N = configuration['ns']
    elif srctar == 'labeled_target':
        loader = configuration['labeled_target_data']
        N = configuration['nl']
    elif srctar == 'unlabeled_target':
        loader = configuration['unlabeled_target_data']
        N = configuration['nu']
    else:
        raise Exception('Parameter srctar invalid! ')

    with torch.no_grad():
        feature, label = loader[0].float(), loader[1].reshape(-1, ).long()
        if torch.cuda.is_available():
            feature, label = feature.cuda(), label.cuda()
        classifier_output, _ = model(input_feature=feature)
        _, pred = torch.max(classifier_output.data, 1)
        n_correct = (pred == label).sum().item()
        acc = float(n_correct) / N * 100.

    return acc


def train(model, model_d, optimizer, optimizer_d, configuration):
    """Train the prototypical network with adversarial domain alignment.

    Stage 1: augment the labeled target set with confidently pseudo-labeled
    unlabeled-target samples (k-means refinement + per-class margin
    selection). Stage 2: train on source CE + labeled-target CE + a
    discriminator-based domain adversarial loss, evaluating on the remaining
    unlabeled target samples every epoch.

    Args:
        model: Prototypical feature extractor / classifier.
        model_d: Discriminator network for domain adversarial alignment.
        optimizer: optimizer for ``model``.
        optimizer_d: optimizer for ``model_d``.
        configuration: dict from the data loader with data tensors and the
            sizes 'ns' / 'nt' / 'class_number'.

    Returns:
        float: best unlabeled-target accuracy (percent) seen during training.
    """
    best_acc = 0

    # ----------------------------- prepare data -----------------------------
    source_data = configuration['source_data']
    l_target_data = configuration['labeled_target_data']
    u_target_data = configuration['unlabeled_target_data']
    source_feature, source_label = source_data[0].float(), source_data[1].reshape(-1, ).long()
    l_target_feature, l_target_label = l_target_data[0].float(), l_target_data[1].reshape(-1, ).long()
    u_target_feature, u_target_label = u_target_data[0].float(), u_target_data[1].reshape(-1, ).long()
    if torch.cuda.is_available():
        source_feature, source_label = source_feature.cuda(), source_label.cuda()
        l_target_feature, l_target_label = l_target_feature.cuda(), l_target_label.cuda()
        u_target_feature, u_target_label = u_target_feature.cuda(), u_target_label.cuda()

    # One cluster center per class, computed from the labeled target data.
    t_cluster = compute_cluster_center(l_target_feature, l_target_label, configuration['class_number'])

    # Refine the centers on the unlabeled target data via k-means.
    new_center, new_label = k_means(u_target_feature, u_target_label, t_cluster)
    print(new_center.shape)

    # Squared Euclidean distance of every unlabeled sample to every center.
    dist_xt_ct_temp = u_target_feature.unsqueeze(1) - new_center.unsqueeze(0)
    dist_xt_ct = dist_xt_ct_temp.pow(2).sum(2)

    # Two largest similarities (= two smallest distances) per sample.
    dis, idx_sim = (-1 * dist_xt_ct).data.topk(2, 1, True, True)

    print('dis.shape', dis.shape)
    class_num = configuration['class_number']

    # Margin between the best and second-best center: a large margin means a
    # confident cluster assignment.
    aver_dis = dis[:, 0] - dis[:, 1]

    # Pseudo-label = nearest refined center.
    new_label = idx_sim[:, 0].reshape(-1)

    # Per-class mean margin, used below as a selection threshold.
    # BUGFIX: allocate on the data's device instead of calling .cuda()
    # unconditionally, which crashed on CPU-only machines.
    feature_centers = torch.zeros(class_num, device=u_target_feature.device)
    for i in range(class_num):
        # NOTE(review): a class with no assigned samples yields NaN here
        # (mean of empty); preserved from the original — confirm acceptable.
        feature_centers[i] = aver_dis[new_label == i].mean()
    print(feature_centers)

    # Select samples whose margin exceeds their class's mean margin.
    u_select = []
    not_u_select = []
    print('dis_shape', dis.shape)
    for i in range(len(new_label)):
        if aver_dis[i] > feature_centers[new_label[i]]:
            u_select.append(i)
        else:
            not_u_select.append(i)

    select_u_feature = u_target_feature[u_select, :]
    select_u_label = new_label[u_select]

    # Report pseudo-label quality on the selected subset (diagnostic only).
    true_u_label = u_target_label[u_select]
    n_correct = (select_u_label == true_u_label).sum().item()
    if len(select_u_label) > 0:  # guard the division when nothing is selected
        acc = float(n_correct) / len(select_u_label) * 100.
        print('select acc', acc, len(select_u_label))

    left_u_feature = u_target_feature[not_u_select, :]

    # Promote the confidently pseudo-labeled samples to labeled target data;
    # the remainder stays unlabeled. Total target count still equals 'nt'.
    l_target_feature = torch.cat((l_target_feature, select_u_feature), dim=0)
    l_target_label = torch.cat((l_target_label, select_u_label))

    u_target_feature = left_u_feature

    # ----------------------------- training loop ----------------------------
    for epoch in range(args.nepoch):
        start_time = time.time()
        model.train()
        model_d.train()
        optimizer.zero_grad()
        optimizer_d.zero_grad()

        source_output, source_learned_feature = model(input_feature=source_feature)
        l_target_output, l_target_learned_feature = model(input_feature=l_target_feature)
        u_target_output, u_target_learned_feature = model(input_feature=u_target_feature)

        error_overall = 0.0

        # ======================= domain adversarial loss =======================
        transfer_criterion = nn.BCELoss()
        # GRL-style schedule: alpha ramps smoothly from 0 toward 1 over training.
        alpha = 2. / (1. + np.exp(-10 * float(epoch / args.nepoch))) - 1
        # 1 = source domain, 0 = target domain (labeled + unlabeled).
        domain_labels = torch.from_numpy(
            np.array([[1]] * configuration['ns'] + [[0]] * configuration['nt'])).float()
        if torch.cuda.is_available():
            domain_labels = domain_labels.cuda()
        discriminator_out = model_d(
            torch.cat((source_learned_feature, l_target_learned_feature, u_target_learned_feature), dim=0), alpha)
        domain_adv_alignment_loss = transfer_criterion(discriminator_out, domain_labels)
        error_overall += args.gamma * domain_adv_alignment_loss
        if epoch % 30 == 0:
            print('Use domain adversarial loss: ', (args.gamma * domain_adv_alignment_loss).item())

        # ============================  source ce loss ============================
        s_ce_loss = classification_loss_func(source_output, source_label)
        error_overall += s_ce_loss
        if epoch % 30 == 0:
            print('Source CE loss: ', s_ce_loss.item())

        # ========================  target labeled ce loss ========================
        l_ce_loss = classification_loss_func(l_target_output, l_target_label)
        error_overall += l_ce_loss
        if epoch % 30 == 0:
            print('L target CE loss: ', l_ce_loss.item())

        # backward propagation
        error_overall.backward()
        optimizer.step()
        optimizer_d.step()

        # Testing phase: evaluate on the remaining unlabeled target every epoch.
        acc_unlabeled_tar = test(model, configuration, 'unlabeled_target')
        end_time = time.time()

        if epoch % 50 == 0:
            print('ACC -> ', end='')
            print('Epoch: [{}/{}], {:.1f}s, UTar acc: {:.4f}%'.format(
                epoch, args.nepoch, end_time - start_time, acc_unlabeled_tar))

        if best_acc < acc_unlabeled_tar:
            best_acc = acc_unlabeled_tar
            best_text = args.source.ljust(10) + '-> ' + args.target.ljust(10) \
                        + ' The proposed model for HDA achieves current best accuracy. ' + str(best_acc)
            print(best_text)
            # A new best this late suggests training has not converged yet.
            if epoch >= 1000:
                print('need more epoch training')

    print('Best Test Accuracy: {:.4f}%'.format(best_acc))
    return best_acc

if __name__ == '__main__':
    # Run `partition` independent splits and report the average accuracy.
    result = 0.
    max_result = []
    for i in range(args.partition):
        configuration = data_loader_tst.get_configuration(args)
        model = Prototypical(configuration['d_source'], configuration['d_target'], args.d_common,
                             configuration['class_number'], args.layer)
        model_D = Discriminator(args.d_common)
        if torch.cuda.is_available():
            model = model.cuda()
            model_D = model_D.cuda()
        # BUGFIX: the original built `optimizer_d` only in the 'mSGD' branch,
        # so '--optimizer SGD'/'Adam' raised a NameError at train() (or, on
        # later partitions, silently reused a stale optimizer bound to the
        # previous model_D). Build both optimizers in every branch.
        if args.optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=args.lr)
            optimizer_d = optim.SGD(model_D.parameters(), lr=args.lr)
        elif args.optimizer == 'mSGD':
            optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9,
                                  weight_decay=0.001, nesterov=True)
            optimizer_d = optim.SGD(model_D.parameters(), lr=args.lr, momentum=0.9,
                                    weight_decay=0.001, nesterov=True)
        elif args.optimizer == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.99))
            optimizer_d = optim.Adam(model_D.parameters(), lr=args.lr, betas=(0.9, 0.99))
        else:
            # Unreachable with argparse `choices`, but fail loudly if reached.
            raise ValueError('Unknown optimizer: ' + args.optimizer)

        local_result = train(model, model_D, optimizer, optimizer_d, configuration)
        result += local_result
        max_result.append(local_result)
        print('max_result', max_result)
        # Running average over the partitions finished so far.
        print('Avg acc:', str('%.4f' % (result / len(max_result))).ljust(4))
    print('Avg acc:', str('%.4f' % (result / args.partition)).ljust(4))
    print()