#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys, time, os, argparse, socket
import yaml
import numpy
import pdb
import torch
import glob
import zipfile
import warnings
import datetime
from tuneThreshold import *
from SpeakerNet import *
from DatasetLoader import *
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter

## ===== ===== ===== ===== ===== ===== ===== =====
## Parse arguments
## ===== ===== ===== ===== ===== ===== ===== =====

parser = argparse.ArgumentParser(description="SpeakerNet")

parser.add_argument('--config', type=str, default=None, help='Config YAML file')


def str2bool(v):
    """Parse a boolean command-line / YAML value.

    argparse's ``type=bool`` treats ANY non-empty string (including the
    literal "False") as True, so boolean flags could never be disabled from
    the command line. This helper accepts the usual textual spellings.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognisable boolean.
    """
    if isinstance(v, bool):
        return v
    if str(v).lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if str(v).lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(v))


## Data loader
parser.add_argument('--max_frames', type=int, default=200, help='Input length to the network for training')
parser.add_argument('--eval_frames', type=int, default=300,
                    help='Input length to the network for testing; 0 uses the whole files')
parser.add_argument('--batch_size', type=int, default=100, help='Batch size, number of speakers per batch')
parser.add_argument('--max_seg_per_spk', type=int, default=500,
                    help='Maximum number of utterances per speaker per epoch')
# 0 loader workers = load in the main process (Windows-friendly default)
parser.add_argument('--nDataLoaderThread', type=int, default=0, help='Number of loader threads')
# was type=bool, which parsed "--augment False" as True; str2bool fixes that
parser.add_argument('--augment', type=str2bool, default=False, help='Augment input')
parser.add_argument('--seed', type=int, default=10, help='Seed for the random number generator')

## Training details
# Stage-1 training schedule; stage-2 fine-tuning used test_interval=2, max_epoch=320.
parser.add_argument('--test_interval', type=int, default=5, help='Test and save every [test_interval] epochs')
parser.add_argument('--max_epoch', type=int, default=300, help='Maximum number of epochs')

parser.add_argument('--trainfunc', type=str, default="aamsoftmax", help='Loss function')

## Optimizer
parser.add_argument('--optimizer', type=str, default="adam", help='sgd or adam')
parser.add_argument('--scheduler', type=str, default="steplr", help='Learning rate scheduler')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument("--lr_decay", type=float, default=0.95, help='Learning rate decay every [test_interval] epochs')
parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay in the optimizer')

## Loss functions
parser.add_argument("--hard_prob", type=float, default=0.5,
                    help='Hard negative mining probability, otherwise random, only for some loss functions')
parser.add_argument("--hard_rank", type=int, default=10,
                    help='Hard negative mining rank in the batch, only for some loss functions')
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions')
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions')

parser.add_argument('--nPerSpeaker', type=int, default=1,
                    help='Number of utterances per speaker per batch, only for metric learning based losses')

# 700 speakers for ST-CMDS (VoxCeleb1 would be 1211, VoxCeleb2 would be 5994)
parser.add_argument('--nClasses', type=int, default=700,
                    help='Number of speakers in the softmax layer, only for softmax-based losses')

## Evaluation parameters (minDCF costs)
parser.add_argument('--dcf_p_target', type=float, default=0.05,
                    help='A priori probability of the specified target speaker')
parser.add_argument('--dcf_c_miss', type=float, default=1, help='Cost of a missed detection')
parser.add_argument('--dcf_c_fa', type=float, default=1, help='Cost of a spurious detection')

## Load and save
parser.add_argument('--initial_model', type=str, default="", help='Initial model weights')
parser.add_argument('--save_path', type=str, default="exps/ST_ResNetSE34L_log_MA_80_1", help='Path for model and logs')

## Training and test data (ST-CMDS corpus; VoxCeleb paths were used in earlier runs)
parser.add_argument('--train_list', type=str, default="lists/ST_train_list.txt", help='Train list')
parser.add_argument('--test_list', type=str, default="lists/ST_test_list.txt", help='Evaluation list')
parser.add_argument('--train_path', type=str, default="D:\\dataset\\ST-CMDS-20170001_1-OS\\ST-CMDS-20170001_1-OS",
                    help='Absolute path to the train set')
parser.add_argument('--test_path', type=str, default="D:\\dataset\\ST-CMDS-20170001_1-OS\\ST-CMDS-20170001_1-OS",
                    help='Absolute path to the test set')

parser.add_argument('--musan_path', type=str, default="data/musan_split",
                    help='Absolute path to the MUSAN noise corpus (for augmentation)')
parser.add_argument('--rir_path', type=str, default="data/RIRS_NOISES/simulated_rirs",
                    help='Absolute path to the simulated room impulse responses (for augmentation)')

## Model definition
parser.add_argument('--n_mels', type=int, default=80, help='Number of mel filterbanks')
# was type=bool; str2bool makes "--log_input False" actually disable it
parser.add_argument('--log_input', type=str2bool, default=True, help='Log input features')
parser.add_argument('--model', type=str, default="ResNetSE34L_MultiHeadAggregation", help='Name of model definition')
parser.add_argument('--encoder_type', type=str, default="SAP", help='Type of encoder')
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer')

## For test only
parser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')

## Distributed and mixed precision training
parser.add_argument('--port', type=str, default="8888", help='Port for distributed training, input as text')
parser.add_argument('--distributed', dest='distributed', action='store_true', help='Enable distributed training')
parser.add_argument('--mixedprec', dest='mixedprec', action='store_true', help='Enable mixed precision training')

args = parser.parse_args()


## Parse YAML
def find_option_type(key, parser):
    """Return the ``type`` callable registered for option ``--key`` on *parser*.

    Used when overriding parsed arguments from a YAML config so that YAML
    values are coerced to the same type argparse would have produced.

    Args:
        key: option name without the leading dashes (e.g. ``"batch_size"``).
        parser: the argparse.ArgumentParser that defined the options.

    Returns:
        The callable passed as ``type=`` for that option (may be None if the
        option was defined without an explicit type).

    Raises:
        ValueError: if no option named ``--key`` exists on the parser.
    """
    for opt in parser._get_optional_actions():
        if ('--' + key) in opt.option_strings:
            return opt.type
    # Include the key in the error so a bad YAML entry is identifiable.
    raise ValueError("No option '--{}' defined in the argument parser".format(key))


# Overlay YAML config values onto the parsed CLI arguments, coercing each
# value with the type argparse registered for that option.
if args.config is not None:
    with open(args.config, "r") as config_file:
        yml_config = yaml.load(config_file, Loader=yaml.FullLoader)
    for key, value in yml_config.items():
        if key not in args.__dict__:
            sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(key))
            continue
        option_type = find_option_type(key, parser)
        args.__dict__[key] = option_type(value)

## Try to import NSML
try:
    import nsml
    from nsml import HAS_DATASET, DATASET_PATH, PARALLEL_WORLD, PARALLEL_PORTS, MY_RANK
    from nsml import NSML_NFS_OUTPUT, SESSION_NAME
except:
    pass;

warnings.simplefilter("ignore")

# 释放GPU内存
torch.cuda.empty_cache()


## ===== ===== ===== ===== ===== ===== ===== =====
## Trainer script
## ===== ===== ===== ===== ===== ===== ===== =====

def main_worker(gpu, ngpus_per_node, args):
    """Per-process trainer: build the model and data pipeline, then either
    run evaluation only (args.eval) or the main epoch loop with periodic
    validation and checkpointing.

    Args:
        gpu: GPU index for this worker; doubles as the distributed rank.
        ngpus_per_node: world size when args.distributed; None in
            single-process mode.
        args: parsed argument namespace. main() must have set
            model_save_path and result_save_path on it beforehand.
    """
    args.gpu = gpu

    ## Load models
    s = SpeakerNet(**vars(args));

    if args.distributed:
        # Single-node rendezvous over localhost on the user-chosen port.
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = args.port

        dist.init_process_group(backend='nccl', world_size=ngpus_per_node, rank=args.gpu)

        torch.cuda.set_device(args.gpu)
        s.cuda(args.gpu)

        s = torch.nn.parallel.DistributedDataParallel(s, device_ids=[args.gpu], find_unused_parameters=True)

        print('Loaded the model on GPU {:d}'.format(args.gpu))
    else:
        s = WrappedModel(s).cuda(args.gpu)

    it = 1          # next epoch to run (1-based); advanced below when resuming
    eers = [100];   # validation EER history, seeded so min(eers) is always defined

    if args.gpu == 0:
        ## Write args to scorefile -- only rank 0 touches the log files
        scorefile = open(args.result_save_path + "/scores.txt", "a+");

    ## Initialise trainer and data loader
    train_dataset = train_dataset_loader(**vars(args))

    train_sampler = train_dataset_sampler(train_dataset, **vars(args))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        num_workers=args.nDataLoaderThread,
        sampler=train_sampler,
        pin_memory=True,
        worker_init_fn=worker_init_fn,
        drop_last=True,
    )

    trainer = ModelTrainer(s, **vars(args))

    ## Load model weights: checkpoints are named model0XXXXXXXX.model, so a
    ## lexicographic sort orders them by epoch.
    modelfiles = glob.glob('%s/model0*.model' % args.model_save_path)
    modelfiles.sort()

    # Disabled ad-hoc evaluation loop (flip `if False` to re-enable): scores
    # a fixed checkpoint against the test list and appends EERs to a file.
    if False:
        print('modelfiles:', len(modelfiles))
        scorefile = open(args.result_save_path + "/Vox1-test.txt", "a+");
        for i in range(10):
            # print('len(modelfiles)-1-i:\n', len(modelfiles) - 1 - i)
            # print('it:\n', int(os.path.splitext(os.path.basename(modelfiles[len(modelfiles) - 1 - i]))[0][5:]))
            # print('model:',modelfiles[len(modelfiles) - 1 - i])
            # print('len(modelfiles)-1-i:\n', len(modelfiles) - 1 - 27)
            print('it:\n', int(os.path.splitext(os.path.basename(modelfiles[0]))[0][5:]))
            print('model:', modelfiles[0])
            # exit()
            # trainer.loadParameters(modelfiles[len(modelfiles) - 1 - i]);
            trainer.loadParameters(modelfiles[0]);
            sc, lab, _ = trainer.evaluateFromList(**vars(args))
            result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

            scorefile.write(
                "It {:d}, EER {:2.3f} \n".format(
                    int(os.path.splitext(os.path.basename(modelfiles[0]))[0][5:]), result[1]));
            scorefile.flush()

            print('eer:\n', result[1])
        scorefile.close();
        exit()

    # Resume priority: explicit --initial_model first, otherwise the newest
    # checkpoint found in model_save_path.
    if (args.initial_model != ""):
        trainer.loadParameters(args.initial_model);
        print("Model {} loaded!".format(args.initial_model));
    elif len(modelfiles) >= 1:
        trainer.loadParameters(modelfiles[-1]);
        print("Model {} loaded from previous state!".format(modelfiles[-1]));
        # basename is "modelNNNNNNNNN"; [5:] strips the "model" prefix to get the epoch
        it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
        # Load the i-th checkpoint instead of the latest (manual override):
        # i = 19
        # trainer.loadParameters(modelfiles[i-1]);
        # print("Model {} loaded from previous state!".format(modelfiles[i-1]));
        # it = int(os.path.splitext(os.path.basename(modelfiles[i-1]))[0][5:]) + 1

    # Replay the LR scheduler up to the resumed epoch so the learning rate
    # matches what it would have been in an uninterrupted run.
    for ii in range(1, it):
        trainer.__scheduler__.step()

    ## Evaluation code - must run on single GPU
    if args.eval == True:
    # if True:
        pytorch_total_params = sum(p.numel() for p in s.module.__S__.parameters())

        print('Total parameters: ', pytorch_total_params)
        print('Test list', args.test_list)

        sc, lab, _ = trainer.evaluateFromList(**vars(args))

        if args.gpu == 0:

            result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

            fnrs, fprs, thresholds = ComputeErrorRates(sc, lab)
            mindcf, threshold = ComputeMinDcf(fnrs, fprs, thresholds, args.dcf_p_target, args.dcf_c_miss, args.dcf_c_fa)

            print('\n', time.strftime("%Y-%m-%d %H:%M:%S"), "VEER {:2.4f}".format(result[1]),
                  "MinDCF {:2.5f}".format(mindcf));

            # Report to NSML only when running inside that platform.
            if ("nsml" in sys.modules) and args.gpu == 0:
                training_report = {};
                training_report["summary"] = True;
                training_report["epoch"] = it;
                training_report["step"] = it;
                training_report["val_eer"] = result[1];
                training_report["val_dcf"] = mindcf;

                nsml.report(**training_report);

        return

    ## Save training code and params (snapshot of all .py files + the full
    ## argument namespace) so every run is reproducible.
    if args.gpu == 0:
        pyfiles = glob.glob('./*.py')
        strtime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

        zipf = zipfile.ZipFile(args.result_save_path + '/run%s.zip' % strtime, 'w', zipfile.ZIP_DEFLATED)
        for file in pyfiles:
            zipf.write(file)
        zipf.close()

        with open(args.result_save_path + '/run%s.cmd' % strtime, 'w') as f:
            f.write('%s' % args)

    ## Core training script
    for it in range(it, args.max_epoch + 1):

        train_sampler.set_epoch(it)

        clr = [x['lr'] for x in trainer.__optimizer__.param_groups]

        loss, traineer = trainer.train_network(train_loader, verbose=(args.gpu == 0));

        if args.gpu == 0:
            print('\n', time.strftime("%Y-%m-%d %H:%M:%S"),
                  "Epoch {:d}, TEER/TAcc {:2.2f}, TLOSS {:f}, LR {:f}".format(it, traineer, loss, max(clr)));
            scorefile.write(
                "Epoch {:d}, TEER/TAcc {:2.2f}, TLOSS {:f}, LR {:f} \n".format(it, traineer, loss, max(clr)));

        # To save the model every epoch instead, use:
        # trainer.saveParameters(args.model_save_path + "/model%09d.model" % it);
        if it % args.test_interval == 0:

            sc, lab, _ = trainer.evaluateFromList(**vars(args))

            if args.gpu == 0:
                result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

                fnrs, fprs, thresholds = ComputeErrorRates(sc, lab)
                mindcf, threshold = ComputeMinDcf(fnrs, fprs, thresholds, args.dcf_p_target, args.dcf_c_miss,
                                                  args.dcf_c_fa)

                eers.append(result[1])

                print('\n', time.strftime("%Y-%m-%d %H:%M:%S"),
                      "Epoch {:d}, VEER {:2.4f}, MinDCF {:2.5f}".format(it, result[1], mindcf));
                scorefile.write("Epoch {:d}, VEER {:2.4f}, MinDCF {:2.5f}\n".format(it, result[1], mindcf));

                # Save the model every args.test_interval epochs
                trainer.saveParameters(args.model_save_path + "/model%09d.model" % it);

                # Sidecar file recording this checkpoint's validation EER.
                with open(args.model_save_path + "/model%09d.eer" % it, 'w') as eerfile:
                    eerfile.write('{:2.4f}'.format(result[1]))

                scorefile.flush()

        if ("nsml" in sys.modules) and args.gpu == 0:
            training_report = {};
            training_report["summary"] = True;
            training_report["epoch"] = it;
            training_report["step"] = it;
            training_report["train_loss"] = loss;
            training_report["min_eer"] = min(eers);

            nsml.report(**training_report);

    if args.gpu == 0:
        scorefile.close();


## ===== ===== ===== ===== ===== ===== ===== =====
## Main function
## ===== ===== ===== ===== ===== ===== ===== =====


def main():
    """Prepare output directories and launch the trainer.

    Spawns one worker per GPU when --distributed is set; otherwise runs a
    single worker on GPU 0 in this process.
    """
    if ("nsml" in sys.modules) and not args.eval:
        args.save_path = os.path.join(args.save_path, SESSION_NAME.replace('/', '_'))

    args.model_save_path = f"{args.save_path}/model"
    args.result_save_path = f"{args.save_path}/result"
    args.feat_save_path = ""

    for directory in (args.model_save_path, args.result_save_path):
        os.makedirs(directory, exist_ok=True)

    n_gpus = torch.cuda.device_count()

    print('Python Version:', sys.version)
    print('PyTorch Version:', torch.__version__)
    print('Number of GPUs:', n_gpus)
    print('Save path:', args.save_path)

    if args.distributed:
        mp.spawn(main_worker, nprocs=n_gpus, args=(n_gpus, args))
    else:
        main_worker(0, None, args)


# Script entry point: run the trainer only when executed directly,
# not when imported (e.g. by mp.spawn workers on Windows).
if __name__ == '__main__':
    main()
