import argparse
import logging
import os
import time
import random

from utils.log_utils import config_root_logger
from utils.train_utils import set_random_seed


def parser_sets(argv=None):
    """Build and parse command-line arguments for TDNN training.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            parses ``sys.argv[1:]``, preserving the original behavior;
            passing a list makes the function testable without touching
            ``sys.argv``.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser(description='tdnn training')
    parser.add_argument('--adam', dest='adam', action='store_true', help='Replace SGD with Adam')
    # type=float so CLI-supplied betas match the float tuple default
    # (previously nargs='+' with no type yielded a list of strings).
    parser.add_argument('--betas', default=(0.9, 0.999), nargs='+', type=float, help='ADAM betas')
    parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
    parser.add_argument('--best-val-model-name', default='tdnn_final.pth',
                        help='Location to save best validated model within the save folder')
    # action='store_true' sets the flag to True when present on the command
    # line; it defaults to False otherwise.
    parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
    parser.add_argument('--checkpoint', dest='checkpoint', action='store_true',
                        help='Enables checkpoint saving of model')
    parser.add_argument('--checkpoint-per-iteration', default=0, type=int,
                        help='Save checkpoint per iteration. 0 means never save')
    parser.add_argument('--continue-from', default='', help='Continue from checkpoint model')
    parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
    parser.add_argument('--distributed', dest='distributed', action='store_true',
                        help='Enable distributed training')
    parser.add_argument('--decay-epoch', '--decay_epoch', default=5, type=int, help='set weight decay epoch step')
    parser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')
    parser.add_argument('--eps', default=1e-8, type=float, help='ADAM eps')
    parser.add_argument('--finetune', dest='finetune', action='store_true',
                        help='Finetune the model from checkpoint "continue_from"')
    parser.add_argument('--id', default='TDNN training', help='Identifier for visdom/tensorboard run')
    parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
    parser.add_argument('--labels-path', default='Work/data/vocab.txt', help='all vocab for transcription')
    parser.add_argument('--local-rank', '--local_rank', default=-1, type=int, help='local rank')
    parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')
    parser.add_argument('--lambda-kd', dest='lambda_kd', type=float, default=3000.0)
    parser.add_argument('--log-dir', default='Work/visualize/', help='Location of tensorboard log')
    parser.add_argument('--log-params', dest='log_params', action='store_true',
                        help='Log parameter values and gradients')
    parser.add_argument('--learning-anneal', default=1.1, type=float,
                        help='Annealing applied to learning rate every epoch')
    parser.add_argument('--load-auto-checkpoint', dest='load_auto_checkpoint', action='store_true',
                        help='Enable when handling interruptions. Automatically load the latest checkpoint from the '
                             'save folder')
    # type=float so CLI-supplied values match the numeric default
    # (previously a CLI value arrived as str while the default was int 1).
    parser.add_argument('--loss-scale', default=1, type=float,
                        help='Loss scaling used by Apex.'
                             ' Default is 1 due to warp-ctc not supporting scaling of gradients')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--max-norm', default=400, type=int, help='Norm cutoff to prevent explosion of gradients')
    parser.add_argument('--max-durations', default=30.0, type=float, help='max durations of wavfile')
    parser.add_argument('--min-durations', default=0.0, type=float, help='min durations of wavfile')
    parser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')
    parser.add_argument('--net-arch', dest='net_arch', type=str, default='base',
                        help="base, super, large, medium or small")
    parser.add_argument('--noise-dir', default=None,
                        help='Directory to inject noise into audio. If default, noise Inject not added')
    parser.add_argument('--noise-prob', default=0.4, type=float, help='Probability of noise being added per sample')
    parser.add_argument('--noise-min', default=0.0,
                        help='Minimum noise level to sample from. (1.0 means all noise, not original signal)',
                        type=float)
    parser.add_argument('--noise-max', default=0.5,
                        help='Maximum noise levels to sample from. Maximum 1.0', type=float)
    parser.add_argument('--opt-level', type=str, default='O0',
                        help='Optimization level to use for training using Apex. Default is FP32 training. '
                             'O1 is mixed precision and recommended for mixed precision hardware')
    parser.add_argument('--reverberation', dest='reverberation', action='store_true',
                        help='add reverberation to audio.')
    parser.add_argument('--reverb-prob', default=0.4, type=float, help='Probability of reverberation added per sample')
    parser.add_argument('--save-n-recent-models', default=0, type=int,
                        help='Maximum number of checkpoints to save. '
                             'If the max is reached, we delete older checkpoints.'
                             'Default is there is no maximum number, so we save all checkpoints.')
    parser.add_argument('--save-folder', default='models/', help='Location to save epoch models')
    parser.add_argument('--speed-volume-perturb', dest='speed_volume_perturb', action='store_true',
                        help='Use random tempo and gain perturbations.')
    parser.add_argument('--seed', default=123456, type=int, help='Seed to generators')
    parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
    parser.add_argument('--teacher-model-path', default='', help='teacher model path')
    parser.add_argument('--train-manifest', metavar='DIR',
                        help='path to train manifest', default='Work/data/manifest.*.train')
    parser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')
    parser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')
    parser.add_argument('--val-manifest', metavar='DIR',
                        help='path to validation manifest', default='Work/data/manifest.*.dev')
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--word-form', dest='word_form', type=str, default='sinogram')
    parser.add_argument('--wd', '--weight_decay', default=1e-5, type=float, help='Initial weight decay')
    parser.add_argument('--vector-model-arch', default='V1', type=str, help='path to load wav2vec model')
    parser.add_argument('--vector-model-path',
                        default='models/V1/wav2vec_large.pt',
                        type=str,
                        help='path to load wav2vec model')
    args = parser.parse_args(argv)
    return args

def main(args):
    """Set up logging, dump the config, and verify the random seed.

    Creates a ``log`` directory next to the training-manifest directory,
    configures the root logger via ``config_root_logger``, logs every
    parsed argument, then seeds the RNGs with ``set_random_seed`` and
    logs two sample random numbers so the seed's effect can be eyeballed
    across runs.

    Args:
        args: Parsed argparse.Namespace from ``parser_sets``.
    """
    # Place logs in a 'log' dir two levels up from the train manifest path.
    log_dir = os.path.join(os.path.dirname(os.path.dirname(args.train_manifest)), 'log')
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(log_dir, exist_ok=True)
    log_prefix = args.net_arch
    config_root_logger(log_dir, prefix=log_prefix)
    logging.info("我是耿雪龙, 开始训练进程")
    logging.info("Load config:")
    logging.info("***************************************")
    # Dump every parsed option so each run's full config is in the log.
    for param, value in vars(args).items():
        logging.info("{} = {}".format(param, value))
    logging.info("***************************************")

    logging.info(f'设置random seed: {args.seed}')
    set_random_seed(args.seed)
    # Emit sample random values; identical seeds should reproduce them.
    logging.info(f'测试random seed 的有效性')
    random_int = random.randint(0, 10000)
    random_float = random.random()*100
    logging.info(f'随机数，整数：{random_int},小数：{random_float}')




# Script entry point: parse CLI options, then run training setup.
if __name__ == '__main__':
    main(parser_sets())