import os
os.environ['TOKENIZERS_PARALLELISM'] = "false"
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import time
import logging
from apex import amp

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
 
from utilities import (create_folder, get_filename, create_logging, Mixup, 
    StatisticsContainer)
from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, 
    Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, 
    Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, 
    Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, 
    Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, 
    Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn14Bert)
from pytorch_utils import (move_data_to_device, count_parameters, count_flops, 
    do_mixup)
from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, 
    AlternateTrainSampler, EvaluateSampler, collate_fn, AudioSetBiModalDataset, AudioCapsBiModalDataset, get_collate_fn)
from evaluate import Evaluator, EvaluatorBiModal
import config
from losses import get_loss_func
from tqdm import tqdm

def val_epoch(model, val_loader, device):
    """Run one validation pass and return the mean per-batch loss.

    Args:
      model: nn.DataParallel-wrapped model exposing
        ``model.module.train_forward(waveform, texts, labels)``.
      val_loader: DataLoader yielding dicts with at least the keys
        'waveform', 'texts' and 'labels'.
      device: 'cuda' | 'cpu' — target device for the tensor entries.

    Returns:
      float: mean loss over all validation batches; ``nan`` if the
      loader yields no batches.
    """
    batch_losses = []
    model.eval()
    # no_grad around the whole loop: nothing in validation needs autograd.
    with torch.no_grad():
        for batch_data_val in tqdm(val_loader):
            # Move tensor entries to the device. 'texts' is left as-is:
            # it is a list of raw strings consumed by the text encoder.
            for key in batch_data_val.keys():
                if key == 'texts':
                    continue
                batch_data_val[key] = move_data_to_device(batch_data_val[key], device)

            loss = model.module.train_forward(
                batch_data_val['waveform'], batch_data_val['texts'],
                batch_data_val['labels'])
            # .item() already syncs and copies to host; no detach/cpu needed.
            batch_losses.append(loss.item())

    # Explicit guard: np.mean([]) would warn and return nan silently.
    if not batch_losses:
        return float('nan')
    return float(np.mean(batch_losses))


def train(args):
    """Fine-tune an audio-text bi-modal tagging model (e.g. Cnn14Bert).

    Trains on paired audio/caption data with in-batch negatives (no
    fixed label set), validates once per epoch, and checkpoints every
    ``args.save_interval`` epochs.

    Args (attributes of ``args``, as registered by the 'train' subparser):
      workspace: str, root directory for checkpoints and logs.
      finetune_task: str, task name embedded in output paths.
      train_audio_dir, train_meta_dir: str, training audio + metadata.
      val_audio_dir, val_meta_dir: str, validation audio + metadata.
      sample_rate, window_size, hop_size, mel_bins, fmin, fmax: int,
        audio front-end parameters.
      model_type: str, class name of the model to instantiate.
      loss_type: 'clip_bce' (currently only used for bookkeeping).
      balanced: 'none' | 'balanced' | 'alternate' (used in paths only).
      batch_size: int
      learning_rate: float
      resume_checkpoint_path: str, '' to train from scratch.
      train_epoches: int
      cuda: bool, use GPU when available.
      filename: str, script name embedded in output paths.
      bert_model_type: str, HuggingFace model name for the text branch.
      max_text_nums, max_seq_len, shared_dim: int, text/embedding params.
      aug_prob: float (text augmentation is disabled here regardless).
      use_logit_scale: bool
      save_interval: int, checkpoint every N epochs.
      fp16: bool, enable apex O1 mixed precision (GPU only).
    """

    # Arguments & parameters
    workspace = args.workspace
    finetune_task = args.finetune_task
    train_audio_dir = args.train_audio_dir
    train_meta_dir = args.train_meta_dir
    val_audio_dir = args.val_audio_dir
    val_meta_dir = args.val_meta_dir
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    loss_type = args.loss_type  # kept for bookkeeping; loss comes from train_forward
    balanced = args.balanced
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    resume_checkpoint_path = args.resume_checkpoint_path
    train_epoches = args.train_epoches
    device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
    filename = args.filename
    bert_model_type = args.bert_model_type
    max_text_nums = args.max_text_nums
    max_seq_len = args.max_seq_len
    shared_dim = args.shared_dim
    aug_prob = args.aug_prob  # unused below: augmentation is forced off
    use_logit_scale = args.use_logit_scale

    num_workers = 8

    # Paths: encode every hyper-parameter in the directory name so runs
    # with different settings never overwrite each other. The config
    # suffix is shared by the checkpoints and logs trees.
    run_config = os.path.join(filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax), model_type,
        'bert_type={}'.format(bert_model_type), 'balanced={}'.format(balanced),
        'max_text_nums={}'.format(max_text_nums), 'max_seq_len={}'.format(max_seq_len), 'batch_size={}'.format(batch_size))

    checkpoints_dir = os.path.join(workspace, f'checkpoints_{finetune_task}', run_config)
    create_folder(checkpoints_dir)

    logs_dir = os.path.join(workspace, f'logs_{finetune_task}', run_config)
    create_logging(logs_dir, filemode='w')
    logging.info(args)

    if 'cuda' in str(device):
        logging.info('Using GPU.')
        device = 'cuda'
    else:
        logging.info('Using CPU. Set --cuda flag to use GPU.')
        device = 'cpu'

    # Model: resolve the class by name from the imported model zoo.
    # globals() lookup is equivalent to the previous eval(model_type)
    # for valid names but cannot execute arbitrary code.
    try:
        Model = globals()[model_type]
    except KeyError:
        raise ValueError('Unknown model_type: {}'.format(model_type))
    model = Model(sample_rate=sample_rate, window_size=window_size,
        hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
        bert_model_type=bert_model_type,
        max_seq_len=max_seq_len, shared_dim=shared_dim, use_logit_scale=use_logit_scale)

    params_num = count_parameters(model)
    logging.info('Parameters num: {}'.format(params_num))

    # Datasets: captions are used directly as text; contrastive training
    # relies on in-batch negatives only.
    train_dataset = AudioCapsBiModalDataset(train_audio_dir, train_meta_dir, sample_rate=sample_rate)
    val_dataset = AudioCapsBiModalDataset(val_audio_dir, val_meta_dir, sample_rate=sample_rate)

    # Data loaders.
    # label_texts=[] (no label set here) and aug_prob=-1 (no text
    # augmentation can be used in this setting).
    my_collate_fn_train = get_collate_fn([], max_text_nums, aug_prob=-1)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
        collate_fn=my_collate_fn_train, batch_size=batch_size,
        num_workers=num_workers, pin_memory=True, shuffle=True)

    my_collate_fn_val = get_collate_fn([], max_text_nums, aug_prob=-1)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
        collate_fn=my_collate_fn_val, batch_size=batch_size,
        num_workers=num_workers, pin_memory=True, shuffle=False)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=learning_rate,
        betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True)

    train_bgn_time = time.time()

    # Resume training. map_location='cpu' lets a GPU-trained checkpoint
    # be loaded on a CPU-only machine; the model is moved to the target
    # device afterwards. Iteration count always restarts from 0.
    iteration = 0
    if resume_checkpoint_path != "":
        logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
        checkpoint = torch.load(resume_checkpoint_path, map_location='cpu')
        model.load_state_dict(checkpoint['model'], strict=False)

    if 'cuda' in str(device):
        model.to(device)
        # Mixed precision (apex must wrap AFTER .to(device), BEFORE DataParallel)
        if args.fp16:
            model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    # Parallel
    print('GPU number: {}'.format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model)

    train_loss_history = []
    for epoch in range(train_epoches):
        print(f"Epoch {epoch}")

        # Train one epoch. train() once per epoch is enough: val_epoch
        # only flips the model to eval() at the end of the epoch.
        model.train()
        pbar = tqdm(train_loader)
        for batch_data_dict in pbar:
            # Move tensor entries to the device; 'texts' stays a list of
            # raw strings for the text encoder.
            for key in batch_data_dict.keys():
                if key == 'texts':
                    continue
                batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)

            # Forward: train_forward computes the contrastive loss itself.
            loss = model.module.train_forward(
                batch_data_dict['waveform'], batch_data_dict['texts'],
                batch_data_dict['labels'])

            # Backward
            if 'cuda' in str(device) and args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            train_loss_history.append(loss.item())

            optimizer.step()
            optimizer.zero_grad()

            # Report (and reset) the running mean loss every 50 iterations.
            if iteration % 50 == 0:
                pbar.set_postfix_str(f"Loss: {np.mean(train_loss_history):.4f}")
                train_loss_history = []

            iteration += 1

        val_loss = val_epoch(model, val_loader, device)
        logging.info(f'Val avg batch Loss: {val_loss:.5f}')

        # Save model. Unwrap .module so the checkpoint can be restored
        # without a DataParallel wrapper.
        if epoch % args.save_interval == 0:
            checkpoint = {
                'epoch': epoch,
                'model': model.module.state_dict()}

            checkpoint_path = os.path.join(
                checkpoints_dir, '{}_epochs.pth'.format(epoch))

            torch.save(checkpoint, checkpoint_path)
            logging.info('Model saved to {}'.format(checkpoint_path))

if __name__ == '__main__':
    # Command-line entry point: one 'train' sub-command carrying every
    # data, model, and optimization option.
    parser = argparse.ArgumentParser(description='Example of parser. ')
    subparsers = parser.add_subparsers(dest='mode')

    train_parser = subparsers.add_parser('train')

    # Workspace and data locations
    train_parser.add_argument('--workspace', type=str, required=True)
    train_parser.add_argument('--finetune_task', type=str, default="AudioCaps")
    train_parser.add_argument('--train_audio_dir', type=str, required=True)
    train_parser.add_argument('--train_meta_dir', type=str, required=True)
    train_parser.add_argument('--val_audio_dir', type=str, required=True)
    train_parser.add_argument('--val_meta_dir', type=str, required=True)

    # Audio front-end
    train_parser.add_argument('--sample_rate', type=int, default=32000)
    train_parser.add_argument('--window_size', type=int, default=1024)
    train_parser.add_argument('--hop_size', type=int, default=320)
    train_parser.add_argument('--mel_bins', type=int, default=64)
    train_parser.add_argument('--fmin', type=int, default=50)
    train_parser.add_argument('--fmax', type=int, default=14000)

    # Model / loss / sampling
    train_parser.add_argument('--model_type', type=str, default="Cnn14Bert")
    train_parser.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
    train_parser.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])

    # Optimization
    train_parser.add_argument('--batch_size', type=int, default=16)
    train_parser.add_argument('--learning_rate', type=float, default=2e-4)
    train_parser.add_argument('--resume_checkpoint_path', type=str, default="")
    train_parser.add_argument('--train_epoches', type=int, default=20)
    train_parser.add_argument('--cuda', action='store_true', default=False)

    # Text branch
    train_parser.add_argument('--bert_model_type', type=str, default='bert-base-uncased')
    train_parser.add_argument('--max_text_nums', type=int, default=64)
    train_parser.add_argument('--max_seq_len', type=int, default=16)
    train_parser.add_argument('--shared_dim', type=int, default=1024)
    train_parser.add_argument('--aug_prob', type=float, default=-1)

    # Checkpointing / precision
    train_parser.add_argument('--save_interval', type=int, default=1)
    train_parser.add_argument('--use_logit_scale', action='store_true', default=False)
    train_parser.add_argument('--fp16', action='store_true', default=False)

    args = parser.parse_args()
    args.filename = get_filename(__file__)

    # Guard clause: anything other than the 'train' sub-command is an error.
    if args.mode != 'train':
        raise Exception('Error argument!')

    train(args)