import torch
import os
import random

from utilities.argument_funcs import parse_generate_args, print_generate_args
from model.music_transformer import MusicTransformer
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.file_io import getRawMaestroDatasetFiles
from utilities.music_encoders import get_performance_encoder
from utilities.transforms import transform_midi_file_to_samples, MidiFileToNoteSequence,Augment,example_to_input_target_with_padding
import pathlib
from petastorm import make_reader
from petastorm.pytorch import DataLoader
from petastorm.transform import TransformSpec
from utilities.transforms import example_to_input_target_with_padding,deserialize_row

# main
def _save_decoded_midi(performance_encoder, tokens, dest_path):
    """Decode a token sequence to MIDI and place the file at dest_path.

    performance_encoder.decode writes a temporary MIDI file and returns its
    path; we relocate it with os.rename.
    NOTE(review): os.rename fails across filesystems — confirm the encoder's
    temp dir shares a filesystem with the output dir.
    """
    temp_file_path = performance_encoder.decode(tokens)
    os.rename(temp_file_path, dest_path)


def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Generates music from a model specified by command line arguments
    ----------
    """

    args = parse_generate_args()
    print_generate_args(args)

    if args.force_cpu:
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    os.makedirs(args.output_dir, exist_ok=True)

    def _train_transform_input_row(example):
        # Deserialize a stored petastorm row, then crop/pad it to
        # max_sequence tokens (random crop for variety in the primer).
        inputs, targets = example_to_input_target_with_padding(
            deserialize_row(example), args.max_sequence, use_random_crop=True)
        return {
            "inputs": inputs,
            "targets": targets,
        }

    performance_encoder = get_performance_encoder()

    train_absolute_path = str(
        pathlib.Path(__file__).parent.joinpath(args.input_dir, "train").absolute())

    # Pull a single random sample from the train set to act as the primer.
    # The shuffling queue gives us a random-ish pick; num_epochs=1 so the
    # reader terminates. The context manager closes the reader's worker
    # threads/file handles as soon as we have our one batch (the original
    # code leaked them).
    with DataLoader(make_reader('file://' + train_absolute_path, num_epochs=1,
                                transform_spec=TransformSpec(_train_transform_input_row)),
                    batch_size=1, shuffling_queue_capacity=10000) as train_loader:
        try:
            primer_sample = next(iter(train_loader))
        except StopIteration:
            # Bug fix: the original loop left primer_sample unbound on an
            # empty dataset, raising an opaque NameError later.
            raise RuntimeError(
                "No samples found in train dataset at: " + train_absolute_path)
    primer = primer_sample["inputs"][0].to(get_device())

    model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                d_model=args.d_model, dim_feedforward=args.dim_feedforward,
                max_sequence=args.max_sequence, rpr=args.rpr,
                vocab_size=performance_encoder.vocab_size).to(get_device())

    # Load on CPU first; the model itself was already moved to get_device().
    model.load_state_dict(torch.load(args.model_weights, map_location=torch.device('cpu')))

    # Saving primer first
    _save_decoded_midi(performance_encoder, primer[:args.num_prime].cpu().numpy(),
                       os.path.join(args.output_dir, "primer.mid"))
    # Saving primer with the same length as the generated sample
    _save_decoded_midi(performance_encoder, primer[:args.max_sequence].cpu().numpy(),
                       os.path.join(args.output_dir, "primer_counterpart.mid"))

    # GENERATION
    model.eval()
    with torch.no_grad():
        if args.beam > 0:
            # Beam search decoding
            print("BEAM:", args.beam)
            beam_seq = model.generate(primer[:args.num_prime], args.target_seq_length,
                                      beam=args.beam)
            _save_decoded_midi(performance_encoder, beam_seq[0].cpu().numpy(),
                               os.path.join(args.output_dir, "beam.mid"))
        else:
            # Sampling from the model's output distribution
            print("RAND DIST")
            rand_seq = model.generate(primer[:args.num_prime], args.target_seq_length,
                                      beam=0)
            _save_decoded_midi(performance_encoder, rand_seq[0].cpu().numpy(),
                               os.path.join(args.output_dir, "rand.mid"))




# Script entry point: run generation only when executed directly, not on import.
if __name__ == "__main__":
    main()
