import argparse
import os
import pickle
import json
import torch
import note_seq
from utilities.augmentation import augment_note_sequence
import itertools
from dataset.piano import AugmentedPianoDataset, Augment, MidiFileToNoteSequence, PadIndices
from torch.utils.data import Dataset, DataLoader
from utilities.music_encoders import PAD_ID, EOS_ID, get_performance_encoder
from utilities.device import cpu_device
from utilities.constants import PIANO_MAX_MIDI_PITCH, PIANO_MIN_MIDI_PITCH
from progress.bar import Bar
import webdataset as wds

# Name of the MAESTRO v2 manifest expected directly inside the input directory.
JSON_FILE = "maestro-v2.0.0.json"
# Assumed score tempo in BPM. NOTE(review): not referenced in this file —
# presumably consumed by importers of this module; verify before removing.
SCORE_BPM = 120.0
# Shorthand for note_seq's beat text-annotation type (unused in this file).
BEAT = note_seq.NoteSequence.TextAnnotation.BEAT
# Number of times entire input performances will be split.
NUM_REPLICATIONS = 1
# Whether to use absolute (wall-clock) timing — not referenced in this file.
ABSOLUTE_TIMING = False

# Augmentation grid: each (stretch, transpose) pair produces one augmented
# copy of the training split in prep_midi.
STRETCH_FACTORS = [0.95, 0.975, 1.0, 1.025, 1.05]
TRANSPOSE_AMOUNTS = [-3, -2, -1, 0, 1, 2, 3]
# Toggle to the identity-only grid for quick runs:
# STRETCH_FACTORS = [1.0]
# TRANSPOSE_AMOUNTS = [0]

# Materialize the product: itertools.product returns a one-shot iterator,
# so without list() a second iteration (e.g. calling prep_midi twice in one
# process) would silently see an empty grid.
augment_params = list(itertools.product(STRETCH_FACTORS, TRANSPOSE_AMOUNTS))

# prep_midi


# Upper bound on the size of a single WebDataset shard, in bytes.
_SHARD_MAX_BYTES = 1000000000


def _make_dataset(files, max_seq, stretch_amounts, transpose_amounts):
    """Build an AugmentedPianoDataset over *files* with the given augmentation."""
    return AugmentedPianoDataset(
        files,
        get_performance_encoder(),
        transforms=[
            MidiFileToNoteSequence(),
            Augment(
                midi_encoder=get_performance_encoder(),
                stretch_amounts=stretch_amounts,
                transpose_amounts=transpose_amounts,
                min_pitch=PIANO_MIN_MIDI_PITCH,
                max_pitch=PIANO_MAX_MIDI_PITCH,
            ),
            PadIndices(max_seq),
        ],
        max_seq=max_seq,
        random_seq=None,
    )


def _write_split(datasets, out_dir, bar_label, num_workers, skip_seq=None):
    """
    Stream every (input, target) pair from *datasets* into WebDataset tar
    shards named ``dataset-%04d.tar`` under *out_dir*.

    If *skip_seq* is given, examples whose input or target equals it
    (e.g. all-PAD sequences) are filtered out and not written.
    """
    bar = Bar(bar_label, max=sum(len(d) for d in datasets))
    sink = wds.ShardWriter(
        os.path.join(out_dir, "dataset") + "-%04d.tar", maxsize=_SHARD_MAX_BYTES)
    try:
        example_count = 0
        for dataset in datasets:
            # batch_size=1: each sample is written as its own record; [0][0]
            # strips the singleton batch dimension.
            for sample in DataLoader(dataset=dataset, batch_size=1,
                                     num_workers=num_workers):
                bar.next()
                inputs = sample[0][0]
                targets = sample[1][0]
                if skip_seq is not None and (
                        torch.all(torch.eq(inputs, skip_seq))
                        or torch.all(torch.eq(targets, skip_seq))):
                    continue
                sink.write({
                    "__key__": str(example_count),
                    "inputs.pth": inputs,
                    "targets.pth": targets,
                })
                example_count += 1
    finally:
        sink.close()
        bar.finish()


def prep_midi(input_dir, output_dir, max_seq):
    """
    Pre-process the MAESTRO dataset into WebDataset shards.

    Reads the maestro JSON manifest from *input_dir*, partitions the pieces
    into train/validation/test as declared by the manifest, encodes each MIDI
    file into fixed-length (*max_seq*) token sequences, and writes
    input/target tensor pairs as tar shards under
    ``output_dir/{train,val,test}``. Training examples are generated once per
    (stretch, transpose) pair in ``augment_params``; all-PAD training
    examples are dropped.

    Returns True on success, False when the manifest is missing or a piece
    has an unrecognized split type.
    """
    train_dir = os.path.join(output_dir, "train")
    val_dir = os.path.join(output_dir, "val")
    test_dir = os.path.join(output_dir, "test")
    for directory in (train_dir, val_dir, test_dir):
        os.makedirs(directory, exist_ok=True)

    maestro_json_file = os.path.join(input_dir, JSON_FILE)
    if not os.path.isfile(maestro_json_file):
        print("ERROR: Could not find file:", maestro_json_file)
        return False

    # Use a context manager so the manifest handle is closed promptly
    # (the original json.load(open(...)) leaked it).
    with open(maestro_json_file, "r") as manifest:
        maestro_json = json.load(manifest)
    print("Found", len(maestro_json), "pieces")
    print("Preprocessing...")

    split_files = {"train": [], "validation": [], "test": []}
    for piece in maestro_json:
        mid = os.path.join(input_dir, piece["midi_filename"])
        split_type = piece["split"]
        if split_type not in split_files:
            print("ERROR: Unrecognized split type:", split_type)
            return False
        split_files[split_type].append(mid)

    # One training dataset per augmentation pair; val/test are unaugmented
    # (identity stretch and transpose).
    trainDatasets = [
        _make_dataset(split_files["train"], max_seq, [stretch], [transpose])
        for stretch, transpose in augment_params
    ]
    validDataset = _make_dataset(split_files["validation"], max_seq, [1.0], [0])
    testDataset = _make_dataset(split_files["test"], max_seq, [1.0], [0])

    # An all-PAD sequence marks a window with no usable content; such
    # examples are filtered from the training shards.
    empty_seq = torch.full((max_seq,), PAD_ID, dtype=torch.long,
                           device=cpu_device())

    _write_split([validDataset], val_dir, 'Processing validation examples',
                 num_workers=8)
    _write_split([testDataset], test_dir, 'Processing test examples',
                 num_workers=8)
    _write_split(trainDatasets, train_dir, 'Processing training examples',
                 num_workers=16, skip_seq=empty_seq)
    return True

            

# parse_args


def parse_args():
    """
    Parse command-line arguments for the preprocessing script.

    Returns an argparse.Namespace with:
        input_dir  (str): root folder containing the MIDI files and manifest
        max_seq    (int): maximum token-sequence length (default 2048)
        output_dir (str): destination folder for the processed output
    """

    parser = argparse.ArgumentParser()

    parser.add_argument("input_dir", type=str,
                        help="Root folder for the midi files")
    # nargs="?" makes this positional optional so the declared default of
    # 2048 actually takes effect — argparse silently ignores `default` on a
    # required positional.
    parser.add_argument("max_seq", type=int, nargs="?", default=2048,
                        help="Max sequence length")
    parser.add_argument("-output_dir", type=str, default="./data/processed",
                        help="Output folder to put the preprocessed midi into")

    return parser.parse_args()

# main


def main():
    """
    Entry point: parse CLI arguments, then preprocess the MAESTRO dataset
    and save the encoded MIDI to the requested output folder.
    """

    args = parse_args()

    print(f"Preprocessing midi files and saving to {args.output_dir} "
          f"with max_seq set to {args.max_seq}")
    prep_midi(args.input_dir, args.output_dir, args.max_seq)
    print("Done!")
    print("")


# Run the preprocessing pipeline only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
