from unicodedata import bidirectional
from numpy.core.fromnumeric import transpose
from torch.functional import norm
from representation import *
import torch
import torch.nn as nn
import random
import numpy as np
import time
import os
import math
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from tqdm import tqdm
# Fix every source of randomness (PyTorch CPU + all CUDA devices, NumPy,
# Python's random) to a single seed so training runs are reproducible.
seed = 2021
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# Force deterministic cuDNN kernels; slower, but results become repeatable.
torch.backends.cudnn.deterministic = True


def reformat_midi(mid, name=None, verbose=True, write_to_file=False, override_time_info=True):
    """
    Performs sanity check and reformats a midi file based on the following criteria:
    - Flattens all messages onto a single track, making it of midi file type 0.
    - Drops 'control_change', 'pitchwheel' and 'program_change' messages.
    - Checks if the last 'note_on' has a corresponding 'note_off' message, adding one if needed.
    - Adds an 'end_of_track' metamessage so the duration is a multiple of the time_signature.
    Reformatting will make the file load better (i.e. nicer looking) in music21 and other musicxml programs.
    Parameters
    ----------
    mid: mido.MidiFile
        A pythonised midi stream (must expose .filename, .type, .tracks,
        .ticks_per_beat).  NOTE(review): despite the original docstring, a
        plain path string is NOT accepted here -- the code immediately reads
        `mid.filename`.
    name: str
        Output path; also used as the track name. Defaults to the midi's
        filename inside the current working directory.
    verbose: bool
        Print messages to the console while formatting.
    write_to_file: bool
        Overwrite the original midi file with the newly formatted data.
    override_time_info: bool
        Override original tempo and time signature.
    Return
    ------
    mid: mido.MidiFile
        A pythonised midi file for further manipulation, or None for type-2
        files, which are left untouched.
    Notes
    -----
    override_time_info ignores the original tempo and time signature,
    forcing them to 'set_tempo' = 125 bpm and 'time_signature' = 4/4.
    This is useful for most cases of analysis of EDM content.
    """
    if not mid.filename:
        mid.filename = "midi_track"

    if not name:
        name = os.path.join(os.getcwd(), mid.filename)

    if verbose:
        # These diagnostics were previously printed unconditionally,
        # ignoring the `verbose` flag.
        print("file name:", mid.filename)
        print("file type:", mid.type)
        print("ticks per quarter note:", mid.ticks_per_beat)
        print("number of tracks", len(mid.tracks))
        print(mid.tracks)

    # Bookkeeping/meta messages that must not be copied onto the flat track.
    EXCLUDED_MSG_TYPES = {"sequence_number", "text", "copyright", "track_name", "instrument_name",
                          "lyrics", "marker", "cue_marker", "device_name", "channel_prefix",
                          "midi_port", "sequencer_specific", "end_of_track", 'smpte_offset'}

    if override_time_info:
        EXCLUDED_MSG_TYPES.add('time_signature')
        EXCLUDED_MSG_TYPES.add('set_tempo')

    # Type-2 files hold asynchronous tracks that cannot be safely flattened.
    if mid.type == 2:
        print("Midi file type {}. I did not dare to change anything.".format(mid.type))
        return None

    if verbose and mid.type == 1:
        # if type 1, convert to type 0
        print("Converting file type 1 to file type 0 (single track).")

    flat_track = MidiTrack()
    flat_track.append(MetaMessage("track_name", name=os.path.split(name)[1], time=0))
    if verbose:
        print("NAME", os.path.split(name)[1])
    # NOTE(review): a second 'track_name' plus an 'instrument_name' are
    # appended after the real name; kept as-is to preserve the original
    # output byte-for-byte -- confirm whether the duplicate is intentional.
    flat_track.append(MetaMessage("track_name", name="unnamed", time=0))
    flat_track.append(MetaMessage("instrument_name", name="Piano", time=0))

    if override_time_info:
        if verbose:
            print('WARNING: Ignoring Tempo and Time Signature Information.')
        # 480000 microseconds per quarter note == 125 BPM.
        flat_track.append(MetaMessage("set_tempo", tempo=480000, time=0))
        flat_track.append(MetaMessage("time_signature", numerator=4, denominator=4, time=0))

    # Flatten every track onto the single flat_track, skipping excluded types.
    for track in mid.tracks:
        for msg in track:
            if msg.type in EXCLUDED_MSG_TYPES:  # was a linear any() scan over the set
                if verbose:
                    print("IGNORING", msg)
            else:
                flat_track.append(msg)

    # Keep only note events and remaining meta messages; drop controller data.
    new_track = MidiTrack()
    for msg in flat_track:
        if msg.type not in ('control_change', 'pitchwheel', 'program_change'):
            new_track.append(msg)

    # Replace the 'tracks' field with a single track containing all messages.
    mid.tracks.clear()
    mid.type = 0
    mid.tracks.append(new_track)

    # Add a 'note_off' event at the end of track if it is missing:
    events = [msg for msg in mid.tracks[0] if msg.type in ('note_on', 'note_off')]
    if events and events[-1].type == 'note_on':
        mid.tracks[0].append(Message('note_off', note=events[-1].note, velocity=0, time=0))
        if verbose:
            print("WARNING: 'note_off' missing at the end of file. Adding 'note_off' message.")

    # Set the duration of the file to a multiple of the Time Signature:
    ticks_per_beat = mid.ticks_per_beat
    beats_per_bar = 4
    dur_in_ticks = 0
    for msg in mid.tracks[0]:
        dur_in_ticks += msg.time
        if msg.type == 'set_tempo':
            if verbose:
                print("Tempo: {} BPM".format(60000000 / msg.tempo))
        if msg.type == 'time_signature':
            beats_per_bar = msg.numerator
            # Rescale so a "beat" matches the signature's denominator.
            ticks_per_beat = (4 / msg.denominator) * mid.ticks_per_beat
            if verbose:
                print("Time Signature: {}/{}".format(msg.numerator, msg.denominator))

    ticks_per_bar = beats_per_bar * ticks_per_beat
    dur_in_measures = dur_in_ticks / ticks_per_bar
    expected_dur_in_ticks = int(math.ceil(dur_in_measures) * ticks_per_bar)
    ticks_to_end_of_bar = expected_dur_in_ticks - dur_in_ticks
    if verbose:
        # Was an unconditional bare print of the padding tick count.
        print(ticks_to_end_of_bar)

    # Fold an existing 'end_of_track' delay into the padding we append below.
    if mid.tracks[0][-1].type == "end_of_track":
        ticks_to_end_of_bar += mid.tracks[0][-1].time
        mid.tracks[0].pop(-1)

    mid.tracks[0].append(MetaMessage('end_of_track', time=ticks_to_end_of_bar))

    if verbose:
        if dur_in_ticks == expected_dur_in_ticks:
            print("Original duration already a multiple of Time Signature.")
            print(dur_in_ticks, "ticks,", dur_in_measures, "bars.")
        else:
            print("Original duration:", dur_in_ticks, "ticks,", dur_in_measures, "bars.")
            new_dur_in_ticks = sum(msg.time for msg in mid.tracks[0])
            print("Final duration:", new_dur_in_ticks, "ticks,", new_dur_in_ticks / ticks_per_bar, "bars.")

    if write_to_file:
        mid.save(name)
        if verbose:
            # Typo fix: was "(Over)writting".
            print("(Over)writing mid file with changes.\n")

    return mid


class Net(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Encodes a padded batch of note-feature sequences with a bi-LSTM, then
    classifies the concatenation of the forward direction's last valid
    hidden state and the backward direction's first hidden state.
    """

    def __init__(self, in_ch = 3, hid = 16, layers = 3, out_ch = 2, reduce = 'last'):
        super(Net, self).__init__()
        # Width of one LSTM direction; used to split the bi-LSTM output.
        self.dimension = hid
        self.encoder = nn.LSTM(
            input_size=in_ch,
            hidden_size=hid,
            num_layers=layers,
            batch_first=True,
            bidirectional=True,
        )
        # NOTE(review): `reduce` is accepted but currently unused.
        self.decoder = nn.Sequential(nn.Linear(2 * hid, out_ch))

    def forward(self, x, length) -> torch.tensor:
        """Classify a padded batch.

        x: float tensor of shape (batch, max_len, in_ch).
        length: 1-D tensor of true sequence lengths (on CPU).
        Returns logits of shape (batch, out_ch).
        """
        packed = pack_padded_sequence(x, length, batch_first=True, enforce_sorted=False)
        encoded, _ = self.encoder(packed)
        unpacked, _ = pad_packed_sequence(encoded, batch_first=True)

        # Forward direction: hidden state at each sequence's last real step.
        fwd_last = unpacked[range(len(unpacked)), length - 1, :self.dimension]
        # Backward direction: hidden state at step 0 (it has seen the whole sequence).
        bwd_first = unpacked[:, 0, self.dimension:]
        features = torch.cat((fwd_last, bwd_first), 1)

        logits = self.decoder(features)
        return torch.squeeze(logits, 1)

def melody_to_tensor(melody, max_len: int = 64):
    """Convert one Melody into a fixed-length, normalised feature tensor.

    Each note contributes a row [note_pitch, beats, velocity], normalised
    by [128, 2, 96] respectively. The sequence is right-padded with zeros
    to `max_len` rows (and truncated if longer -- the original hard-coded
    64 and crashed on longer melodies).

    Parameters
    ----------
    melody: Melody
        Object exposing `melody_list`, whose items expose `measure_list`
        of notes with `note_pitch`, `beats` and `velocity` attributes.
    max_len: int
        Padded sequence length (default 64, matching the old behaviour).

    Returns
    -------
    (tensor, lens): a (1, max_len, 3) float tensor and a 0-dim long tensor
        holding the number of real (unpadded) rows.
    """
    # Per-feature normalisation constants. Renamed from `norm`, which
    # shadowed the module-level `torch.functional.norm` import.
    scale = torch.tensor([128, 2, 96], dtype=torch.float)
    rows = []
    for measure in melody.melody_list:
        for note in measure.measure_list:
            feat = torch.tensor([note.note_pitch, note.beats, note.velocity], dtype=torch.float)
            rows.append(feat.view(1, -1))
    if not rows:
        # Empty melody: all padding, zero length (original raised in torch.cat).
        return scale.new_zeros(1, max_len, 3), torch.tensor(0).long()
    out = torch.cat(rows, dim=-2)[:max_len]
    padded = out.new_zeros(max_len, out.shape[1])
    padded[:out.shape[0]] = out
    padded = padded / scale
    lens = torch.tensor(out.shape[0]).long()
    return padded.view(1, max_len, -1), lens

def melodys_to_tensor(melodys : list) -> torch.tensor:
    """Batch-convert a list of Melody objects.

    Stacks each melody's (1, 64, 3) tensor along the batch axis and
    collects the per-melody lengths into a single 1-D tensor.
    """
    tensors, lengths = [], []
    for melody in melodys:
        feats, count = melody_to_tensor(melody)
        tensors.append(feats)
        lengths.append(count.view(1))
    batch = torch.cat(tensors, dim=0)
    batch_lens = torch.cat(lengths, dim=0)
    return batch, batch_lens

class Discriminator(nn.Module):
    """Binary classifier distinguishing generated (label 0) from
    manually-written (label 1) melodies, built around `Net`.
    """

    def __init__(self, in_ch = 3, hid = 16, layers = 1, optim = 'Adam', device = 'cuda', steps = 100000):
        super(Discriminator, self).__init__()
        # Bug fix: in_ch/hid/layers were previously ignored -- `Net()` was
        # built with Net's own defaults, so e.g. `layers=1` here never took
        # effect. Forward them so the constructor arguments are honoured.
        self.Net = Net(in_ch=in_ch, hid=hid, layers=layers).to(device)
        self.optim = getattr(torch.optim, optim)(self.Net.parameters(), lr=0.01)
        self.device = device
        self.criterion = nn.CrossEntropyLoss()

        def EqAcc(out, y):
            # Fraction of argmax predictions that match the labels.
            return torch.eq(torch.argmax(out, dim=-1).view(-1), y.view(-1)).float().mean()

        self.accuracy = EqAcc
        self.steps = steps

    def fit(self, gens : list, manuals : list) -> None:
        """Train on full batches of generated vs. manual melodies.

        gens/manuals: lists of Melody objects; gens get label 0,
        manuals label 1. Runs `self.steps` full-batch gradient steps,
        printing loss/accuracy each step.
        """
        gen_num = len(gens)
        manual_num = len(manuals)
        tot_num = gen_num + manual_num
        gens, glen = melodys_to_tensor(gens)
        manuals, mlen = melodys_to_tensor(manuals)
        # Concatenate as [gen, manual] with labels [0, 1].
        mels = torch.cat([gens, manuals], dim=0)
        lens = torch.cat([glen, mlen], dim=0)
        labels = torch.zeros(tot_num, dtype=torch.long)
        labels[gen_num:] = 1
        # Shuffle samples, labels and lengths with one shared permutation.
        indx = np.arange(0, tot_num, 1, dtype=int)
        np.random.shuffle(indx)
        indx = torch.from_numpy(indx).long()
        mels = mels[indx].to(self.device)
        labels = labels[indx].to(self.device)
        # Lengths stay on CPU: pack_padded_sequence expects CPU lengths.
        lens = lens[indx].long()
        # -----------------------------------------------------------------
        self.Net.train()
        for i in range(self.steps):
            tt = time.time()
            out = self.Net(mels, lens)
            loss = self.criterion(out, labels)
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            with torch.no_grad():
                Ave_loss = loss.mean().item()
                Ave_cor =  self.accuracy(out,labels).mean().item()
            print("[Epoch\t{}]loss:\t{:.4f}\tacc:\t{:.4f}\tDtime:{:.2f}".format(
                    i,Ave_loss,Ave_cor,time.time()-tt),flush=True)

    def save(self, path = 'checkp.pth'):
        """Save the wrapped Net's weights to `path`."""
        state = {'model': self.Net.state_dict()}
        torch.save(state, path)

    def load(self, path = 'checkp.pth'):
        """Restore the wrapped Net's weights from `path`."""
        checkp = torch.load(path)
        self.Net.load_state_dict(checkp['model'])

    def eval(self, gen : Melody) -> float:
        """Return P(label == 1), i.e. the probability that `gen` is a
        manually-written melody, as a plain float.

        NOTE(review): this shadows `nn.Module.eval` with a different
        signature; kept for caller compatibility.
        """
        with torch.no_grad():
            gen, lens = melody_to_tensor(gen)
            gen = gen.to(self.device)
            self.Net.eval()
            out = self.Net(gen, lens.view(1))[0]
            out = torch.softmax(out, dim=-1)
        return float(out[1])


if __name__ == '__main__':
    # Bug fix: device was hard-coded to 'cuda', crashing on CPU-only hosts.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    DNet = Discriminator(device=device)
    # (Removed dead code: an unused shuffled index array was built here.)
    gens = []
    manuals = []
    names = np.sort(os.listdir('midi_out'))
    for i, filename in tqdm(enumerate(names)):
        try:
            melody = Melody(key='C', filename=filename)
        except Exception:
            # Skip files the Melody parser cannot handle (was a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit).
            continue
        if i < 1:
            # First file is treated as the "generated" example...
            gens.append(melody)
        elif i < 200:
            # ...the next 199 as manually-written references.
            manuals.append(melody)
        else:
            break
    DNet.fit(gens, manuals)
    print(DNet.eval(gens[0]))
