import gc
import os
import random

import gym
import librosa
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchaudio.models.wavernn
import torchvision
from gym import spaces
from scipy.signal import resample
from stable_baselines3 import DDPG
from torch import nn
import torch.nn.functional as F
import sounddevice as sd
import soundfile as sf
from torch.optim import lr_scheduler
from torchaudio.models.wavernn import WaveRNN
from torchaudio.transforms import MelSpectrogram

from Artear.utils.simpledaw import SimpleDAW
from SynthSym.dataset import load_audio_as_array, AudioDataset, SynthOperationDataset, SynthParamsDataset

# Run on GPU when available; every model/tensor built in this script is moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
print(device)


def audio_similarity(audio1, audio2, hop_length=128):
    """Mean windowed cosine similarity between two 1-D audio signals.

    The signals are compared in non-overlapping windows of ``hop_length``
    samples; a trailing partial window is skipped, but the sum is still
    divided by the total number of window starts (matching the original
    normalisation).

    Returns a 1-tuple ``(similarity,)`` holding a 1-element tensor, for
    call-site compatibility with ``mfcc_cosine``.
    """
    # as_tensor accepts numpy arrays/lists and passes tensors through without
    # copying — unlike the old torch.tensor(..., requires_grad=True) wrapping,
    # which detached inputs from any existing autograd graph.
    a = torch.as_tensor(audio1, dtype=torch.float32)
    b = torch.as_tensor(audio2, dtype=torch.float32)
    starts = torch.arange(0, a.shape[0], hop_length)
    # Accumulate out-of-place: the original `_sum += ...` mutated a leaf
    # tensor with requires_grad=True, which raises a RuntimeError in autograd.
    total = torch.zeros(1)
    for i in starts:
        if i + hop_length <= a.shape[0]:
            total = total + F.cosine_similarity(a[i:i + hop_length],
                                                b[i:i + hop_length], dim=-1)
    total = total / starts.size(0)
    return total,


def mfcc_cosine(audio1, audio2, sample_rate):
    """Return a 1-tuple with the cosine similarity between the MFCCs of two clips.

    Both MFCC matrices are flattened before comparison; when the second clip
    has two channels, the first clip's features are tiled so the flattened
    vectors have matching length.
    """
    # Build the transform on the fly; it runs on the module-level `device`.
    transform = torchaudio.transforms.MFCC(sample_rate).to(device)
    feat1 = transform(audio1)
    feat2 = transform(audio2)
    flat1 = feat1.reshape(-1)
    if feat2.size(0) == 2:
        # assumes dim 0 of the MFCC output is the channel axis — TODO confirm
        flat1 = torch.concat([flat1, flat1])
    flat2 = feat2.reshape(-1)
    return F.cosine_similarity(flat1, flat2, dim=-1),


class OperationModel(torch.nn.Module):
    """WaveRNN-based sub-model mapping (audio, params) -> (new audio, param delta).

    The audio branch runs a mel-spectrogram + WaveRNN over the waveform; the
    flattened result is fused with the current parameter vector through a
    fully connected layer, from which both a transformed waveform (tanh) and
    a parameter delta (sigmoid output minus the input params) are predicted.
    """

    def __init__(self, sample_rate, n_channel, seq_len, n_classes, hop_length=200):
        super().__init__()
        # upsample_scales multiply to 5*5*8 = 200, matching the default
        # hop_length so conditioning frames line up with waveform samples.
        self.wavernn = WaveRNN(upsample_scales=[5, 5, 8], n_classes=n_classes, hop_length=hop_length).to(device)
        # self.wavernn2 = WaveRNN(upsample_scales=[5, 5, 8], n_classes=n_classes, hop_length=hop_length).to(device)
        self.mel_spec = MelSpectrogram(sample_rate).to(device)
        self.fc_a = nn.Linear(seq_len, seq_len)

        self.fc_p = nn.Linear(seq_len, n_classes)
        # print(seq_len, n_channel *
        #       ((seq_len // hop_length + 1 if seq_len % hop_length != 0 else seq_len // hop_length)
        #        - 5 + 1) * hop_length, n_classes)
        # The "- 5 + 1" mirrors the frame trimming in forward(); presumably it
        # accounts for a kernel size of 5 inside WaveRNN — TODO confirm.
        self.fc = nn.Linear(n_channel *
                            ((seq_len // hop_length + 1)
                             - 5 + 1) * hop_length * n_classes + n_classes, seq_len)

        self.seq_len = seq_len
        self.hop_length = hop_length

    def forward(self, data):
        """data = (x_a, x_p): a waveform batch and a parameter batch.

        Returns (y_a, y_p): tanh-squashed audio of length seq_len and a
        parameter delta relative to x_p.
        """
        x_a, x_p = data
        # Add a channel dimension for the spectrogram/WaveRNN: (N, 1, T).
        x_a = torch.unsqueeze(x_a, dim=1)
        specgram = self.mel_spec(x_a)
        # print(specgram.size())
        # Trim the waveform so its length matches the number of conditioning
        # frames WaveRNN produces from the spectrogram.
        x_a = self.wavernn(
            x_a[..., :((specgram.size(-1))
                       - 5 + 1) * self.hop_length],
            specgram)
        # print(x_a.size())
        x_a = x_a.view(x_a.size(0), -1)

        x_a, x_p = x_a.float(), x_p.float()
        # print(x_a.size(), x_p.size())
        # Fuse flattened audio features with the parameter vector.
        x = self.fc(torch.concat([x_a, x_p], dim=-1))

        y_a = self.fc_a(x)
        y_a = torch.tanh(y_a)

        y_p = self.fc_p(x)
        y_p = torch.sigmoid(y_p)
        # Emit a delta relative to the incoming parameters.
        y_p = y_p - x_p
        return y_a, y_p


class SpecOperationModel(torch.nn.Module):
    """VGG19-BN backbone over spectrogram images.

    Produces, per sample, a softmax distribution of size ``n_spaces`` for
    each of the ``n_params`` synth parameters: output shape is
    (batch, n_params, n_spaces).
    """

    def __init__(self, n_params, n_spaces, pretrained=False):
        super(SpecOperationModel, self).__init__()
        # torchvision's VGG19 with batch-norm; its classifier emits 1000 logits.
        self.layer = torchvision.models.vgg19_bn(pretrained)

        hidden = n_params * n_spaces
        self.fully_connect1 = torch.nn.Linear(1000, hidden)
        self.fully_connect2 = torch.nn.Linear(hidden, hidden)
        self.fully_connect3 = torch.nn.Linear(n_spaces, n_spaces)

        self.n_params = n_params

    def forward(self, x):
        # Swap dims 1 and 3 (e.g. NHWC -> NCWH) before the CNN; assumes the
        # input has channels last — TODO confirm with the caller.
        out = torch.transpose(x, 1, 3)
        out = self.layer(out)
        out = torch.relu(self.fully_connect1(out))
        out = torch.relu(self.fully_connect2(out))
        # Regroup features into one row per synth parameter before the final
        # per-parameter projection.
        out = out.view([out.size(0), self.n_params, -1])
        out = self.fully_connect3(out)
        return torch.softmax(out, -1)


class LSTMNet(nn.Module):
    """Embedding -> LSTM -> last-time-step softmax classifier.

    ``embedding_size`` is a shape tuple whose last two entries give the
    embedding table size: (num_embeddings, embedding_dim).
    """

    def __init__(self, embedding_size, n_classes, hidden_dim, num_layers, dropout=0.5, bidirectional=False):
        super(LSTMNet, self).__init__()
        self.embedding = torch.nn.Embedding(embedding_size[-2], embedding_size[-1])
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.lstm = nn.LSTM(embedding_size[-1], hidden_dim, num_layers=num_layers, batch_first=True,
                            bidirectional=bidirectional)
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1
        # Fixed: a bidirectional LSTM outputs 2*hidden_dim features per step,
        # so the classifier input is scaled by num_directions (the original
        # always used hidden_dim and would crash when bidirectional=True).
        # dim=-1 is explicit: bare nn.Softmax() is deprecated and guesses the axis.
        self.classifier = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(hidden_dim * num_directions, n_classes),
                                        nn.Softmax(dim=-1))
        # NOTE(review): the batch slot of h0/c0 is sized with the embedding
        # dim (embedding_size[-1]); forward() slices it to the actual batch,
        # which only works for batches up to that size — confirm sizing.
        self.h0 = torch.randn(self.num_layers * num_directions, embedding_size[-1], self.hidden_dim).to(device)
        self.c0 = torch.randn(self.num_layers * num_directions, embedding_size[-1], self.hidden_dim).to(device)

    def forward(self, inputs):
        inputs = self.embedding(inputs)
        # Fixed: the LSTM requires initial states shaped
        # (num_layers * num_directions, batch, hidden_dim); slice the stored
        # states to the actual batch size (identical to the original when
        # batch == embedding_size[-1], which previously was the only case
        # that did not error).
        batch = inputs.size(0)
        h0 = self.h0[:, :batch, :].contiguous()
        c0 = self.c0[:, :batch, :].contiguous()
        x, _ = self.lstm(inputs, (h0, c0))
        # Classify from the final time step only.
        x = x[:, -1, :]
        x = self.classifier(x)
        return x


class SymNet(torch.nn.Module):
    """Chain of per-parameter OperationModel sub-models selected by learned weights.

    ``weights`` (also registered under the name ``sub_m_prob``, as in the
    original checkpoint format) holds one row of selection logits per
    sub-model slot. ``forward`` loads the argmax-selected pretrained
    sub-model for each slot, then runs the audio through the resulting chain
    while accumulating parameter deltas.
    """

    def __init__(self, simpledaw, param_models_dir, sample_rate, max_duration, batch_size, n_channel, seq_len, n_params,
                 n_sub_models,
                 hidden_dim, num_layers, dataset_config):
        super().__init__()
        self.simpledaw = simpledaw
        self.max_duration = max_duration

        self.param_models_dir = param_models_dir
        self.sub_models = []

        # Fixed: remember the construction-time values forward() needs; the
        # original read the module-level globals `config` and `data_shape`,
        # which only exist when the training script runs as __main__.
        self.sample_rate = sample_rate
        self.seq_len = seq_len

        self.weights = torch.nn.Parameter(torch.rand(size=(n_sub_models, n_params)), requires_grad=True)
        self.register_parameter('sub_m_prob', self.weights)

        self.embedding_size = (batch_size, n_params, n_params)
        self.lstm_net = LSTMNet(self.embedding_size, n_params, hidden_dim, num_layers).to(device)

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dataset_config = dataset_config

    def forward(self, data):
        """data = (x_a, y_a, params_delta, init_params).

        Returns (audio, sigmoid(params)): the audio after the sub-model chain
        and the accumulated, squashed parameter vector.
        """
        x_a, y_a, params_delta, init_params = data

        # Pick, for each slot, the sub-model index with the highest weight
        # and (re)load the corresponding checkpoint if it exists on disk.
        self.sub_models.clear()
        sub_m_prob_out = torch.argmax(torch.sigmoid(self.weights), dim=-1)
        for i, index in enumerate(sub_m_prob_out):
            param_model_path = self.param_models_dir + '/{}.pth'.format(index)
            print(i, param_model_path)
            sub_m = OperationModel(self.sample_rate, 1, self.seq_len, params_delta.shape[-1])
            if os.path.exists(param_model_path):
                # map_location keeps loading working on CPU-only machines.
                sub_m.load_state_dict(torch.load(param_model_path, map_location=device))
            self.sub_models.append(sub_m.to(device))

        # Run the chain: each sub-model transforms the audio and emits a
        # parameter delta that accumulates onto the running parameter state.
        params = init_params.clone()
        for sub_model in self.sub_models:
            x_a, out_delta = sub_model([x_a, params])
            # Out-of-place add so autograd also works when init_params
            # requires grad (in-place += would error on such tensors).
            params = params + out_delta
        return x_a, torch.sigmoid(params)


def l12_smooth(input_tensor, a=0.05):
    """Smoothed L1/2 norm: sum of sqrt(smooth_abs(x)) over all elements.

    Below the threshold ``a``, |x| is replaced by a quartic polynomial that
    matches |x| in value and slope at |x| == a, making the norm smooth at 0.
    For a list input, the norm of every tensor is summed; the smoothing
    parameter ``a`` is forwarded to each element (the original recursion
    silently dropped it and always used the default).
    """
    if isinstance(input_tensor, list):
        return sum(l12_smooth(tensor, a) for tensor in input_tensor)

    smooth_abs = torch.where(torch.abs(input_tensor) < a,
                             torch.pow(input_tensor, 4) / (-8 * a ** 3) + torch.square(
                                 input_tensor) * 3 / 4 / a + 3 * a / 8,
                             torch.abs(input_tensor))

    return torch.sum(torch.sqrt(smooth_abs))


def loss_function(y, y_hat, sample_rate):
    """Mean-squared error between target ``y`` and prediction ``y_hat``.

    ``sample_rate`` is unused here; the signature is kept so this loss can be
    swapped with the MFCC-based alternative without touching call sites.
    """
    diff = y - y_hat
    return (diff * diff).mean()


def sub_model_loss_function(y, y_hat, y_p, y_p_hat, sample_rate, lambda_=0.8):
    """Mean-squared error between target and predicted audio.

    The parameter-delta and MFCC terms (``y_p``, ``y_p_hat``,
    ``sample_rate``, ``lambda_``) are currently unused but kept in the
    signature for existing callers.
    """
    return torch.mean((y - y_hat) ** 2)


class Simulator(gym.Env):
    """Gym environment that trains SymNet one batch per env step.

    Each ``step`` draws a batch, runs the model, takes one optimizer step on
    the audio MSE, renders the predicted preset through the DAW, and returns
    the log MFCC-cosine similarity to the target audio as the reward. Every
    episode is a single step (``done`` is always True).
    """

    def __init__(self, model, dataloader, target_audio, n_sub_models, n_params, config):
        self.action_space = spaces.Box(high=1., low=0., shape=(n_sub_models * n_params,))
        self.observation_space = spaces.Box(high=1., low=0., shape=(config['batch_size'] * n_params,))
        self.step_count = 0
        self.n_sub_models = n_sub_models
        self.n_params = n_params
        self.config = config
        self.model = model
        self.dataloader = dataloader
        self.target_audio = target_audio
        self.loss_func = torch.nn.MSELoss()

    def _audio_to_tensor(self, raw):
        """Decode a raw audio buffer into a tensor on `device` (was duplicated inline)."""
        cfg = self.config['dataset_config']
        return torch.tensor(load_audio_as_array(raw=raw,
                                                compress_rate=cfg['compress_rate'],
                                                max_duration=cfg['max_duration'],
                                                sample_rate_=cfg['sample_rate'],
                                                raw_audio_path='')[0]).to(device)

    def step(self, action):
        cfg = self.config['dataset_config']
        # Fixed: use the dataloader/target stored on self in __init__; the
        # original read the module-level globals of the same names and left
        # the stored attributes unused.
        x1, x2, params_delta, _init_params = next(iter(self.dataloader))
        out_a, out_p = self.model([x1.to(device), x2.to(device),
                                   params_delta.to(device), _init_params.to(device)])

        target = self._audio_to_tensor(self.target_audio)
        loss = loss_function(target, out_a.to(device), cfg['sample_rate'])
        # NOTE(review): `optimizer` is a module-level global created in
        # __main__; it should be injected via the constructor.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Render the first sample's predicted parameters through the DAW and
        # play the result.
        pred_params = out_p[0, ...].cpu().detach().numpy().tolist()
        print('预测参数：', pred_params)
        # NOTE(review): `simpledaw` is also a module-level global from __main__.
        simpledaw.set_params(pred_params)
        audio = librosa.to_mono(simpledaw.render(cfg['max_duration']).transpose(1, 0))
        print(audio.shape)
        sd.play(audio, samplerate=cfg['sample_rate'], blocking=True)

        # Reward: log MFCC-cosine similarity between target and rendered audio.
        rendered = self._audio_to_tensor(audio)
        reward = torch.log(mfcc_cosine(target, rendered, cfg['sample_rate'])[0]).data.item()
        # NOTE(review): `step` is the __main__ loop variable, not
        # self.step_count — verify which counter is intended here.
        print('step:', step, 'loss:', loss.data.item(), 'reward:', reward)

        if step % 1 == 0:  # always true; kept for parity with the original gating
            params_ = out_p[0, ...].cpu().detach()
            params_[params_ < 0.] = 0.
            params_ = params_.numpy().tolist()
            print(params_)
            simpledaw.set_params(params_)
            audio = librosa.to_mono(simpledaw.render(cfg['max_duration']).T)
            sd.play(audio, samplerate=cfg['sample_rate'], blocking=True)
            sf.write(cwd + '/img/{}.wav'.format(step), audio, samplerate=int(cfg['sample_rate']))

        self.step_count += 1
        # Observation: the flattened predicted parameter batch.
        state = out_p.view(-1).cpu().detach().numpy()

        print('[第{}轮训练]'.format(self.step_count), 'action:', action.shape, 'state:', state.shape)

        done = True
        info = {}
        return state, reward, done, info

    def reset(self):
        """Return an all-zero observation; no internal state is reset."""
        state = np.zeros((self.config['batch_size'] * self.n_params,))
        return state

    def render(self, mode='human'):
        # Rendering is done via the DAW inside step(); nothing to draw here.
        pass

    def seed(self, seed=None):
        # Randomness comes from the dataloader/model; seeding is a no-op.
        pass


if __name__ == '__main__':
    # ---- training hyper-parameters ----
    steps = 2000
    n_sub_models = 5
    hidden_dim = 8
    num_layers = 2

    cwd = 'D:/dev_spa/DMuse/202207'

    config = {
        'dataset_config': {
            'train_preset_dir': cwd,
            'test_preset_dir': cwd,
            'max_duration': .1,
            'sample_rate': 11000.,
            'compress_rate': 1.,
        },
        'batch_size': 2,
        'shuffle': True,
    }

    model_path = cwd + '/synthsym_1.pth'
    param_models_dir = cwd + '/param_models_3'

    # ---- host DAW + synth plugin; renders audio for the loaded MIDI ----
    synth_plugin = 'C:/VST/64bit/Sylenth1.dll'
    simpledaw = SimpleDAW(plugin=synth_plugin, sample_rate=config['dataset_config']['sample_rate'])
    simpledaw.load_midi(r"B:\muse_repo\MIDI\melody\20220608_01.mid")

    # Capture the initial preset, then render the target preset's audio once;
    # this is the sound the models try to match.
    init_params = torch.tensor(simpledaw.get_params())
    target_preset_file = cwd + '/Dajun 20201213_01.fxp'
    target_audio_file = cwd + '/Dajun 20201213_01.wav'
    target_params = torch.tensor(simpledaw.get_params(target_preset_file))
    target_audio = simpledaw.render(config['dataset_config']['max_duration'])
    target_audio = librosa.to_mono(target_audio.T)
    print(target_audio.shape, target_params.size())
    sd.play(target_audio, samplerate=config['dataset_config']['sample_rate'], blocking=True)

    # Restore the initial preset before generating training data.
    simpledaw.set_params(init_params)

    # ---- datasets: per-parameter operations and full parameter vectors ----
    sub_m_dataset = SynthOperationDataset(simpledaw, 0, mode='train', sub_dir='.', **config['dataset_config'])
    sub_m_dataloader = torch.utils.data.DataLoader(
        dataset=sub_m_dataset, batch_size=config['batch_size'], shuffle=config['shuffle']
    )

    dataset = SynthParamsDataset(simpledaw, mode='train', sub_dir='.', **config['dataset_config'])
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=config['batch_size'], shuffle=config['shuffle']
    )
    # Peek one batch to discover tensor shapes for model construction.
    x1, x2, params_delta, _init_params = iter(dataloader).__next__()

    data_shape = x1.size()
    print('data shape:', data_shape, 'len(params_delta):', params_delta.shape[-1])

    # ---- top-level SymNet model (loads per-parameter sub-models internally) ----
    model = SymNet(simpledaw, param_models_dir, config['dataset_config']['sample_rate'],
                   config['dataset_config']['max_duration'],
                   config['batch_size'], 1, data_shape[-1],
                   params_delta.size(-1), n_sub_models, hidden_dim, num_layers, config['dataset_config'])
    if os.path.exists(model_path):
        state_dict = torch.load(model_path)
        model.load_state_dict(state_dict)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)
    # lstm_net_loss_func = torch.nn.CrossEntropyLoss()
    # lstm_net_optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)

    # ---- RL wrapper: DDPG proposes sub-model selection weights ----
    trainer = Simulator(model, dataloader, target_audio, n_sub_models, params_delta.size(-1), config)
    rl_model = DDPG(policy="MlpPolicy", env=trainer)

    # ---- pretrain one OperationModel per synth parameter (skipped if a
    # checkpoint already exists on disk) ----
    for i in range(params_delta.size(-1)):
        param_model_path = param_models_dir + '/{}.pth'.format(i)
        sub_m = OperationModel(config['dataset_config']['sample_rate'], 1, data_shape[-1], params_delta.size(-1)).to(
            device)
        sub_m_optimizer = torch.optim.Adam(sub_m.parameters(), lr=0.0003)
        sub_m_scheduler = lr_scheduler.StepLR(sub_m_optimizer, step_size=5, gamma=0.9)
        if os.path.exists(param_model_path):
            # sub_m.load_state_dict(torch.load(param_model_path))
            continue
        sub_m_dataset.param_index = i
        for sub_step in range(1, 200 + 1):
            sub_m.train(True)
            # NOTE(review): params_delta is re-bound here, shadowing the batch
            # peeked above; later code only uses its last dimension, which is
            # unchanged — confirm this shadowing is intentional.
            x_a, y_a, x_p, param_index, params_delta, _init_params = iter(sub_m_dataloader).__next__()
            sub_m_output, sub_m_params_delta_output = sub_m([x_a.to(device), x_p.to(device)])
            # print(sub_m_output.size())
            # simpledaw.set_params((x_p.cpu().detach() + sub_m_params_delta_output.cpu().detach())[0, ...].numpy().tolist())
            # sub_m_audio = simpledaw.render(config['dataset_config']['max_duration'])
            # print(sub_m_audio.shape)
            # sd.play(sub_m_audio, samplerate=config['dataset_config']['sample_rate'], blocking=False)
            # sd.play(sub_m_audio, samplerate=config['dataset_config']['sample_rate'], blocking=False)
            sub_m_loss = sub_model_loss_function(y_a.to(device), sub_m_output, params_delta.to(device),
                                                 sub_m_params_delta_output, config['dataset_config']['sample_rate'])
            print('param_models:', i, 'sub_step:', sub_step, 'loss:', sub_m_loss.data.item())
            sub_m_optimizer.zero_grad()
            sub_m_loss.backward()
            sub_m_optimizer.step()
            sub_m_scheduler.step()
            sub_m.train(False)
        torch.save(sub_m.state_dict(), param_model_path)
        del sub_m, sub_m_optimizer
        gc.collect()

    # ---- outer loop: alternate single-step RL updates with checkpointing ----
    for step in range(1, steps + 1):
        # try:
        rl_model.learn(total_timesteps=1)
        rl_model.save(cwd + '/pg_rl.zip')
        # except Exception:
        #     pass

        obs = trainer.reset()

        torch.save(model.state_dict(), model_path)