import torch
import torchvision
from torch import nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from ssqueezepy import issq_stft, issq_cwt, ifft
import sounddevice as sd
import soundfile as sf
from scipy.signal import resample
import os
from PIL import Image

from artear2_7.dataset import AudioDataset


class VAE(nn.Module):
    """LSTM-based variational autoencoder for spectrogram-like 4-D tensors.

    The encoder runs an LSTM over the transposed/flattened input, compresses
    the final hidden state into a latent Gaussian (mu, log_var), and the
    decoder maps a sampled latent vector back to a flat reconstruction that
    is reshaped to the original input shape.
    """

    def __init__(self, input_shape, z_dim):
        """
        :param input_shape: full 4-D shape of one batch (batch, d1, d2, d3).
            NOTE(review): the batch size is baked into the model via
            ``x_hat.view(self.input_shape)`` in ``forward``, so every batch
            fed to the model must have exactly this shape.
        :param z_dim: dimensionality of the latent vector z.
        """
        super().__init__()

        self.input_shape = input_shape
        # Shape after the two transposes performed in encode():
        # (batch, d3, d2, d1).
        self.transposed_input_shape = (
            input_shape[0], input_shape[3], input_shape[2], input_shape[1]
        )
        self.input_dim = input_shape[1] * input_shape[2] * input_shape[3]
        self.z_dim = z_dim

        # Shape after two spatial halvings; kept for compatibility with the
        # earlier convolutional encoder, unused by the current LSTM path.
        self.h2_shape = (
            self.transposed_input_shape[0], self.transposed_input_shape[1],
            self.transposed_input_shape[2] // 2 // 2,
            self.transposed_input_shape[3] // 2 // 2,
        )
        self.h_shape = (self.input_shape[0], 1000)
        h_dim = 128  # size of the pre-latent hidden representation

        # Encoder: [b, seq, input_shape[2]] -> LSTM -> [b, 1024] -> [b, h_dim]
        self.lstm = nn.LSTM(self.input_shape[2], 1024, batch_first=True)
        self.encoder = nn.Sequential(
            nn.ReLU(),
            nn.Linear(1024, 128),
            nn.ReLU(),
            nn.Linear(128, h_dim),
            nn.ReLU(),
        )
        # Decoder: [b, h_dim] -> [b, input_dim] (sigmoid applied in decode()).
        self.decoder = nn.Sequential(
            nn.Linear(h_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 128),
            nn.ReLU(),
            nn.Linear(128, self.input_dim),
        )

        self.fc2 = nn.Linear(h_dim, z_dim)  # h -> mu
        self.fc3 = nn.Linear(h_dim, z_dim)  # h -> log_var

        # Latent projection for the decoder: [b, z_dim] -> [b, h_dim]
        self.fc4 = nn.Linear(z_dim, h_dim)

    def forward(self, x):
        """Encode, reparameterize, decode.

        :param x: input tensor of shape ``self.input_shape``.
        :return: (x_hat, mu, log_var); x_hat has shape ``self.input_shape``,
            mu and log_var have shape [b, z_dim].
        """
        mu, log_var = self.encode(x)
        # Reparameterization trick: differentiable sampling from N(mu, sigma^2).
        sampled_z = self.reparameterization(mu, log_var)
        x_hat = self.decode(sampled_z)
        # Reshape the flat reconstruction back to the stored input shape.
        x_hat = x_hat.view(self.input_shape)
        return x_hat, mu, log_var

    def encode(self, x):
        """Map an input tensor to latent Gaussian parameters.

        :param x: tensor of shape ``self.input_shape`` = (b, d1, d2, d3).
        :return: (mu, log_var), each of shape [b, z_dim].
        """
        # Reorder to (b, d3, d1, d2), then flatten all but the last axis so
        # the LSTM sees a sequence of input_shape[2]-sized feature vectors.
        x = torch.transpose(x, 1, 3)
        x = torch.transpose(x, 2, 3)
        x = x.reshape(x.size()[0], -1, x.size(-1))
        x, (h_n, h_c) = self.lstm(x, None)
        # Summarize the sequence by its final timestep's output.
        x = x[:, -1, :]
        h = self.encoder(x)
        h = h.view(h.size()[0], -1)
        mu = self.fc2(h)
        log_var = self.fc3(h)

        return mu, log_var

    def reparameterization(self, mu, log_var):
        """Sample z = mu + sigma * eps with eps ~ N(0, I).

        :param mu: latent means, [b, z_dim].
        :param log_var: latent log-variances, [b, z_dim].
        :return: sampled z, [b, z_dim].
        """
        sigma = torch.exp(log_var * 0.5)
        eps = torch.randn_like(sigma)
        return mu + sigma * eps  # element-wise product

    def decode(self, z):
        """Decode a sampled latent vector back to a flat reconstruction.

        :param z: latent batch, [b, z_dim].
        :return: reconstruction in [0, 1], [b, input_dim].
        """
        z = self.fc4(z)
        x_hat = self.decoder(z)
        # Sigmoid keeps outputs in [0, 1] (required by the BCE loss).
        x_hat = torch.sigmoid(x_hat)
        return x_hat


def vae_loss_function(x_hat, x, mu, log_var):
    """Standard VAE objective: reconstruction BCE plus KL divergence.

    :param x_hat: reconstructed batch, values in [0, 1].
    :param x: target batch (treated as binary targets for BCE).
    :param mu: latent means.
    :param log_var: latent log-variances.
    :return: 1-tuple containing the summed total loss.
    """
    # Reconstruction term: binary cross-entropy summed over all elements.
    reconstruction = F.binary_cross_entropy(x_hat, x, reduction='sum')

    # KL(Q(z|x) || N(0, I)) in closed form, assuming a diagonal-covariance
    # Gaussian posterior.
    kl_divergence = 0.5 * torch.sum(log_var.exp() + mu.pow(2) - 1.0 - log_var)

    return reconstruction + kl_divergence,


def audio_similarity(audio1, audio2, hop_length=128):
    """Mean cosine similarity between two audio signals over fixed windows.

    The signals are compared window by window (``hop_length`` samples each);
    a trailing partial window is skipped.

    :param audio1: 1-D array-like of float samples (e.g. numpy array).
    :param audio2: 1-D array-like, same length as ``audio1``.
    :param hop_length: window size in samples.
    :return: 1-tuple holding the average similarity as a torch scalar.
    """
    total = 0
    starts = torch.arange(0, audio1.shape[0], hop_length)
    count = 0
    for i in starts:
        # Bug fix: the bound check previously used a hard-coded ``i + 100``
        # while the slices take ``hop_length`` samples, so truncated partial
        # windows near the end of the signal were silently compared.
        if i + hop_length <= audio1.shape[0]:
            total += F.cosine_similarity(
                torch.tensor(audio1[i:i + hop_length], requires_grad=True),
                torch.tensor(audio2[i:i + hop_length], requires_grad=True),
                dim=-1,
            )
            count += 1
    # Bug fix: average over the windows actually summed, not over every
    # window start (the old divisor under-weighted the result whenever a
    # partial trailing window was skipped).
    total /= max(count, 1)
    return total,


if __name__ == '__main__':

    steps = 2000
    z_dim = 40

    cwd = 'C:/dev_spa/DMuse/新建文件夹'

    config = {
        'dataset_config': {
            'train_sample_dir': cwd,
            'test_sample_dir': cwd,
            'train_label_dir': cwd,
            'test_label_dir': cwd,
            'max_duration': 1.,
            'sample_rate': 44100,
            'compress_rate': 10.,
        },
        'batch_size': 1,
        'shuffle': True,
    }

    dataset = AudioDataset(mode='train', sub_dir='.', **config['dataset_config'])

    dataloader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=config['batch_size'], shuffle=config['shuffle']
    )
    # Peek one batch to fix the model's input shape (batch size included).
    x, raw_length, raw_sr = next(iter(dataloader))

    spec_shape = x.size()

    model = VAE(input_shape=spec_shape, z_dim=z_dim)
    # Resume from a previous checkpoint if one exists.
    if os.path.exists(cwd + '/sound_vae5.pth'):
        state_dict = torch.load(cwd + '/sound_vae5.pth')
        model.load_state_dict(state_dict)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)

    # Target sample count when resampling inverted audio back to real time.
    target_len = int(config['dataset_config']['max_duration']
                     * config['dataset_config']['sample_rate'])

    for step in range(1, steps + 1):
        x, raw_length, raw_sr = next(iter(dataloader))

        x_hat, mu, log_var = model(x)

        # Invert both spectrograms (channels 0/1 hold real/imag parts) back
        # to audio, then score reconstruction vs. target.
        _audios = []
        for _x in [x_hat, x]:
            # Bug fix: the loop previously read from ``x_hat`` instead of the
            # loop variable ``_x``, so the loss compared the reconstruction
            # against itself and was constant.
            real, imag = _x[0, ..., 0], _x[0, ..., 1]
            real, imag = real.detach().numpy(), imag.detach().numpy()
            audio = issq_cwt((real + 1j * imag).T)
            _audios.append(resample(audio, target_len))
        # NOTE(review): the inversion round-trips through detached numpy
        # arrays, so this loss is disconnected from the model's parameters
        # and backward() cannot update them — confirm whether a differentiable
        # loss (e.g. vae_loss_function) was intended here.
        loss = audio_similarity(*_audios)

        print('step:', step, 'loss:', loss[0].data.item())
        optimizer.zero_grad()
        loss[0].backward()
        optimizer.step()

        if step % 1 == 0:  # snapshot every step (raise modulus to thin out)
            # Visualize and save channel 2 of the reconstruction as an image.
            plt.imshow(x_hat[0, ..., 2].detach().numpy().T)
            plt.show()
            img = Image.fromarray(x_hat[0, ..., 2].detach().numpy().T)
            img = img.convert('RGB')
            img.save(cwd + '/img/{}.bmp'.format(step))
            # Invert the reconstruction to audio, play it, and save wav + model.
            real, imag = x_hat[0, ..., 0], x_hat[0, ..., 1]
            real, imag = real.detach().numpy(), imag.detach().numpy()
            audio = issq_cwt((real + 1j * imag).T)
            audio = resample(audio, target_len)
            sd.play(audio, samplerate=config['dataset_config']['sample_rate'], blocking=False)
            sf.write(cwd + '/img/{}.wav'.format(step), audio, samplerate=config['dataset_config']['sample_rate'])
            torch.save(model.state_dict(), cwd + '/sound_vae5.pth')