import torch.autograd
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
from torchvision import datasets
from torchvision.utils import save_image
import os
import matplotlib.pyplot as plt
from ssqueezepy import issq_stft, issq_cwt, ifft
import sounddevice as sd
import soundfile as sf
from scipy.signal import resample
from PIL import Image

from dataset import AudioDataset
 
 
def to_img(x, shape=(1, 28, 28)):
    """Map network output from [-1, 1] back to a [0, 1] image batch.

    Undoes the ``Normalize((0.5,), (0.5,))`` preprocessing, clamps any
    numerical overshoot into the valid pixel range, and reshapes the flat
    batch into image tensors.

    Args:
        x: tensor of shape ``(N, prod(shape))`` with values near [-1, 1].
        shape: per-sample output shape; defaults to MNIST-style
            ``(1, 28, 28)`` for backward compatibility.

    Returns:
        Tensor of shape ``(N,) + shape`` with values in [0, 1].
    """
    out = 0.5 * (x + 1)  # [-1, 1] -> [0, 1]
    out = out.clamp(0, 1)  # clip values outside the valid pixel range
    out = out.view((-1,) + tuple(shape))  # flat batch -> image tensor
    return out
 
 
# Dimensionality of the latent noise vector fed to the generator.
z_dimension = 64
 
# ##### Discriminator ##### a multi-layer perceptron used as the discriminator.
# Each sample is flattened into one vector and passed through linear layers
# with LeakyReLU (slope 0.2) activations; a final sigmoid yields a
# probability in [0, 1] for binary real/fake classification.
class discriminator(nn.Module):
    """MLP discriminator: flattened sample -> real/fake probability.

    Flattens each input sample and maps it through an MLP with LeakyReLU
    activations to a single sigmoid output (1 = "real", 0 = "fake").
    """

    def __init__(self, input_shape):
        """
        Args:
            input_shape: 4-element shape ``(batch, channels, height, width)``
                of the input batch; only dims 1-3 determine layer sizes.
        """
        super(discriminator, self).__init__()
        self.input_shape = input_shape
        # Flattened feature count of a single sample.
        self.input_dim = input_shape[1] * input_shape[2] * input_shape[3]
        # Shape after two halvings of the spatial dims
        # (x // 4 is equivalent to the original int(int(x / 2) / 2)).
        self.h2_shape = (self.input_shape[0], self.input_shape[1],
                         self.input_shape[2] // 4,
                         self.input_shape[3] // 4)
        print(self.input_shape, self.h2_shape)
        h2_dim = self.h2_shape[1] * self.h2_shape[2] * self.h2_shape[3]
        # NOTE(review): fc1 is never used by forward(), but it is kept so
        # state_dicts saved by earlier runs still load with strict=True.
        self.fc1 = nn.Sequential(
            nn.Linear(h2_dim, 1),
            nn.Sigmoid()
        )
        self.dis = nn.Sequential(
            nn.Linear(self.input_dim, 256),
            nn.LeakyReLU(0.2),  # non-linear mapping
            nn.Linear(256, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid()  # squash to [0, 1]: probability for binary
            # real/fake classification (softmax would be the multi-class
            # counterpart)
        )

    def forward(self, x):
        """Return a ``(batch, 1)`` tensor of real-vs-fake probabilities."""
        x = x.view(x.size()[0], -1)  # flatten each sample
        x = self.dis(x)
        return x
 
 
# ##### Generator #####
# Takes a z_dimension-dim Gaussian noise vector, maps it through linear
# layers with ReLU activations, and applies a final Tanh so the generated
# fake samples are distributed in [-1, 1] — matching the real data after
# the normalizing transform.
class generator(nn.Module):
    """MLP generator: latent noise vector -> fake sample.

    A 4-layer MLP with ReLU hidden activations; the final Tanh keeps the
    output in [-1, 1], matching the normalization of the real data.
    """

    def __init__(self, input_shape, z_dim=64):
        """
        Args:
            input_shape: shape of the real samples ``(batch, C, H, W)``;
                the output is reshaped to ``(batch,) + input_shape[1:]``.
            z_dim: dimensionality of the input noise vector. Defaults to
                64, matching the module-level ``z_dimension``.
        """
        super(generator, self).__init__()
        self.input_shape = input_shape
        # Flattened feature count of a single generated sample.
        self.input_dim = input_shape[1] * input_shape[2] * input_shape[3]
        self.gen = nn.Sequential(
            nn.Linear(z_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, self.input_dim),
            nn.Tanh(),  # keep outputs in [-1, 1], like the normalized data
        )

    def forward(self, x):
        """Map noise ``(batch, z_dim)`` to a batch shaped like the data."""
        x = self.gen(x)
        x = x.reshape((x.size()[0],) + self.input_shape[1:])
        return x
 

if __name__ == '__main__':
    # Output directory for generated spectrogram images / audio clips.
    # makedirs creates missing parents and tolerates re-runs
    # (os.mkdir would fail if './img2' were absent or '7' already existed).
    os.makedirs('./img2/7', exist_ok=True)

    num_epoch = 1000

    cwd = 'C:/dev_spa/DMuse/202202c1'

    config = {
        'dataset_config': {
            'train_sample_dir': cwd,
            'test_sample_dir': cwd,
            'train_label_dir': cwd,
            'test_label_dir': cwd,
            'max_duration': 1.,
            'sample_rate': 44100,
            'compress_rate': 150.,
        },
        'batch_size': 2,
        'shuffle': True,
    }

    dataset = AudioDataset(mode='train', sub_dir='clap', **config['dataset_config'])

    dataloader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=config['batch_size'], shuffle=True
    )

    # Peek at one batch only to size the networks from the sample shape.
    x, raw_length, raw_sr = next(iter(dataloader))

    D = discriminator(x.size())
    G = generator(x.size())
    # NOTE(review): the training loop below calls .cuda() unconditionally,
    # so this script currently requires a CUDA device despite this guard.
    if torch.cuda.is_available():
        D = D.cuda()
        G = G.cuda()
    # Resume from checkpoints when they exist.
    if os.path.exists(cwd + '/sound_discriminator.pth'):
        state_dict = torch.load(cwd + '/sound_discriminator.pth')
        D.load_state_dict(state_dict)
    if os.path.exists(cwd + '/sound_generator.pth'):
        state_dict = torch.load(cwd + '/sound_generator.pth')
        G.load_state_dict(state_dict)

    criterion = nn.BCELoss()  # binary cross-entropy on real/fake labels
    d_optimizer = torch.optim.Adam(D.parameters(), lr=0.0003)
    g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0003)

    # ---------------------------- training loop ----------------------------
    for epoch in range(num_epoch):
        for i, (img, raw_length, raw_sr) in enumerate(dataloader):
            num_img = img.size()[0]

            real_img = Variable(img).cuda()
            real_label = Variable(torch.ones((num_img, 1))).cuda()   # 1 = real
            fake_label = Variable(torch.zeros((num_img, 1))).cuda()  # 0 = fake

            # ---- discriminator step ----
            real_out = D(real_img)
            d_loss_real = criterion(real_out, real_label)
            real_scores = real_out  # closer to 1 means D judges real well

            z = Variable(torch.randn(num_img, z_dimension)).cuda()
            # detach(): D's update must not backpropagate into G.
            fake_img = G(z).detach()
            fake_out = D(fake_img)
            d_loss_fake = criterion(fake_out, fake_label)
            fake_scores = fake_out  # closer to 0 means D judges fake well

            d_loss = d_loss_real + d_loss_fake  # real loss + fake loss
            d_optimizer.zero_grad()  # clear gradients before backprop
            d_loss.backward()
            d_optimizer.step()

            # ---- generator step: try to make D output "real" on fakes ----
            z = Variable(torch.randn(num_img, z_dimension)).cuda()
            fake_img = G(z)
            output = D(fake_img)
            g_loss = criterion(output, real_label)
            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            # Periodically report losses and mean discriminator scores.
            if (i + 1) % 5 == 0:
                print('Epoch[{}/{}],d_loss:{:.6f},g_loss:{:.6f} '
                            'D real: {:.6f},D fake: {:.6f}'.format(
                        epoch, num_epoch, d_loss.data.item(), g_loss.data.item(),
                        real_scores.data.mean(), fake_scores.data.mean()
                    ))

        # End of each epoch: dump a sample image/audio pair and checkpoint.
        if (epoch + 1) % 1 == 0:
            z = Variable(torch.randn(num_img, z_dimension)).cuda()
            fake_img = G(z).cpu()
            plt.imshow(fake_img[0, 2, ...].detach().numpy())
            plt.savefig('./img2/7/{}.tif'.format(epoch))
            # NOTE(review): channels are indexed on the last axis here but on
            # axis 1 for the image above — confirm the sample's layout.
            real, imag = fake_img[0, ..., 0], fake_img[0, ..., 1]
            real, imag = real.detach().numpy(), imag.detach().numpy()
            # Inverse synchrosqueezed CWT: transform-domain tensor -> waveform.
            audio = issq_cwt((real + 1j * imag).T)
            audio = resample(audio, int(config['dataset_config']['max_duration'] * config['dataset_config']['sample_rate']))
            sd.play(audio, samplerate=config['dataset_config']['sample_rate'], blocking=False)
            sf.write('./img2/7/{}.wav'.format(epoch), audio, samplerate=config['dataset_config']['sample_rate'])
            torch.save(D.state_dict(), cwd + '/sound_discriminator.pth')
            torch.save(G.state_dict(), cwd + '/sound_generator.pth')

    # Save the final models.
    torch.save(G.state_dict(), './generator.pth')
    torch.save(D.state_dict(), './discriminator.pth')