import torch
from torch import nn

from dataset import AudioDataset


class SymNet(torch.nn.Module):
    """Alternates a learned linear mixing step (x @ W_i) with each sub-model.

    One weight matrix of shape (spec_shape[1], spec_shape[3]) is created per
    sub-model; ``forward`` applies matmul-then-sub-model for each pair in order.
    """

    def __init__(self, spec_shape, sub_models):
        """
        Args:
            spec_shape: indexable shape spec; each weight matrix is
                (spec_shape[1], spec_shape[3]).
                # NOTE(review): presumably a 4-d spectrogram batch shape —
                # confirm against the dataset producer.
            sub_models: iterable of modules, one applied after each mixing step.
        """
        super().__init__()
        # Bug fix: the original kept Parameters in a plain Python list, which
        # torch.nn.Module does not register — net.parameters() returned
        # nothing and the optimizer never updated the weights.  ParameterList
        # and ModuleList register their contents as proper children.
        self.sub_models = torch.nn.ModuleList(sub_models)
        self.weights = torch.nn.ParameterList(
            # requires_grad=True is the Parameter default.
            torch.nn.Parameter(torch.rand(spec_shape[1], spec_shape[3]))
            for _ in self.sub_models
        )

    def forward(self, x):
        """Apply each (matmul, sub-model) pair in sequence and return the result."""
        for W, sub_m in zip(self.weights, self.sub_models):
            x = sub_m(x.matmul(W))
        return x


def l12_smooth(input_tensor, a=0.05):
    """Smoothed L1/2 norm: sum(sqrt(|x|)) with a polynomial patch near zero.

    For |x| >= a the inner term is plain |x|; for |x| < a it is a quartic
    polynomial chosen to meet |x| smoothly at the threshold, avoiding the
    unbounded gradient of sqrt(|x|) at 0.

    Args:
        input_tensor: a tensor, or a (possibly nested) list of tensors, in
            which case the per-tensor norms are summed.
        a: smoothing threshold (> 0) below which the polynomial is used.

    Returns:
        A 0-dim tensor holding the summed smoothed norm.
    """
    if isinstance(input_tensor, list):
        # Bug fix: propagate `a` into the recursion — the original dropped it
        # and always used the default threshold for list elements.
        return sum(l12_smooth(tensor, a) for tensor in input_tensor)

    abs_x = torch.abs(input_tensor)
    smooth_abs = torch.where(
        abs_x < a,
        torch.pow(input_tensor, 4) / (-8 * a ** 3)
        + torch.square(input_tensor) * 3 / (4 * a)
        + 3 * a / 8,
        abs_x,
    )
    return torch.sum(torch.sqrt(smooth_abs))


def loss_function(y, y_hat, weights, lambda_=0.5):
    """Mean-squared error plus a smoothed-L1/2 sparsity penalty on *weights*.

    Args:
        y: target tensor.
        y_hat: prediction tensor, same shape as ``y``.
        weights: tensor or list of tensors passed to ``l12_smooth``.
        lambda_: weight of the sparsity penalty.

    Returns:
        0-dim tensor: ``MSE(y, y_hat) + lambda_ * l12_smooth(weights)``.
    """
    residual = y - y_hat
    mse = torch.square(residual).mean()
    penalty = l12_smooth(weights)
    return mse + lambda_ * penalty
    
    
if __name__ == '__main__':
    # Debug/demo entry point: intended to run a single optimization step.
    # NOTE(review): `spec_shape`, `sub_models` and `x` are never defined in
    # this file — as written, lines below raise NameError.  Presumably they
    # were meant to come from `snap_model` or from an AudioDataset built with
    # `config`; confirm the intended wiring.
    target_audio_file = r"C:\dev_spa\DMuse\202202c1\Kirara Magic - Fly-5.wav"  # NOTE(review): unused in this block
    
    cwd = 'C:/dev_spa/DMuse/202202c1'
    
    # Dataset/loader configuration.  NOTE(review): currently unused — nothing
    # below constructs an AudioDataset or DataLoader from it.
    config = {
        'dataset_config': {
            'train_sample_dir': cwd,
            'test_sample_dir': cwd,
            'train_label_dir': cwd,
            'test_label_dir': cwd,
            'compress_rate': 10.,
        },
        'batch_size': 4,
        'shuffle': True,
    }
    
    # SECURITY: torch.load unpickles arbitrary objects — only load snapshots
    # from trusted sources.  NOTE(review): `snap_model` is never used below.
    snap_model = torch.load('snap.pth')
    
    # NOTE(review): `spec_shape` and `sub_models` are undefined here.
    net = SymNet(spec_shape, sub_models)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0003)
    
    # NOTE(review): `x` is undefined here.  The loss compares the output back
    # to the input `x` — looks like an autoencoder-style objective; confirm.
    output = net(x)
    loss = loss_function(x, output, net.weights)
    print('loss:', loss)
    # Standard single training step: clear grads, backprop, apply update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    
