import unittest

import numpy as np
import torch
from torch import optim

from TTS.vocoder.configs import WavegradConfig
from TTS.vocoder.models.wavegrad import Wavegrad, WavegradArgs

# pylint: disable=unused-variable

torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")


class WavegradTrainTest(unittest.TestCase):
    def test_train_step(self):  # pylint: disable=no-self-use
        """Test if all layers are updated in a basic training cycle"""
        input_dummy = torch.rand(8, 1, 20 * 300).to(device)
        mel_spec = torch.rand(8, 80, 20).to(device)

        criterion = torch.nn.L1Loss().to(device)
        args = WavegradArgs(
            in_channels=80,
            out_channels=1,
            upsample_factors=[5, 5, 3, 2, 2],
            upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]],
        )
        config = WavegradConfig(model_params=args)
        model = Wavegrad(config)

        model_ref = Wavegrad(config)
        model.train()
        model.to(device)
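        # precompute the diffusion noise schedule from 1000 linearly spaced betas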
        betas = np.linspace(1e-6, 1e-2, 1000)
        model.compute_noise_level(betas)
        model_ref.load_state_dict(model.state_dict())
        model_ref.to(device)
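        # sanity check: the reference copy must start with identical parameters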
        count = 0
        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=0.001)
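        # run a few training steps, sampling a random noise level per batch item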
        for i in range(5):
            y_hat = model.forward(input_dummy, mel_spec, torch.rand(8).to(device))
            optimizer.zero_grad()
            loss = criterion(y_hat, input_dummy)
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
            # ignore the pre-highway layer since it is applied conditionally
            # if count not in [145, 59]:
            assert (param != param_ref).any(), "param {} with shape {} not updated!\n{}\n{}".format(
                count, param.shape, param, param_ref
            )
            count += 1