BorisovMaksim committed on
Commit
77c6c52
·
1 Parent(s): 091b1e0
Files changed (3) hide show
  1. README.md +3 -0
  2. denoisers/SpectralGating.py +0 -1
  3. evaluation.py +8 -8
README.md CHANGED
@@ -1,3 +1,6 @@
1
  | Attempt | #1 | #2 |
2
  | :---: | :---: | :---: |
3
  | Seconds | 301 | 283 |
 
 
 
 
1
  | Attempt | #1 | #2 |
2
  | :---: | :---: | :---: |
3
  | Seconds | 301 | 283 |
4
+
5
+ Metrics on valentini dataset with baseline = {'PESQ': 1.5693, 'STOI': 0.9504}
6
+ Metrics on valentini dataset with ideal denoising = {'PESQ': 1.9709, 'STOI': 0.9211}
denoisers/SpectralGating.py CHANGED
@@ -4,7 +4,6 @@ import torchaudio
4
 
5
 
6
  class SpectralGating(torch.nn.Module):
7
- """example: wav_noisy = '/media/public/datasets/denoising/DS_10283_2791/noisy_trainset_56spk_wav/p312_002.wav' """
8
  def __init__(self, rate=16000):
9
  super(SpectralGating, self).__init__()
10
  self.rate = rate
 
4
 
5
 
6
  class SpectralGating(torch.nn.Module):
 
7
  def __init__(self, rate=16000):
8
  super(SpectralGating, self).__init__()
9
  self.rate = rate
evaluation.py CHANGED
@@ -15,8 +15,9 @@ MODELS = {
15
 
16
 
17
 
18
- def evaluate_on_dataset(model_name, dataset_path, dataset_type, ideal):
19
- model = MODELS[model_name]()
 
20
  parser = PARSERS[dataset_type]
21
  clean_wavs, noisy_wavs = parser(dataset_path)
22
 
@@ -25,10 +26,11 @@ def evaluate_on_dataset(model_name, dataset_path, dataset_type, ideal):
25
  for clean_path, noisy_path in tqdm(zip(clean_wavs, noisy_wavs), total=len(clean_wavs)):
26
  clean_wav = load_wav(clean_path)
27
  noisy_wav = load_wav(noisy_path)
28
- denoised_wav = model(noisy_wav)
29
- if ideal:
30
  scores = metrics.calculate(noisy_wav, clean_wav)
31
  else:
 
32
  scores = metrics.calculate(noisy_wav, denoised_wav)
33
 
34
  mean_scores['PESQ'] += scores['PESQ']
@@ -49,14 +51,12 @@ if __name__ == '__main__':
49
  choices=['valentini'])
50
  parser.add_argument('--model_name', type=str,
51
  choices=['baseline'])
52
- parser.add_argument('--ideal', type=bool, default=False,
53
- help="Evaluate metrics on testing data with ideal denoising")
54
 
55
  args = parser.parse_args()
56
 
57
  mean_scores = evaluate_on_dataset(model_name=args.model_name,
58
  dataset_path=args.dataset_path,
59
- dataset_type=args.dataset_type,
60
- ideal=args.ideal)
61
  print(f"Metrics on {args.dataset_type} dataset with "
62
  f"{args.model_name if args.model_name is not None else 'ideal denoising'} = {mean_scores}")
 
15
 
16
 
17
 
18
+ def evaluate_on_dataset(model_name, dataset_path, dataset_type):
19
+ if model_name is not None:
20
+ model = MODELS[model_name]()
21
  parser = PARSERS[dataset_type]
22
  clean_wavs, noisy_wavs = parser(dataset_path)
23
 
 
26
  for clean_path, noisy_path in tqdm(zip(clean_wavs, noisy_wavs), total=len(clean_wavs)):
27
  clean_wav = load_wav(clean_path)
28
  noisy_wav = load_wav(noisy_path)
29
+
30
+ if model_name is None:
31
  scores = metrics.calculate(noisy_wav, clean_wav)
32
  else:
33
+ denoised_wav = model(noisy_wav)
34
  scores = metrics.calculate(noisy_wav, denoised_wav)
35
 
36
  mean_scores['PESQ'] += scores['PESQ']
 
51
  choices=['valentini'])
52
  parser.add_argument('--model_name', type=str,
53
  choices=['baseline'])
54
+
 
55
 
56
  args = parser.parse_args()
57
 
58
  mean_scores = evaluate_on_dataset(model_name=args.model_name,
59
  dataset_path=args.dataset_path,
60
+ dataset_type=args.dataset_type)
 
61
  print(f"Metrics on {args.dataset_type} dataset with "
62
  f"{args.model_name if args.model_name is not None else 'ideal denoising'} = {mean_scores}")