jbetker committed
Commit: f7c8dec
Parent(s): 33f60ce

Move everything into the tortoise/ subdirectory
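
In short: everything that previously imported from the top-level models and utils packages now imports from the tortoise package. A minimal before/after sketch of the import change (paths taken directly from the diff below; nothing beyond the import paths is implied about the API):

# Before this commit (top-level packages):
#   from models.autoregressive import UnifiedVoice
#   from utils.audio import load_audio

# After this commit (everything lives under the tortoise/ package):
from tortoise.models.autoregressive import UnifiedVoice
from tortoise.utils.audio import load_audio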

{utils → tortoise}/__init__.py RENAMED
File without changes
api.py → tortoise/api.py RENAMED
@@ -1,4 +1,3 @@
-import argparse
 import os
 import random
 from urllib import request
@@ -8,19 +7,18 @@ import torch.nn.functional as F
 import progressbar
 import torchaudio
 
-from models.classifier import AudioMiniEncoderWithClassifierHead
-from models.cvvp import CVVP
-from models.diffusion_decoder import DiffusionTts
-from models.autoregressive import UnifiedVoice
+from tortoise.models.classifier import AudioMiniEncoderWithClassifierHead
+from tortoise.models.cvvp import CVVP
+from tortoise.models.diffusion_decoder import DiffusionTts
+from tortoise.models.autoregressive import UnifiedVoice
 from tqdm import tqdm
 
-from models.arch_util import TorchMelSpectrogram
-from models.clvp import CLVP
-from models.vocoder import UnivNetGenerator
-from utils.audio import load_audio, wav_to_univnet_mel, denormalize_tacotron_mel
-from utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
-from utils.tokenizer import VoiceBpeTokenizer, lev_distance
-
+from tortoise.models.arch_util import TorchMelSpectrogram
+from tortoise.models.clvp import CLVP
+from tortoise.models.vocoder import UnivNetGenerator
+from tortoise.utils.audio import wav_to_univnet_mel, denormalize_tacotron_mel
+from tortoise.utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
+from tortoise.utils.tokenizer import VoiceBpeTokenizer
 
 pbar = None
 
do_tts.py → tortoise/do_tts.py RENAMED
@@ -4,7 +4,7 @@ import os
 import torchaudio
 
 from api import TextToSpeech
-from utils.audio import load_audio, get_voices
+from tortoise.utils.audio import load_audio, get_voices
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
eval_multiple.py → tortoise/eval_multiple.py RENAMED
@@ -3,7 +3,7 @@ import os
 import torchaudio
 
 from api import TextToSpeech
-from utils.audio import load_audio
+from tortoise.utils.audio import load_audio
 
 if __name__ == '__main__':
     fname = 'Y:\\clips\\books2\\subset512-oco.tsv'
is_this_from_tortoise.py → tortoise/is_this_from_tortoise.py RENAMED
@@ -1,7 +1,7 @@
 import argparse
 
 from api import classify_audio_clip
-from utils.audio import load_audio
+from tortoise.utils.audio import load_audio
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
{models → tortoise/models}/arch_util.py RENAMED
@@ -5,7 +5,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import torchaudio
-from models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias
+from tortoise.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias
 
 
 def zero_module(module):
{models → tortoise/models}/autoregressive.py RENAMED
@@ -6,8 +6,8 @@ import torch.nn.functional as F
 from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
 from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
 from transformers.utils.model_parallel_utils import get_device_map, assert_device_map
-from models.arch_util import AttentionBlock
-from utils.typical_sampling import TypicalLogitsWarper
+from tortoise.models.arch_util import AttentionBlock
+from tortoise.utils.typical_sampling import TypicalLogitsWarper
 
 
 def null_position_embeddings(range, dim):
{models → tortoise/models}/classifier.py RENAMED
@@ -1,9 +1,8 @@
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 from torch.utils.checkpoint import checkpoint
 
-from models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
+from tortoise.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
 
 
 class ResBlock(nn.Module):
{models → tortoise/models}/clvp.py RENAMED
@@ -3,9 +3,9 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch import einsum
 
-from models.arch_util import CheckpointedXTransformerEncoder
-from models.transformer import Transformer
-from models.xtransformers import Encoder
+from tortoise.models.arch_util import CheckpointedXTransformerEncoder
+from tortoise.models.transformer import Transformer
+from tortoise.models.xtransformers import Encoder
 
 
 def exists(val):
{models → tortoise/models}/cvvp.py RENAMED
@@ -4,8 +4,8 @@ import torch.nn.functional as F
 from torch import einsum
 from torch.utils.checkpoint import checkpoint
 
-from models.arch_util import AttentionBlock
-from models.xtransformers import ContinuousTransformerWrapper, Encoder
+from tortoise.models.arch_util import AttentionBlock
+from tortoise.models.xtransformers import ContinuousTransformerWrapper, Encoder
 
 
 def exists(val):
{models → tortoise/models}/diffusion_decoder.py RENAMED
@@ -7,7 +7,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch import autocast
 
-from models.arch_util import normalization, AttentionBlock
+from tortoise.models.arch_util import normalization, AttentionBlock
 
 
 def is_latent(t):
{models → tortoise/models}/transformer.py RENAMED
File without changes
{models → tortoise/models}/vocoder.py RENAMED
File without changes
{models → tortoise/models}/xtransformers.py RENAMED
File without changes
read.py → tortoise/read.py RENAMED
@@ -2,12 +2,10 @@ import argparse
 import os
 
 import torch
-import torch.nn.functional as F
 import torchaudio
 
-from api import TextToSpeech, format_conditioning
-from utils.audio import load_audio, get_voices
-from utils.tokenizer import VoiceBpeTokenizer
+from api import TextToSpeech
+from tortoise.utils.audio import load_audio, get_voices
 
 
 def split_and_recombine_text(texts, desired_length=200, max_len=300):
samples_generator.py → tortoise/samples_generator.py RENAMED
@@ -4,7 +4,7 @@ import os
 
 if __name__ == '__main__':
     result = "<html><head><title>These words were never spoken.</title></head><body><h1>Handpicked results</h1>"
-    for fv in os.listdir('results/favorites'):
+    for fv in os.listdir('../results/favorites'):
         url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/favorites/{fv}'
         result = result + f'<audio controls="" style="width: 600px;"><source src="{url}" type="audio/mp3"></audio><br>\n'
 
@@ -30,7 +30,7 @@ if __name__ == '__main__':
             line = line + f'<td><audio controls="" style="width: 150px;"><source src="{url}" type="audio/mp3"></audio></td>'
         line = line + "</tr>"
         lines.append(line)
-    for txt in os.listdir('results/various/'):
+    for txt in os.listdir('../results/various/'):
         if 'desktop' in txt:
             continue
         line = f'<tr><td>{txt}</td>'
@@ -42,7 +42,7 @@ if __name__ == '__main__':
     result = result + '\n'.join(lines) + "</table>"
 
     result = result + "<h1>Longform result for all voices:</h1>"
-    for lf in os.listdir('results/riding_hood'):
+    for lf in os.listdir('../results/riding_hood'):
         url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/riding_hood/{lf}'
         result = result + f'<audio controls="" style="width: 600px;"><source src="{url}" type="audio/mp3"></audio><br>\n'
 
sweep.py → tortoise/sweep.py RENAMED
@@ -4,7 +4,7 @@ from random import shuffle
 import torchaudio
 
 from api import TextToSpeech
-from utils.audio import load_audio
+from tortoise.utils.audio import load_audio
 
 
 def permutations(args):
tortoise/utils/__init__.py ADDED
File without changes
{utils → tortoise/utils}/audio.py RENAMED
@@ -6,7 +6,7 @@ import torchaudio
 import numpy as np
 from scipy.io.wavfile import read
 
-from utils.stft import STFT
+from tortoise.utils.stft import STFT
 
 
 def load_wav_to_torch(full_path):
{utils → tortoise/utils}/diffusion.py RENAMED
File without changes
{utils → tortoise/utils}/stft.py RENAMED
File without changes
{utils → tortoise/utils}/tokenizer.py RENAMED
File without changes
{utils → tortoise/utils}/typical_sampling.py RENAMED
File without changes