Upload model
Browse files- .gitattributes +1 -0
- README.md +14 -0
- __pycache__/miner.cpython-312.pyc +0 -0
- added_tokens.json +35 -0
- chute_config.yml +23 -0
- config.json +163 -0
- demo.py +98 -0
- generation_config.json +12 -0
- merges.txt +0 -0
- miner.py +364 -0
- model.safetensors +3 -0
- preprocessor_config.json +6 -0
- special_tokens_map.json +44 -0
- speech_tokenizer/config.json +94 -0
- speech_tokenizer/configuration.json +1 -0
- speech_tokenizer/model.safetensors +3 -0
- speech_tokenizer/preprocessor_config.json +10 -0
- tokenizer.json +3 -0
- tokenizer_config.json +318 -0
- vocab.json +0 -0
- vocence_config.yaml +16 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: cc-by-nc-sa-4.0
|
| 3 |
+
base_model: magma90909/vocence_miner_v8
|
| 4 |
+
pipeline_tag: text-to-speech
|
| 5 |
+
library_name: transformers
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
tags:
|
| 9 |
+
- tts
|
| 10 |
+
- prompttts
|
| 11 |
+
- qwen3-tts
|
| 12 |
+
- voice-design
|
| 13 |
+
- vocence
|
| 14 |
+
---
|
__pycache__/miner.cpython-312.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
added_tokens.json
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"</think>": 151668,
|
| 3 |
+
"</tool_call>": 151658,
|
| 4 |
+
"</tool_response>": 151666,
|
| 5 |
+
"<think>": 151667,
|
| 6 |
+
"<tool_call>": 151657,
|
| 7 |
+
"<tool_response>": 151665,
|
| 8 |
+
"<tts_pad>": 151671,
|
| 9 |
+
"<tts_text_bos>": 151672,
|
| 10 |
+
"<tts_text_bos_single>": 151674,
|
| 11 |
+
"<tts_text_eod>": 151673,
|
| 12 |
+
"<|audio_end|>": 151670,
|
| 13 |
+
"<|audio_pad|>": 151675,
|
| 14 |
+
"<|audio_start|>": 151669,
|
| 15 |
+
"<|box_end|>": 151649,
|
| 16 |
+
"<|box_start|>": 151648,
|
| 17 |
+
"<|endoftext|>": 151643,
|
| 18 |
+
"<|file_sep|>": 151664,
|
| 19 |
+
"<|fim_middle|>": 151660,
|
| 20 |
+
"<|fim_pad|>": 151662,
|
| 21 |
+
"<|fim_prefix|>": 151659,
|
| 22 |
+
"<|fim_suffix|>": 151661,
|
| 23 |
+
"<|im_end|>": 151645,
|
| 24 |
+
"<|im_start|>": 151644,
|
| 25 |
+
"<|image_pad|>": 151655,
|
| 26 |
+
"<|object_ref_end|>": 151647,
|
| 27 |
+
"<|object_ref_start|>": 151646,
|
| 28 |
+
"<|quad_end|>": 151651,
|
| 29 |
+
"<|quad_start|>": 151650,
|
| 30 |
+
"<|repo_name|>": 151663,
|
| 31 |
+
"<|video_pad|>": 151656,
|
| 32 |
+
"<|vision_end|>": 151653,
|
| 33 |
+
"<|vision_pad|>": 151654,
|
| 34 |
+
"<|vision_start|>": 151652
|
| 35 |
+
}
|
chute_config.yml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Image + node + Chute for Vocence deploy. Required in the HF repo at build time.
|
| 2 |
+
|
| 3 |
+
Image:
|
| 4 |
+
from_base: parachutes/python:3.12
|
| 5 |
+
run_command:
|
| 6 |
+
- pip install torch torchaudio transformers accelerate huggingface_hub pyyaml soundfile librosa
|
| 7 |
+
- pip install -U qwen-tts
|
| 8 |
+
set_workdir: /app
|
| 9 |
+
|
| 10 |
+
NodeSelector:
|
| 11 |
+
gpu_count: 1
|
| 12 |
+
min_vram_gb_per_gpu: 24
|
| 13 |
+
include: ["pro_6000"]
|
| 14 |
+
exclude: []
|
| 15 |
+
|
| 16 |
+
Chute:
|
| 17 |
+
tagline: Vocence TTS — Qwen3 PromptTTS (weights in repo)
|
| 18 |
+
readme: Qwen3 12Hz TTS snapshot + miner.py for Vocence
|
| 19 |
+
shutdown_after_seconds: 86400
|
| 20 |
+
concurrency: 1
|
| 21 |
+
max_instances: 1
|
| 22 |
+
scaling_threshold: 0.5
|
| 23 |
+
tee: true
|
config.json
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Qwen3TTSForConditionalGeneration"
|
| 4 |
+
],
|
| 5 |
+
"assistant_token_id": 77091,
|
| 6 |
+
"im_end_token_id": 151645,
|
| 7 |
+
"im_start_token_id": 151644,
|
| 8 |
+
"tts_bos_token_id": 151672,
|
| 9 |
+
"tts_eos_token_id": 151673,
|
| 10 |
+
"tts_pad_token_id": 151671,
|
| 11 |
+
"model_type": "qwen3_tts",
|
| 12 |
+
"tokenizer_type": "qwen3_tts_tokenizer_12hz",
|
| 13 |
+
"tts_model_size": "1b7",
|
| 14 |
+
"tts_model_type": "voice_design",
|
| 15 |
+
"talker_config": {
|
| 16 |
+
"attention_bias": false,
|
| 17 |
+
"attention_dropout": 0,
|
| 18 |
+
"code_predictor_config": {
|
| 19 |
+
"_name_or_path": "",
|
| 20 |
+
"add_cross_attention": false,
|
| 21 |
+
"architectures": null,
|
| 22 |
+
"attention_bias": false,
|
| 23 |
+
"attention_dropout": 0,
|
| 24 |
+
"bad_words_ids": null,
|
| 25 |
+
"begin_suppress_tokens": null,
|
| 26 |
+
"bos_token_id": null,
|
| 27 |
+
"chunk_size_feed_forward": 0,
|
| 28 |
+
"cross_attention_hidden_size": null,
|
| 29 |
+
"decoder_start_token_id": null,
|
| 30 |
+
"diversity_penalty": 0.0,
|
| 31 |
+
"do_sample": false,
|
| 32 |
+
"early_stopping": false,
|
| 33 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 34 |
+
"eos_token_id": null,
|
| 35 |
+
"exponential_decay_length_penalty": null,
|
| 36 |
+
"finetuning_task": null,
|
| 37 |
+
"forced_bos_token_id": null,
|
| 38 |
+
"forced_eos_token_id": null,
|
| 39 |
+
"head_dim": 128,
|
| 40 |
+
"hidden_act": "silu",
|
| 41 |
+
"hidden_size": 1024,
|
| 42 |
+
"id2label": {
|
| 43 |
+
"0": "LABEL_0",
|
| 44 |
+
"1": "LABEL_1"
|
| 45 |
+
},
|
| 46 |
+
"initializer_range": 0.02,
|
| 47 |
+
"intermediate_size": 3072,
|
| 48 |
+
"is_decoder": false,
|
| 49 |
+
"is_encoder_decoder": false,
|
| 50 |
+
"label2id": {
|
| 51 |
+
"LABEL_0": 0,
|
| 52 |
+
"LABEL_1": 1
|
| 53 |
+
},
|
| 54 |
+
"layer_types": [
|
| 55 |
+
"full_attention",
|
| 56 |
+
"full_attention",
|
| 57 |
+
"full_attention",
|
| 58 |
+
"full_attention",
|
| 59 |
+
"full_attention"
|
| 60 |
+
],
|
| 61 |
+
"length_penalty": 1.0,
|
| 62 |
+
"max_length": 20,
|
| 63 |
+
"max_position_embeddings": 65536,
|
| 64 |
+
"max_window_layers": 28,
|
| 65 |
+
"min_length": 0,
|
| 66 |
+
"model_type": "qwen3_tts_talker_code_predictor",
|
| 67 |
+
"no_repeat_ngram_size": 0,
|
| 68 |
+
"num_attention_heads": 16,
|
| 69 |
+
"num_beam_groups": 1,
|
| 70 |
+
"num_beams": 1,
|
| 71 |
+
"num_code_groups": 16,
|
| 72 |
+
"num_hidden_layers": 5,
|
| 73 |
+
"num_key_value_heads": 8,
|
| 74 |
+
"num_return_sequences": 1,
|
| 75 |
+
"output_attentions": false,
|
| 76 |
+
"output_hidden_states": false,
|
| 77 |
+
"output_scores": false,
|
| 78 |
+
"pad_token_id": null,
|
| 79 |
+
"prefix": null,
|
| 80 |
+
"problem_type": null,
|
| 81 |
+
"pruned_heads": {},
|
| 82 |
+
"remove_invalid_values": false,
|
| 83 |
+
"repetition_penalty": 1.0,
|
| 84 |
+
"return_dict": true,
|
| 85 |
+
"return_dict_in_generate": false,
|
| 86 |
+
"rms_norm_eps": 1e-06,
|
| 87 |
+
"rope_scaling": null,
|
| 88 |
+
"rope_theta": 1000000,
|
| 89 |
+
"sep_token_id": null,
|
| 90 |
+
"sliding_window": null,
|
| 91 |
+
"suppress_tokens": null,
|
| 92 |
+
"task_specific_params": null,
|
| 93 |
+
"temperature": 1.0,
|
| 94 |
+
"tf_legacy_loss": false,
|
| 95 |
+
"tie_encoder_decoder": false,
|
| 96 |
+
"tie_word_embeddings": false,
|
| 97 |
+
"tokenizer_class": null,
|
| 98 |
+
"top_k": 50,
|
| 99 |
+
"top_p": 1.0,
|
| 100 |
+
"dtype": null,
|
| 101 |
+
"torchscript": false,
|
| 102 |
+
"typical_p": 1.0,
|
| 103 |
+
"use_bfloat16": false,
|
| 104 |
+
"use_cache": true,
|
| 105 |
+
"use_sliding_window": false,
|
| 106 |
+
"vocab_size": 2048
|
| 107 |
+
},
|
| 108 |
+
"codec_bos_id": 2149,
|
| 109 |
+
"codec_eos_token_id": 2150,
|
| 110 |
+
"codec_think_id": 2154,
|
| 111 |
+
"codec_language_id": {
|
| 112 |
+
"chinese": 2055,
|
| 113 |
+
"english": 2050,
|
| 114 |
+
"german": 2053,
|
| 115 |
+
"italian": 2070,
|
| 116 |
+
"portuguese": 2071,
|
| 117 |
+
"spanish": 2054,
|
| 118 |
+
"japanese": 2058,
|
| 119 |
+
"korean": 2064,
|
| 120 |
+
"french": 2061,
|
| 121 |
+
"russian": 2069
|
| 122 |
+
},
|
| 123 |
+
"codec_nothink_id": 2155,
|
| 124 |
+
"codec_pad_id": 2148,
|
| 125 |
+
"codec_think_bos_id": 2156,
|
| 126 |
+
"codec_think_eos_id": 2157,
|
| 127 |
+
"spk_id": {
|
| 128 |
+
},
|
| 129 |
+
"spk_is_dialect": {
|
| 130 |
+
},
|
| 131 |
+
"head_dim": 128,
|
| 132 |
+
"hidden_act": "silu",
|
| 133 |
+
"hidden_size": 2048,
|
| 134 |
+
"initializer_range": 0.02,
|
| 135 |
+
"intermediate_size": 6144,
|
| 136 |
+
"max_position_embeddings": 32768,
|
| 137 |
+
"model_type": "qwen3_tts_talker",
|
| 138 |
+
"num_attention_heads": 16,
|
| 139 |
+
"num_code_groups": 16,
|
| 140 |
+
"num_hidden_layers": 28,
|
| 141 |
+
"num_key_value_heads": 8,
|
| 142 |
+
"position_id_per_seconds": 13,
|
| 143 |
+
"rms_norm_eps": 1e-06,
|
| 144 |
+
"rope_scaling": {
|
| 145 |
+
"interleaved": true,
|
| 146 |
+
"mrope_section": [
|
| 147 |
+
24,
|
| 148 |
+
20,
|
| 149 |
+
20
|
| 150 |
+
],
|
| 151 |
+
"rope_type": "default",
|
| 152 |
+
"type": "default"
|
| 153 |
+
},
|
| 154 |
+
"rope_theta": 1000000,
|
| 155 |
+
"sliding_window": null,
|
| 156 |
+
"text_hidden_size": 2048,
|
| 157 |
+
"text_vocab_size": 151936,
|
| 158 |
+
"use_cache": true,
|
| 159 |
+
"use_sliding_window": false,
|
| 160 |
+
"vocab_size": 3072
|
| 161 |
+
},
|
| 162 |
+
"transformers_version": "4.57.3"
|
| 163 |
+
}
|
demo.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""demo.py — quick smoke test for vocence_miner_v1.
|
| 2 |
+
|
| 3 |
+
Reads the merged checkpoint either from a local path or from the Hugging Face Hub,
|
| 4 |
+
then generates a small set of preset clips that exercise the prompt-following range.
|
| 5 |
+
|
| 6 |
+
pip install qwen-tts transformers torch soundfile
|
| 7 |
+
python demo.py # uses the current directory
|
| 8 |
+
python demo.py --source magma90909/vocence_miner_v8 # pull from HF
|
| 9 |
+
"""
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import argparse
|
| 13 |
+
import dataclasses
|
| 14 |
+
import sys
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
import soundfile as sf
|
| 18 |
+
import torch
|
| 19 |
+
from qwen_tts import Qwen3TTSModel
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@dataclasses.dataclass(frozen=True)
class Sample:
    """One preset demo clip: a transcript plus the voice description to render it with."""

    # Output filename stem; the clip is written as <slug>.wav.
    slug: str
    # Transcript to synthesize.
    say: str
    # Natural-language voice description passed as the ``instruct`` prompt.
    voice: str
+
|
| 28 |
+
|
| 29 |
+
# Preset clips chosen to exercise the prompt-following range: slow warm
# narration, whispered low-energy speech, and fast high-energy projection.
SAMPLES: tuple[Sample, ...] = (
    Sample(
        slug="warm_male_storyteller",
        say="Long ago, in a kingdom by the sea, a young girl made a remarkable discovery.",
        voice="An older male narrator reads a bedtime story slowly, with warmth.",
    ),
    Sample(
        slug="whisper_female",
        say="Don't say a word. Just listen carefully.",
        voice="A young woman whispers, conspiratorial, low energy, very quiet.",
    ),
    Sample(
        slug="projecting_announcer",
        say="And he scores in the final second of the match!",
        voice="A high-pitched announcer projects an exciting headline at a fast pace.",
    ),
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Shared sampling knobs forwarded to generate_voice_design for every clip.
SAMPLER = dict(
    temperature=0.85,
    top_k=50,
    top_p=0.95,
    repetition_penalty=1.05,
    max_new_tokens=600,
    do_sample=True,
)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
|
| 59 |
+
p = argparse.ArgumentParser(description=__doc__.split("\n", 1)[0])
|
| 60 |
+
p.add_argument("--source", default=".", help="HF repo id or local checkpoint dir")
|
| 61 |
+
p.add_argument("--out", default="./demo_out", help="output dir for wav files")
|
| 62 |
+
p.add_argument("--precision", default="bfloat16", choices=("bfloat16", "float16", "float32"))
|
| 63 |
+
p.add_argument("--device", default="cuda:0" if torch.cuda.is_available() else "cpu")
|
| 64 |
+
return p.parse_args(argv)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def load(source: str, device: str, precision: str) -> Qwen3TTSModel:
    """Instantiate the TTS model from *source* on *device* at the given precision."""
    dtype_by_name = {
        "bfloat16": torch.bfloat16,
        "float16": torch.float16,
        "float32": torch.float32,
    }
    dtype = dtype_by_name[precision]
    print(f"[demo] loading {source!r} -> {device} ({precision})", flush=True)
    return Qwen3TTSModel.from_pretrained(source, device_map=device, dtype=dtype)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def synth_one(model: Qwen3TTSModel, sample: Sample, out_dir: Path) -> Path:
    """Render one preset clip and write it under *out_dir*.

    Returns the path of the wav file that was written.
    """
    wavs, sr = model.generate_voice_design(
        text=sample.say,
        instruct=sample.voice,
        language="english",
        **SAMPLER,
    )
    wav = wavs[0]
    target = out_dir / f"{sample.slug}.wav"
    sf.write(target, wav, sr)
    print(f" -> {target.name} ({len(wav) / sr:.2f}s @ {sr} Hz)")
    return target
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def run(args: argparse.Namespace) -> int:
    """Synthesize every preset sample into ``args.out``; return a shell exit status."""
    out_dir = Path(args.out)
    out_dir.mkdir(parents=True, exist_ok=True)
    model = load(args.source, args.device, args.precision)
    for preset in SAMPLES:
        synth_one(model, preset, out_dir)
    print(f"[demo] {len(SAMPLES)} clips written to {out_dir}/", flush=True)
    return 0
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
if __name__ == "__main__":
    # Propagate run()'s return value to the shell as the exit status.
    sys.exit(run(parse_args()))
|
generation_config.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"do_sample": true,
|
| 3 |
+
"repetition_penalty": 1.05,
|
| 4 |
+
"temperature": 0.9,
|
| 5 |
+
"top_p": 1.0,
|
| 6 |
+
"top_k": 50,
|
| 7 |
+
"subtalker_dosample": true,
|
| 8 |
+
"subtalker_temperature": 0.9,
|
| 9 |
+
"subtalker_top_p": 1.0,
|
| 10 |
+
"subtalker_top_k": 50,
|
| 11 |
+
"max_new_tokens": 8192
|
| 12 |
+
}
|
merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
miner.py
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Vocence engine for the merged Qwen3-TTS VoiceDesign checkpoint.
|
| 2 |
+
|
| 3 |
+
The Vocence Chutes wrapper instantiates ``Miner`` with the on-disk path of the HF
|
| 4 |
+
snapshot and then drives it through the contract:
|
| 5 |
+
|
| 6 |
+
Miner(path_hf_repo: Path)
|
| 7 |
+
warmup() -> None
|
| 8 |
+
generate_wav(instruction: str, text: str) -> tuple[np.ndarray, int]
|
| 9 |
+
|
| 10 |
+
All weights, the audio codec, and the tokenizer ship together in the snapshot —
|
| 11 |
+
nothing is fetched at runtime.
|
| 12 |
+
"""
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import dataclasses
|
| 16 |
+
import re
|
| 17 |
+
import threading
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from typing import Any
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# File whose presence marks a snapshot as complete (checked in Miner.__init__).
_REPO_REQUIRED_FILE = "config.json"
# Optional runtime-tuning YAML consumed by _RuntimeOpts.from_repo.
_RUNTIME_CONFIG_FILE = "vocence_config.yaml"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# --------------------------------------------------------------------------- #
|
| 29 |
+
# Instruction rewrite (tag -> natural-language preamble) #
|
| 30 |
+
# --------------------------------------------------------------------------- #
|
| 31 |
+
#
|
| 32 |
+
# Validators may send instructions in the legacy pipe-tag form, e.g.
|
| 33 |
+
# ``| gender: male | pitch: mid | accent: uk |``. The base voice_design
|
| 34 |
+
# checkpoint was conditioned on natural-language descriptions, so we paraphrase
|
| 35 |
+
# the tags into a short imperative preamble and *prepend* it to whatever the
|
| 36 |
+
# caller sent. Free-form prompts (no ``| key: value |`` pairs) pass through
|
| 37 |
+
# unchanged because ``_parse_instruction`` returns ``{}`` for them.
|
| 38 |
+
|
| 39 |
+
# One ``| key: value |`` pair. Value runs until the next ``|`` or end-of-string;
|
| 40 |
+
# the lookahead keeps the trailing ``|`` available for the next iteration.
|
| 41 |
+
_INSTRUCTION_TAG_RE = re.compile(
    r"\|\s*([A-Za-z_]+)\s*:\s*([^|]+?)\s*(?=\||$)"
)

# Tag-value -> natural-language phrase tables. Only the values listed here
# are rewritten into the preamble; anything else silently drops out.
_GENDER_PHRASE = {
    "male": "male", "female": "female", "neutral": "gender-neutral",
}
_PITCH_PHRASE = {
    "low": "deep low-pitched voice", "mid": "medium natural pitch", "high": "high-pitched voice",
}
_SPEED_PHRASE = {
    "slow": "slow deliberate pace", "normal": "natural conversational pace", "fast": "brisk fast pace",
}
_AGE_PHRASE = {
    "child": "child", "young_adult": "young adult", "adult": "adult", "senior": "elderly senior",
}
_EMOTION_PHRASE = {
    "neutral": "neutral composed delivery",
    "happy": "cheerful happy upbeat warm",
    "sad": "sorrowful sad subdued downcast",
    "angry": "firm angry forceful assertive tense",
    "calm": "calm relaxed measured peaceful unhurried",
    "excited": "excited enthusiastic energetic lively",
    "serious": "serious grave deliberate weighty",
    "fearful": "nervous fearful hesitant trembling",
}
_TONE_PHRASE = {
    "warm": "warm", "cold": "cold detached", "friendly": "friendly",
    "formal": "formal", "casual": "casual", "authoritative": "authoritative commanding",
}
_ACCENT_PHRASE = {
    "us": "standard American English accent with rhotic r sounds",
    "uk": "standard British English accent with non-rhotic received pronunciation",
    "au": "Australian English accent",
    "in": "Indian English accent",
    "neutral": "neutral international English accent",
    "other": "non-native English accent",
}
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _parse_instruction(instruction: str) -> dict[str, str]:
    """Parse a pipe-tag instruction (``| key: value | ...``) into a flat dict.

    Keys and values are lowercased and stripped. Free-form natural-language
    prompts (no tag pairs found) yield ``{}``, which tells
    ``_enhance_instruction`` to pass them through unchanged. Out-of-vocabulary
    values simply fail to map at preamble-build time because the phrase
    tables only carry values trusted to be in the base model's training
    distribution.
    """
    if not instruction or "|" not in instruction:
        return {}
    pairs = (
        (match.group(1).strip().lower(), match.group(2).strip().lower())
        for match in _INSTRUCTION_TAG_RE.finditer(instruction)
    )
    return {key: val for key, val in pairs if key and val}
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def _build_natural_preamble(parsed: dict[str, str]) -> str:
    """Render parsed pipe tags as a short natural-language voice description.

    Returns ``""`` when none of the tags map to a known phrase, so the
    caller can fall back to the raw instruction.
    """

    def pick(table: dict[str, str], key: str) -> str:
        # Unknown or missing tag values map to "" and drop out below.
        return table.get(parsed.get(key, ""), "")

    fragments: list[str] = []

    # Gender-first to avoid timbre drift on emotion-heavy prompts.
    identity = " ".join(
        p for p in (pick(_GENDER_PHRASE, "gender"), pick(_AGE_PHRASE, "age_group")) if p
    )
    if identity:
        fragments.append(f"a {identity} voice")
    emotion = pick(_EMOTION_PHRASE, "emotion")
    if emotion:
        fragments.append(emotion)
    accent = pick(_ACCENT_PHRASE, "accent")
    if accent:
        fragments.append(f"speaking with a {accent}")
    pitch = pick(_PITCH_PHRASE, "pitch")
    if pitch:
        fragments.append(pitch)
    speed = pick(_SPEED_PHRASE, "speed")
    if speed:
        fragments.append(speed)
    tone = pick(_TONE_PHRASE, "tone")
    if tone:
        fragments.append(f"{tone} tone")

    if not fragments:
        return ""
    preamble = "Speak as " + ", ".join(fragments) + "."
    return preamble + " Use natural human prosody with realistic breath placement and varied intonation."
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def _enhance_instruction(instruction: str) -> str:
    """Prepend a natural-language preamble derived from any pipe tags.

    Free-form instructions (no parseable tags, or tags that map to no known
    phrase) come back untouched. When a preamble is built, the original
    instruction is kept at the end so the caller's free-form wording still
    influences the model.
    """
    tags = _parse_instruction(instruction)
    if tags:
        preamble = _build_natural_preamble(tags)
        if preamble:
            return f"{preamble} {instruction}"
    return instruction
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# --------------------------------------------------------------------------- #
|
| 152 |
+
# Text normalization #
|
| 153 |
+
# --------------------------------------------------------------------------- #
|
| 154 |
+
|
| 155 |
+
# Digit-string -> spelled-out word. Covers 0-20 and round tens/hundred; any
# other number passes through unchanged via ``dict.get`` fallbacks below.
_NUM_WORDS = {
    "0": "zero", "1": "one", "2": "two", "3": "three", "4": "four",
    "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine",
    "10": "ten", "11": "eleven", "12": "twelve", "13": "thirteen",
    "14": "fourteen", "15": "fifteen", "16": "sixteen", "17": "seventeen",
    "18": "eighteen", "19": "nineteen", "20": "twenty", "30": "thirty",
    "40": "forty", "50": "fifty", "60": "sixty", "70": "seventy",
    "80": "eighty", "90": "ninety", "100": "one hundred",
}
# Literal abbreviation -> expansion, applied with plain str.replace.
_ABBREV = {
    "Mr.": "Mister", "Mrs.": "Missus", "Dr.": "Doctor", "St.": "Saint",
    "etc.": "et cetera", "vs.": "versus", "approx.": "approximately",
    "dept.": "department", "govt.": "government", "mgr.": "manager",
}
|
| 169 |
+
|
| 170 |
+
# Pre-compiled at module load so we don't recompile on every call.
|
| 171 |
+
_DOLLAR_RE = re.compile(r"\$(\d+)")
|
| 172 |
+
_POUND_RE = re.compile(r"£(\d+)")
|
| 173 |
+
_EURO_RE = re.compile(r"€(\d+)")
|
| 174 |
+
_SMALL_INT_RE = re.compile(r"\b(\d{1,2})\b")
|
| 175 |
+
_CONJ_RE = re.compile(
|
| 176 |
+
r"(?<!\,)\s+(but|however|although|though|yet)\s+",
|
| 177 |
+
flags=re.IGNORECASE,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def _normalize_text_for_tts(text: str) -> str:
    """Rewrite a transcript so the talker emits cleaner, more prosodic speech.

    Expands a small list of common abbreviations, turns currency-prefixed
    integers into spelled-out phrases (``$5`` -> ``five dollars``), spells
    out 1-2 digit standalone integers, and inserts a comma before
    coordinating conjunctions so the model hears a beat where humans
    naturally take one. Anything not covered by those passes comes through
    unchanged.
    """

    def spell(match: "re.Match[str]") -> str:
        # Fall back to the raw digits when the number table has no entry.
        digits = match.group(1)
        return _NUM_WORDS.get(digits, digits)

    # Abbreviations first so e.g. "Dr." never reaches the number passes.
    for short, long_form in _ABBREV.items():
        text = text.replace(short, long_form)

    # $N / £N / €N -> "N dollars/pounds/euros"
    text = _DOLLAR_RE.sub(lambda m: f"{spell(m)} dollars", text)
    text = _POUND_RE.sub(lambda m: f"{spell(m)} pounds", text)
    text = _EURO_RE.sub(lambda m: f"{spell(m)} euros", text)

    # Standalone small integers (not part of larger numbers).
    text = _SMALL_INT_RE.sub(spell, text)

    # Breathing comma before coordinating conjunctions.
    text = _CONJ_RE.sub(r", \1 ", text)

    return text.strip()
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@dataclasses.dataclass
class _RuntimeOpts:
    """Subset of vocence_config.yaml that the engine actually consumes."""

    language: str = "English"
    sample_rate: int = 24000
    max_instruction_chars: int = 600
    max_text_chars: int = 2000
    device_pref: str = "cuda"
    dtype_pref: str = "bfloat16"
    flash_attention_2: bool = False

    @classmethod
    def from_repo(cls, repo: Path) -> "_RuntimeOpts":
        """Load options from the repo's YAML config, defaulting when absent."""
        cfg_path = repo / _RUNTIME_CONFIG_FILE
        if not cfg_path.is_file():
            return cls()
        # Imported lazily so a repo without a config never touches PyYAML.
        from yaml import safe_load

        with cfg_path.open("r", encoding="utf-8") as fh:
            data = safe_load(fh) or {}
        runtime = data.get("runtime") or {}
        generation = data.get("generation") or {}
        limits = data.get("limits") or {}
        language = limits.get("default_language") or runtime.get("default_language") or "English"
        return cls(
            language=str(language),
            sample_rate=int(generation.get("sample_rate", 24000)),
            max_instruction_chars=int(limits.get("max_instruction_chars", 600)),
            max_text_chars=int(limits.get("max_text_chars", 2000)),
            device_pref=str(runtime.get("device_preference", "cuda")).lower(),
            dtype_pref=str(runtime.get("dtype", "bfloat16")).lower(),
            flash_attention_2=bool(runtime.get("use_flash_attention_2", False)),
        )
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
class Miner:
    """Loads merged Qwen3-TTS weights from the snapshot and serves the Vocence API."""

    # Wall-clock budget (seconds) for the one-shot warmup synthesis.
    WARMUP_BUDGET_S = 180.0

    def __init__(self, path_hf_repo: Path) -> None:
        """Resolve the snapshot directory, validate it, and eagerly load the model.

        Args:
            path_hf_repo: Local path of the downloaded HF repo snapshot.

        Raises:
            FileNotFoundError: the snapshot is missing its required file.
        """
        self.repo = Path(path_hf_repo).resolve()
        if not (self.repo / _REPO_REQUIRED_FILE).is_file():
            raise FileNotFoundError(
                f"Snapshot incomplete: {self.repo / _REPO_REQUIRED_FILE} not found"
            )
        self.opts = _RuntimeOpts.from_repo(self.repo)
        self.model = self._build_model()

    def __repr__(self) -> str:
        return f"<Miner repo={self.repo.name} language={self.opts.language!r}>"

    # ------------------------------------------------------------------ #
    # Vocence contract                                                    #
    # ------------------------------------------------------------------ #

    def warmup(self) -> None:
        """Run one short synthesis in a watchdog thread.

        Raises:
            RuntimeError: the warmup generation failed, or did not finish
                within ``WARMUP_BUDGET_S`` seconds (on timeout the daemon
                thread is abandoned and dies with the process).
        """
        outcome: dict[str, Any] = {"ok": False, "err": None}

        def _heat() -> None:
            try:
                self.generate_wav(instruction="Calm neutral delivery.", text="Warmup.")
                outcome["ok"] = True
            except Exception as exc:  # noqa: BLE001 — surface to host
                outcome["err"] = repr(exc)

        worker = threading.Thread(target=_heat, daemon=True)
        worker.start()
        worker.join(timeout=self.WARMUP_BUDGET_S)
        if not outcome["ok"]:
            raise RuntimeError(f"Miner warmup did not complete: {outcome['err'] or 'timeout'}")

    def generate_wav(self, instruction: str, text: str) -> tuple[np.ndarray, int]:
        """Synthesize ``text`` spoken in the style described by ``instruction``.

        Returns:
            ``(wave, sample_rate)`` where ``wave`` is a 1-D float32 array.

        Raises:
            ValueError: the model produced no audio.
        """
        # Cap raw inputs first so an oversized payload never reaches the
        # rewriter (which would just throw away the surplus anyway).
        prompt = self._truncate(instruction, self.opts.max_instruction_chars)
        body = self._truncate(text, self.opts.max_text_chars)

        # Tag-form instructions get a natural-language preamble prepended;
        # already-natural instructions pass through untouched.
        prompt = _enhance_instruction(prompt)
        # Spell out numbers/currency, expand a few abbreviations, and add
        # a beat before coordinating conjunctions in long sentences.
        body = _normalize_text_for_tts(body)

        # The preamble + abbreviation/number expansion can lengthen the
        # strings; re-clip to the same limits so we honour the contract
        # advertised in vocence_config.yaml's ``limits`` block.
        prompt = self._truncate(prompt, self.opts.max_instruction_chars)
        body = self._truncate(body, self.opts.max_text_chars)

        wavs, sample_rate = self.model.generate_voice_design(
            text=body,
            instruct=prompt,
            language=self.opts.language,
        )
        if not wavs or wavs[0] is None:
            raise ValueError("Qwen3-TTS returned no audio")

        wave = self._coerce_mono_float32(wavs[0])
        return wave, int(sample_rate)

    # ------------------------------------------------------------------ #
    # Internal                                                            #
    # ------------------------------------------------------------------ #

    @staticmethod
    def _truncate(value: str, limit: int) -> str:
        """Clip ``value`` to ``limit`` chars; a non-positive limit disables clipping."""
        return value[:limit] if limit and limit > 0 else value

    @staticmethod
    def _coerce_mono_float32(arr: Any) -> np.ndarray:
        """Convert model output to a 1-D float32 waveform.

        Multi-channel audio is downmixed by averaging across the trailing
        axis.  Repeating the reduction until the array is 1-D also handles
        any extra batch-like leading dimension (the previous code reduced
        only once, so a 3-D output would have come back non-mono).
        NOTE(review): this assumes channels live on the last axis — confirm
        against the model's actual output layout.
        """
        wave = np.asarray(arr, dtype=np.float32)
        while wave.ndim > 1:
            # axis=-1 is identical to the old axis=1 for the 2-D case.
            wave = wave.mean(axis=-1)
        return wave

    def _build_model(self):
        """Instantiate ``Qwen3TTSModel`` from the local snapshot.

        Tries ``flash_attention_2`` first when enabled, falling back to
        ``sdpa``; raises ``RuntimeError`` chained to the last loader error
        when every attention variant fails.
        """
        import torch
        from qwen_tts import Qwen3TTSModel

        cuda_available = bool(torch.cuda.is_available())
        device_map = "cuda:0" if (self.opts.device_pref == "cuda" and cuda_available) else "cpu"
        # bfloat16 is only requested when CUDA is actually usable; every
        # other combination falls back to float32.
        torch_dtype = (
            torch.bfloat16
            if (self.opts.dtype_pref == "bfloat16" and cuda_available)
            else torch.float32
        )

        attempt_order = ("flash_attention_2", "sdpa") if self.opts.flash_attention_2 else ("sdpa",)
        last_error: BaseException | None = None
        for attn in attempt_order:
            try:
                model = Qwen3TTSModel.from_pretrained(
                    pretrained_model_name_or_path=str(self.repo),
                    device_map=device_map,
                    dtype=torch_dtype,
                    attn_implementation=attn,
                )
                print(
                    f"[Miner] Qwen3-TTS ready on {device_map} "
                    f"(dtype={self.opts.dtype_pref}, attn={attn})"
                )
                return model
            except Exception as exc:  # noqa: BLE001 — try next attn variant
                last_error = exc
        # Chain the underlying loader failure so the real cause survives in
        # the traceback (the original raise discarded it).
        raise RuntimeError(f"Qwen3-TTS failed to load: {last_error!r}") from last_error
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2151faf8114f41ac60301872e5e1b50020a865fde1e2d15862231aff7a7c04be
|
| 3 |
+
size 3833402644
|
preprocessor_config.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"padding_side": "left",
|
| 3 |
+
"padding_value": 0.0,
|
| 4 |
+
"processor_class": "Qwen3TTSProcessor",
|
| 5 |
+
"return_attention_mask": true
|
| 6 |
+
}
|
special_tokens_map.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<|im_start|>",
|
| 4 |
+
"<|im_end|>",
|
| 5 |
+
"<|object_ref_start|>",
|
| 6 |
+
"<|object_ref_end|>",
|
| 7 |
+
"<|box_start|>",
|
| 8 |
+
"<|box_end|>",
|
| 9 |
+
"<|quad_start|>",
|
| 10 |
+
"<|quad_end|>",
|
| 11 |
+
"<|vision_start|>",
|
| 12 |
+
"<|vision_end|>",
|
| 13 |
+
"<|vision_pad|>",
|
| 14 |
+
"<|image_pad|>",
|
| 15 |
+
"<|video_pad|>",
|
| 16 |
+
"<|audio_start|>",
|
| 17 |
+
"<|audio_end|>",
|
| 18 |
+
"<tts_pad>",
|
| 19 |
+
"<tts_text_bos>",
|
| 20 |
+
"<tts_text_bos_single>",
|
| 21 |
+
"<|audio_pad|>"
|
| 22 |
+
],
|
| 23 |
+
"audio_bos_token": "<|audio_start|>",
|
| 24 |
+
"audio_eos_token": "<|audio_end|>",
|
| 25 |
+
"audio_token": "<|audio_pad|>",
|
| 26 |
+
"eos_token": {
|
| 27 |
+
"content": "<|im_end|>",
|
| 28 |
+
"lstrip": false,
|
| 29 |
+
"normalized": false,
|
| 30 |
+
"rstrip": false,
|
| 31 |
+
"single_word": false
|
| 32 |
+
},
|
| 33 |
+
"image_token": "<|image_pad|>",
|
| 34 |
+
"pad_token": {
|
| 35 |
+
"content": "<|endoftext|>",
|
| 36 |
+
"lstrip": false,
|
| 37 |
+
"normalized": false,
|
| 38 |
+
"rstrip": false,
|
| 39 |
+
"single_word": false
|
| 40 |
+
},
|
| 41 |
+
"video_token": "<|video_pad|>",
|
| 42 |
+
"vision_bos_token": "<|vision_start|>",
|
| 43 |
+
"vision_eos_token": "<|vision_end|>"
|
| 44 |
+
}
|
speech_tokenizer/config.json
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Qwen3TTSTokenizerV2Model"
|
| 4 |
+
],
|
| 5 |
+
"model_type": "qwen3_tts_tokenizer_12hz",
|
| 6 |
+
"encoder_valid_num_quantizers": 16,
|
| 7 |
+
"input_sample_rate": 24000,
|
| 8 |
+
"output_sample_rate": 24000,
|
| 9 |
+
"decode_upsample_rate": 1920,
|
| 10 |
+
"encode_downsample_rate": 1920,
|
| 11 |
+
"decoder_config": {
|
| 12 |
+
"attention_bias": false,
|
| 13 |
+
"attention_dropout": 0.0,
|
| 14 |
+
"latent_dim": 1024,
|
| 15 |
+
"codebook_dim": 512,
|
| 16 |
+
"codebook_size": 2048,
|
| 17 |
+
"decoder_dim": 1536,
|
| 18 |
+
"hidden_act": "silu",
|
| 19 |
+
"hidden_size": 512,
|
| 20 |
+
"intermediate_size": 1024,
|
| 21 |
+
"layer_scale_initial_scale": 0.01,
|
| 22 |
+
"max_position_embeddings": 8000,
|
| 23 |
+
"head_dim": 64,
|
| 24 |
+
"num_attention_heads": 16,
|
| 25 |
+
"num_hidden_layers": 8,
|
| 26 |
+
"num_key_value_heads": 16,
|
| 27 |
+
"num_quantizers": 16,
|
| 28 |
+
"num_semantic_quantizers": 1,
|
| 29 |
+
"rms_norm_eps": 1e-05,
|
| 30 |
+
"rope_theta": 10000,
|
| 31 |
+
"semantic_codebook_size": 4096,
|
| 32 |
+
"sliding_window": 72,
|
| 33 |
+
"upsample_rates": [
|
| 34 |
+
8,
|
| 35 |
+
5,
|
| 36 |
+
4,
|
| 37 |
+
3
|
| 38 |
+
],
|
| 39 |
+
"upsampling_ratios": [
|
| 40 |
+
2,
|
| 41 |
+
2
|
| 42 |
+
],
|
| 43 |
+
"vector_quantization_hidden_dimension": 512
|
| 44 |
+
},
|
| 45 |
+
"encoder_config": {
|
| 46 |
+
"_frame_rate": 12.5,
|
| 47 |
+
"attention_bias": false,
|
| 48 |
+
"attention_dropout": 0.0,
|
| 49 |
+
"audio_channels": 1,
|
| 50 |
+
"codebook_dim": 256,
|
| 51 |
+
"codebook_size": 2048,
|
| 52 |
+
"compress": 2,
|
| 53 |
+
"dilation_growth_rate": 2,
|
| 54 |
+
"dtype": "float32",
|
| 55 |
+
"head_dim": 64,
|
| 56 |
+
"hidden_act": "gelu",
|
| 57 |
+
"hidden_size": 512,
|
| 58 |
+
"initializer_range": 0.02,
|
| 59 |
+
"intermediate_size": 2048,
|
| 60 |
+
"kernel_size": 7,
|
| 61 |
+
"last_kernel_size": 3,
|
| 62 |
+
"layer_scale_initial_scale": 0.01,
|
| 63 |
+
"max_position_embeddings": 8000,
|
| 64 |
+
"norm_eps": 1e-05,
|
| 65 |
+
"normalize": false,
|
| 66 |
+
"num_attention_heads": 8,
|
| 67 |
+
"num_filters": 64,
|
| 68 |
+
"num_hidden_layers": 8,
|
| 69 |
+
"num_key_value_heads": 8,
|
| 70 |
+
"num_quantizers": 32,
|
| 71 |
+
"num_residual_layers": 1,
|
| 72 |
+
"num_semantic_quantizers": 1,
|
| 73 |
+
"pad_mode": "constant",
|
| 74 |
+
"residual_kernel_size": 3,
|
| 75 |
+
"rope_theta": 10000.0,
|
| 76 |
+
"sampling_rate": 24000,
|
| 77 |
+
"sliding_window": 250,
|
| 78 |
+
"transformers_version": "4.57.0.dev0",
|
| 79 |
+
"trim_right_ratio": 1.0,
|
| 80 |
+
"upsample_groups": 512,
|
| 81 |
+
"upsampling_ratios": [
|
| 82 |
+
8,
|
| 83 |
+
6,
|
| 84 |
+
5,
|
| 85 |
+
4
|
| 86 |
+
],
|
| 87 |
+
"use_cache": false,
|
| 88 |
+
"use_causal_conv": true,
|
| 89 |
+
"use_conv_shortcut": false,
|
| 90 |
+
"use_streaming": false,
|
| 91 |
+
"vector_quantization_hidden_dimension": 256
|
| 92 |
+
},
|
| 93 |
+
"transformers_version": "4.57.3"
|
| 94 |
+
}
|
speech_tokenizer/configuration.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"framework": "pytorch", "task": "feature-extraction", "allow_remote": true}
|
speech_tokenizer/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:836b7b357f5ea43e889936a3709af68dfe3751881acefe4ecf0dbd30ba571258
|
| 3 |
+
size 682293092
|
speech_tokenizer/preprocessor_config.json
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"chunk_length_s": null,
|
| 3 |
+
"feature_extractor_type": "EncodecFeatureExtractor",
|
| 4 |
+
"feature_size": 1,
|
| 5 |
+
"overlap": null,
|
| 6 |
+
"padding_side": "right",
|
| 7 |
+
"padding_value": 0.0,
|
| 8 |
+
"return_attention_mask": true,
|
| 9 |
+
"sampling_rate": 24000
|
| 10 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:09267689b8362020b9763b65dd5be7e086b31e28d72e02837a9e781de9a91bc7
|
| 3 |
+
size 11423986
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": false,
|
| 3 |
+
"add_prefix_space": false,
|
| 4 |
+
"added_tokens_decoder": {
|
| 5 |
+
"151643": {
|
| 6 |
+
"content": "<|endoftext|>",
|
| 7 |
+
"lstrip": false,
|
| 8 |
+
"normalized": false,
|
| 9 |
+
"rstrip": false,
|
| 10 |
+
"single_word": false,
|
| 11 |
+
"special": true
|
| 12 |
+
},
|
| 13 |
+
"151644": {
|
| 14 |
+
"content": "<|im_start|>",
|
| 15 |
+
"lstrip": false,
|
| 16 |
+
"normalized": false,
|
| 17 |
+
"rstrip": false,
|
| 18 |
+
"single_word": false,
|
| 19 |
+
"special": true
|
| 20 |
+
},
|
| 21 |
+
"151645": {
|
| 22 |
+
"content": "<|im_end|>",
|
| 23 |
+
"lstrip": false,
|
| 24 |
+
"normalized": false,
|
| 25 |
+
"rstrip": false,
|
| 26 |
+
"single_word": false,
|
| 27 |
+
"special": true
|
| 28 |
+
},
|
| 29 |
+
"151646": {
|
| 30 |
+
"content": "<|object_ref_start|>",
|
| 31 |
+
"lstrip": false,
|
| 32 |
+
"normalized": false,
|
| 33 |
+
"rstrip": false,
|
| 34 |
+
"single_word": false,
|
| 35 |
+
"special": true
|
| 36 |
+
},
|
| 37 |
+
"151647": {
|
| 38 |
+
"content": "<|object_ref_end|>",
|
| 39 |
+
"lstrip": false,
|
| 40 |
+
"normalized": false,
|
| 41 |
+
"rstrip": false,
|
| 42 |
+
"single_word": false,
|
| 43 |
+
"special": true
|
| 44 |
+
},
|
| 45 |
+
"151648": {
|
| 46 |
+
"content": "<|box_start|>",
|
| 47 |
+
"lstrip": false,
|
| 48 |
+
"normalized": false,
|
| 49 |
+
"rstrip": false,
|
| 50 |
+
"single_word": false,
|
| 51 |
+
"special": true
|
| 52 |
+
},
|
| 53 |
+
"151649": {
|
| 54 |
+
"content": "<|box_end|>",
|
| 55 |
+
"lstrip": false,
|
| 56 |
+
"normalized": false,
|
| 57 |
+
"rstrip": false,
|
| 58 |
+
"single_word": false,
|
| 59 |
+
"special": true
|
| 60 |
+
},
|
| 61 |
+
"151650": {
|
| 62 |
+
"content": "<|quad_start|>",
|
| 63 |
+
"lstrip": false,
|
| 64 |
+
"normalized": false,
|
| 65 |
+
"rstrip": false,
|
| 66 |
+
"single_word": false,
|
| 67 |
+
"special": true
|
| 68 |
+
},
|
| 69 |
+
"151651": {
|
| 70 |
+
"content": "<|quad_end|>",
|
| 71 |
+
"lstrip": false,
|
| 72 |
+
"normalized": false,
|
| 73 |
+
"rstrip": false,
|
| 74 |
+
"single_word": false,
|
| 75 |
+
"special": true
|
| 76 |
+
},
|
| 77 |
+
"151652": {
|
| 78 |
+
"content": "<|vision_start|>",
|
| 79 |
+
"lstrip": false,
|
| 80 |
+
"normalized": false,
|
| 81 |
+
"rstrip": false,
|
| 82 |
+
"single_word": false,
|
| 83 |
+
"special": true
|
| 84 |
+
},
|
| 85 |
+
"151653": {
|
| 86 |
+
"content": "<|vision_end|>",
|
| 87 |
+
"lstrip": false,
|
| 88 |
+
"normalized": false,
|
| 89 |
+
"rstrip": false,
|
| 90 |
+
"single_word": false,
|
| 91 |
+
"special": true
|
| 92 |
+
},
|
| 93 |
+
"151654": {
|
| 94 |
+
"content": "<|vision_pad|>",
|
| 95 |
+
"lstrip": false,
|
| 96 |
+
"normalized": false,
|
| 97 |
+
"rstrip": false,
|
| 98 |
+
"single_word": false,
|
| 99 |
+
"special": true
|
| 100 |
+
},
|
| 101 |
+
"151655": {
|
| 102 |
+
"content": "<|image_pad|>",
|
| 103 |
+
"lstrip": false,
|
| 104 |
+
"normalized": false,
|
| 105 |
+
"rstrip": false,
|
| 106 |
+
"single_word": false,
|
| 107 |
+
"special": true
|
| 108 |
+
},
|
| 109 |
+
"151656": {
|
| 110 |
+
"content": "<|video_pad|>",
|
| 111 |
+
"lstrip": false,
|
| 112 |
+
"normalized": false,
|
| 113 |
+
"rstrip": false,
|
| 114 |
+
"single_word": false,
|
| 115 |
+
"special": true
|
| 116 |
+
},
|
| 117 |
+
"151657": {
|
| 118 |
+
"content": "<tool_call>",
|
| 119 |
+
"lstrip": false,
|
| 120 |
+
"normalized": false,
|
| 121 |
+
"rstrip": false,
|
| 122 |
+
"single_word": false,
|
| 123 |
+
"special": false
|
| 124 |
+
},
|
| 125 |
+
"151658": {
|
| 126 |
+
"content": "</tool_call>",
|
| 127 |
+
"lstrip": false,
|
| 128 |
+
"normalized": false,
|
| 129 |
+
"rstrip": false,
|
| 130 |
+
"single_word": false,
|
| 131 |
+
"special": false
|
| 132 |
+
},
|
| 133 |
+
"151659": {
|
| 134 |
+
"content": "<|fim_prefix|>",
|
| 135 |
+
"lstrip": false,
|
| 136 |
+
"normalized": false,
|
| 137 |
+
"rstrip": false,
|
| 138 |
+
"single_word": false,
|
| 139 |
+
"special": false
|
| 140 |
+
},
|
| 141 |
+
"151660": {
|
| 142 |
+
"content": "<|fim_middle|>",
|
| 143 |
+
"lstrip": false,
|
| 144 |
+
"normalized": false,
|
| 145 |
+
"rstrip": false,
|
| 146 |
+
"single_word": false,
|
| 147 |
+
"special": false
|
| 148 |
+
},
|
| 149 |
+
"151661": {
|
| 150 |
+
"content": "<|fim_suffix|>",
|
| 151 |
+
"lstrip": false,
|
| 152 |
+
"normalized": false,
|
| 153 |
+
"rstrip": false,
|
| 154 |
+
"single_word": false,
|
| 155 |
+
"special": false
|
| 156 |
+
},
|
| 157 |
+
"151662": {
|
| 158 |
+
"content": "<|fim_pad|>",
|
| 159 |
+
"lstrip": false,
|
| 160 |
+
"normalized": false,
|
| 161 |
+
"rstrip": false,
|
| 162 |
+
"single_word": false,
|
| 163 |
+
"special": false
|
| 164 |
+
},
|
| 165 |
+
"151663": {
|
| 166 |
+
"content": "<|repo_name|>",
|
| 167 |
+
"lstrip": false,
|
| 168 |
+
"normalized": false,
|
| 169 |
+
"rstrip": false,
|
| 170 |
+
"single_word": false,
|
| 171 |
+
"special": false
|
| 172 |
+
},
|
| 173 |
+
"151664": {
|
| 174 |
+
"content": "<|file_sep|>",
|
| 175 |
+
"lstrip": false,
|
| 176 |
+
"normalized": false,
|
| 177 |
+
"rstrip": false,
|
| 178 |
+
"single_word": false,
|
| 179 |
+
"special": false
|
| 180 |
+
},
|
| 181 |
+
"151665": {
|
| 182 |
+
"content": "<tool_response>",
|
| 183 |
+
"lstrip": false,
|
| 184 |
+
"normalized": false,
|
| 185 |
+
"rstrip": false,
|
| 186 |
+
"single_word": false,
|
| 187 |
+
"special": false
|
| 188 |
+
},
|
| 189 |
+
"151666": {
|
| 190 |
+
"content": "</tool_response>",
|
| 191 |
+
"lstrip": false,
|
| 192 |
+
"normalized": false,
|
| 193 |
+
"rstrip": false,
|
| 194 |
+
"single_word": false,
|
| 195 |
+
"special": false
|
| 196 |
+
},
|
| 197 |
+
"151667": {
|
| 198 |
+
"content": "<think>",
|
| 199 |
+
"lstrip": false,
|
| 200 |
+
"normalized": false,
|
| 201 |
+
"rstrip": false,
|
| 202 |
+
"single_word": false,
|
| 203 |
+
"special": false
|
| 204 |
+
},
|
| 205 |
+
"151668": {
|
| 206 |
+
"content": "</think>",
|
| 207 |
+
"lstrip": false,
|
| 208 |
+
"normalized": false,
|
| 209 |
+
"rstrip": false,
|
| 210 |
+
"single_word": false,
|
| 211 |
+
"special": false
|
| 212 |
+
},
|
| 213 |
+
"151669": {
|
| 214 |
+
"content": "<|audio_start|>",
|
| 215 |
+
"lstrip": false,
|
| 216 |
+
"normalized": false,
|
| 217 |
+
"rstrip": false,
|
| 218 |
+
"single_word": false,
|
| 219 |
+
"special": true
|
| 220 |
+
},
|
| 221 |
+
"151670": {
|
| 222 |
+
"content": "<|audio_end|>",
|
| 223 |
+
"lstrip": false,
|
| 224 |
+
"normalized": false,
|
| 225 |
+
"rstrip": false,
|
| 226 |
+
"single_word": false,
|
| 227 |
+
"special": true
|
| 228 |
+
},
|
| 229 |
+
"151671": {
|
| 230 |
+
"content": "<tts_pad>",
|
| 231 |
+
"lstrip": false,
|
| 232 |
+
"normalized": false,
|
| 233 |
+
"rstrip": false,
|
| 234 |
+
"single_word": false,
|
| 235 |
+
"special": true
|
| 236 |
+
},
|
| 237 |
+
"151672": {
|
| 238 |
+
"content": "<tts_text_bos>",
|
| 239 |
+
"lstrip": false,
|
| 240 |
+
"normalized": false,
|
| 241 |
+
"rstrip": false,
|
| 242 |
+
"single_word": false,
|
| 243 |
+
"special": true
|
| 244 |
+
},
|
| 245 |
+
"151673": {
|
| 246 |
+
"content": "<tts_text_eod>",
|
| 247 |
+
"lstrip": false,
|
| 248 |
+
"normalized": false,
|
| 249 |
+
"rstrip": false,
|
| 250 |
+
"single_word": false,
|
| 251 |
+
"special": true
|
| 252 |
+
},
|
| 253 |
+
"151674": {
|
| 254 |
+
"content": "<tts_text_bos_single>",
|
| 255 |
+
"lstrip": false,
|
| 256 |
+
"normalized": false,
|
| 257 |
+
"rstrip": false,
|
| 258 |
+
"single_word": false,
|
| 259 |
+
"special": true
|
| 260 |
+
},
|
| 261 |
+
"151675": {
|
| 262 |
+
"content": "<|audio_pad|>",
|
| 263 |
+
"lstrip": false,
|
| 264 |
+
"normalized": false,
|
| 265 |
+
"rstrip": false,
|
| 266 |
+
"single_word": false,
|
| 267 |
+
"special": true
|
| 268 |
+
}
|
| 269 |
+
},
|
| 270 |
+
"additional_special_tokens": [
|
| 271 |
+
"<|im_start|>",
|
| 272 |
+
"<|im_end|>",
|
| 273 |
+
"<|object_ref_start|>",
|
| 274 |
+
"<|object_ref_end|>",
|
| 275 |
+
"<|box_start|>",
|
| 276 |
+
"<|box_end|>",
|
| 277 |
+
"<|quad_start|>",
|
| 278 |
+
"<|quad_end|>",
|
| 279 |
+
"<|vision_start|>",
|
| 280 |
+
"<|vision_end|>",
|
| 281 |
+
"<|vision_pad|>",
|
| 282 |
+
"<|image_pad|>",
|
| 283 |
+
"<|video_pad|>",
|
| 284 |
+
"<|audio_start|>",
|
| 285 |
+
"<|audio_end|>",
|
| 286 |
+
"<tts_pad>",
|
| 287 |
+
"<tts_text_bos>",
|
| 288 |
+
"<tts_text_bos_single>",
|
| 289 |
+
"<|audio_pad|>"
|
| 290 |
+
],
|
| 291 |
+
"audio_bos_token": "<|audio_start|>",
|
| 292 |
+
"audio_eos_token": "<|audio_end|>",
|
| 293 |
+
"audio_token": "<|audio_pad|>",
|
| 294 |
+
"bos_token": null,
|
| 295 |
+
"clean_up_tokenization_spaces": false,
|
| 296 |
+
"eos_token": "<|im_end|>",
|
| 297 |
+
"errors": "replace",
|
| 298 |
+
"extra_special_tokens": {
|
| 299 |
+
"audio_bos_token": "<|audio_start|>",
|
| 300 |
+
"audio_eos_token": "<|audio_end|>",
|
| 301 |
+
"audio_token": "<|audio_pad|>",
|
| 302 |
+
"image_token": "<|image_pad|>",
|
| 303 |
+
"video_token": "<|video_pad|>",
|
| 304 |
+
"vision_bos_token": "<|vision_start|>",
|
| 305 |
+
"vision_eos_token": "<|vision_end|>"
|
| 306 |
+
},
|
| 307 |
+
"fix_mistral_regex": true,
|
| 308 |
+
"image_token": "<|image_pad|>",
|
| 309 |
+
"model_max_length": 131072,
|
| 310 |
+
"pad_token": "<|endoftext|>",
|
| 311 |
+
"processor_class": "Qwen3TTSProcessor",
|
| 312 |
+
"split_special_tokens": false,
|
| 313 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 314 |
+
"unk_token": null,
|
| 315 |
+
"video_token": "<|video_pad|>",
|
| 316 |
+
"vision_bos_token": "<|vision_start|>",
|
| 317 |
+
"vision_eos_token": "<|vision_end|>"
|
| 318 |
+
}
|
vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
vocence_config.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Miner + /health metadata. Weights live in this HF repo (no runtime model_id).
|
| 2 |
+
runtime:
|
| 3 |
+
adapter: "qwen3_tts_repo_snapshot"
|
| 4 |
+
device_preference: "cuda"
|
| 5 |
+
dtype: "bfloat16"
|
| 6 |
+
default_language: "English"
|
| 7 |
+
use_flash_attention_2: false
|
| 8 |
+
|
| 9 |
+
generation:
|
| 10 |
+
sample_rate: 24000
|
| 11 |
+
max_seconds: 30
|
| 12 |
+
|
| 13 |
+
limits:
|
| 14 |
+
max_text_chars: 2000
|
| 15 |
+
max_instruction_chars: 600
|
| 16 |
+
default_language: "English"
|