yujiepan commited on
Commit
cdc2d96
·
verified ·
1 Parent(s): 1f0e7c4

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ base_model:
4
+ - k2-fsa/OmniVoice
5
+ ---
6
+
7
+ This tiny model is intended for debugging. It is randomly initialized using a configuration adapted from [k2-fsa/OmniVoice](https://huggingface.co/k2-fsa/OmniVoice).
8
+
9
+ | File path | Size |
10
+ |------|------|
11
+ | model.safetensors | 5.5MB |
12
+ | audio_tokenizer/model.safetensors | 6.7MB |
13
+
14
+
15
+ ### Example usage:
16
+
17
+ ```python
18
+ from omnivoice import OmniVoice
19
+ import torch
20
+ import torchaudio
21
+
22
+ model_id = "tiny-random/omnivoice"
23
+ model = OmniVoice.from_pretrained(
24
+ model_id,
25
+ dtype=torch.bfloat16,
26
+ )
27
+ audio = model.generate(
28
+ text="Hello, this is test example 1",
29
+ instruct="low pitch, british accent",
30
+ )
31
+ torchaudio.save("/tmp/example1.wav", audio[0], 24000)
32
+
33
+ audio2 = model.generate(
34
+ text="Hello, this is test example 2",
35
+ ref_audio="/tmp/example1.wav",
36
+ ref_text="Hello, this is test example 1",
37
+ )
38
+ torchaudio.save("/tmp/example2.wav", audio2[0], 24000)
39
+ ```
40
+
41
+ ### Code used to create this repo:
42
+
43
+ <details>
44
+ <summary>Click to expand</summary>
45
+
46
+ ```python
47
+ import torch
48
+ import os
49
+
50
+ from transformers import (
51
+ set_seed,
52
+ AutoConfig,
53
+ AutoTokenizer,
54
+ HiggsAudioV2TokenizerModel,
55
+ AutoFeatureExtractor,
56
+ )
57
+ from huggingface_hub import hf_hub_download
58
+ import json
59
+ from omnivoice import OmniVoice, OmniVoiceConfig
60
+
61
+ source_model_id = "k2-fsa/OmniVoice"
62
+ save_folder = "/tmp/tiny-random/omnivoice"
63
+
64
+ set_seed(42)
65
+ tokenizer = AutoTokenizer.from_pretrained(source_model_id, trust_remote_code=True)
66
+ tokenizer.save_pretrained(save_folder)
67
+
68
+ with open(
69
+ hf_hub_download(source_model_id, filename="audio_tokenizer/config.json", repo_type="model"),
70
+ "r",
71
+ encoding="utf-8",
72
+ ) as f:
73
+ config_dict = json.load(f)
74
+ config_dict["acoustic_model_config"].update(
75
+ {
76
+ "decoder_hidden_size": 32,
77
+ "encoder_hidden_size": 4,
78
+ "hidden_size": 4,
79
+ "codebook_dim": 8,
80
+ }
81
+ )
82
+ config_dict["semantic_model_config"].update(
83
+ {
84
+ "conv_dim": [8] * 7,
85
+ "hidden_size": 16 * 4,
86
+ "intermediate_size": 64,
87
+ "num_attention_heads": 4,
88
+ "num_hidden_layers": 2,
89
+ }
90
+ )
91
+ os.makedirs(os.path.join(save_folder, "audio_tokenizer"), exist_ok=True)
92
+ with open(os.path.join(save_folder, "audio_tokenizer/config.json"), "w", encoding="utf-8") as f:
93
+ json.dump(config_dict, f, ensure_ascii=False, indent=2)
94
+ audio_tokenizer = HiggsAudioV2TokenizerModel(
95
+ AutoConfig.from_pretrained(os.path.join(save_folder, "audio_tokenizer"))
96
+ )
97
+ audio_tokenizer.save_pretrained(os.path.join(save_folder, "audio_tokenizer"))
98
+ print(audio_tokenizer)
99
+ set_seed(42)
100
+ with torch.no_grad():
101
+ for name, p in sorted(audio_tokenizer.named_parameters()):
102
+ torch.nn.init.normal_(p, 0, 0.2)
103
+ print(name, p.shape)
104
+
105
+ feature_extractor = AutoFeatureExtractor.from_pretrained(source_model_id, subfolder="audio_tokenizer")
106
+ feature_extractor.save_pretrained(os.path.join(save_folder, "audio_tokenizer"))
107
+
108
+ with open(
109
+ hf_hub_download(source_model_id, filename="config.json", repo_type="model"),
110
+ "r",
111
+ encoding="utf-8",
112
+ ) as f:
113
+ config_dict = json.load(f)
114
+ config_dict["llm_config"].update(
115
+ {
116
+ "hidden_size": 8,
117
+ "head_dim": 32,
118
+ "intermediate_size": 32,
119
+ "num_attention_heads": 8,
120
+ "num_key_value_heads": 4,
121
+ "num_hidden_layers": 4,
122
+ "max_window_layers": 2,
123
+ "layer_types": ["full_attention"] * 4,
124
+ }
125
+ )
126
+ config = OmniVoiceConfig.from_dict(config_dict)
127
+ model = OmniVoice(config).eval()
128
+ set_seed(42)
129
+ with torch.no_grad():
130
+ for name, p in sorted(model.named_parameters()):
131
+ torch.nn.init.normal_(p, 0, 0.2)
132
+ print(name, p.shape)
133
+ model.save_pretrained(save_folder)
134
+ ```
135
+
136
+ </details>
audio_tokenizer/config.json ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "acoustic_model_config": {
3
+ "codebook_dim": 8,
4
+ "codebook_loss_weight": 1.0,
5
+ "codebook_size": 1024,
6
+ "commitment_loss_weight": 0.25,
7
+ "decoder_hidden_size": 32,
8
+ "downsampling_ratios": [
9
+ 8,
10
+ 5,
11
+ 4,
12
+ 2,
13
+ 3
14
+ ],
15
+ "encoder_hidden_size": 4,
16
+ "hidden_size": 4,
17
+ "hop_length": 960,
18
+ "model_type": "dac",
19
+ "n_codebooks": 9,
20
+ "quantizer_dropout": 0,
21
+ "sampling_rate": 16000,
22
+ "upsampling_ratios": [
23
+ 8,
24
+ 5,
25
+ 4,
26
+ 2,
27
+ 3
28
+ ]
29
+ },
30
+ "architectures": [
31
+ "HiggsAudioV2TokenizerModel"
32
+ ],
33
+ "block_dilations": [
34
+ 1,
35
+ 1
36
+ ],
37
+ "channel_ratios": [
38
+ 1,
39
+ 1
40
+ ],
41
+ "codebook_dim": 64,
42
+ "codebook_size": 1024,
43
+ "downsample_factor": 320,
44
+ "dtype": "float32",
45
+ "initializer_range": 0.02,
46
+ "kernel_size": 3,
47
+ "model_type": "higgs_audio_v2_tokenizer",
48
+ "sample_rate": 24000,
49
+ "semantic_model_config": {
50
+ "activation_dropout": 0.1,
51
+ "apply_spec_augment": true,
52
+ "attention_dropout": 0.1,
53
+ "bos_token_id": 1,
54
+ "classifier_proj_size": 256,
55
+ "conv_bias": false,
56
+ "conv_dim": [
57
+ 8,
58
+ 8,
59
+ 8,
60
+ 8,
61
+ 8,
62
+ 8,
63
+ 8
64
+ ],
65
+ "conv_kernel": [
66
+ 10,
67
+ 3,
68
+ 3,
69
+ 3,
70
+ 3,
71
+ 2,
72
+ 2
73
+ ],
74
+ "conv_pos_batch_norm": false,
75
+ "conv_stride": [
76
+ 5,
77
+ 2,
78
+ 2,
79
+ 2,
80
+ 2,
81
+ 2,
82
+ 2
83
+ ],
84
+ "ctc_loss_reduction": "sum",
85
+ "ctc_zero_infinity": false,
86
+ "do_stable_layer_norm": false,
87
+ "eos_token_id": 2,
88
+ "feat_extract_activation": "gelu",
89
+ "feat_extract_norm": "group",
90
+ "feat_proj_dropout": 0.0,
91
+ "feat_proj_layer_norm": true,
92
+ "final_dropout": 0.1,
93
+ "hidden_act": "gelu",
94
+ "hidden_dropout": 0.1,
95
+ "hidden_size": 64,
96
+ "initializer_range": 0.02,
97
+ "intermediate_size": 64,
98
+ "layer_norm_eps": 1e-05,
99
+ "layerdrop": 0.1,
100
+ "mask_feature_length": 10,
101
+ "mask_feature_min_masks": 0,
102
+ "mask_feature_prob": 0.0,
103
+ "mask_time_length": 10,
104
+ "mask_time_min_masks": 2,
105
+ "mask_time_prob": 0.0,
106
+ "model_type": "hubert",
107
+ "num_attention_heads": 4,
108
+ "num_conv_pos_embedding_groups": 16,
109
+ "num_conv_pos_embeddings": 128,
110
+ "num_feat_extract_layers": 7,
111
+ "num_hidden_layers": 2,
112
+ "pad_token_id": 0,
113
+ "use_weighted_layer_sum": false,
114
+ "vocab_size": 32
115
+ },
116
+ "semantic_sample_rate": 16000,
117
+ "strides": [
118
+ 1,
119
+ 1
120
+ ],
121
+ "target_bandwidths": [
122
+ 0.5,
123
+ 1,
124
+ 1.5,
125
+ 2
126
+ ],
127
+ "transformers_version": "5.5.0",
128
+ "unit_kernel_size": 3
129
+ }
audio_tokenizer/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0478f3bc248298916cb9fc1f9acfae4d39dc325ca337830340911179969ce50
3
+ size 6666464
audio_tokenizer/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "feature_extractor_type": "DacFeatureExtractor",
3
+ "feature_size": 1,
4
+ "hop_length": 960,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 24000
9
+ }
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "OmniVoice"
4
+ ],
5
+ "audio_codebook_weights": [
6
+ 8,
7
+ 8,
8
+ 6,
9
+ 6,
10
+ 4,
11
+ 4,
12
+ 2,
13
+ 2
14
+ ],
15
+ "audio_mask_id": 1024,
16
+ "audio_vocab_size": 1025,
17
+ "bos_token_id": null,
18
+ "dtype": "float32",
19
+ "eos_token_id": 151645,
20
+ "llm_config": {
21
+ "_name_or_path": "",
22
+ "architectures": [
23
+ "Qwen3ForCausalLM"
24
+ ],
25
+ "attention_bias": false,
26
+ "attention_dropout": 0.0,
27
+ "bos_token_id": 151643,
28
+ "chunk_size_feed_forward": 0,
29
+ "dtype": "float32",
30
+ "eos_token_id": 151645,
31
+ "head_dim": 32,
32
+ "hidden_act": "silu",
33
+ "hidden_size": 8,
34
+ "id2label": {
35
+ "0": "LABEL_0",
36
+ "1": "LABEL_1"
37
+ },
38
+ "initializer_range": 0.02,
39
+ "intermediate_size": 32,
40
+ "is_encoder_decoder": false,
41
+ "label2id": {
42
+ "LABEL_0": 0,
43
+ "LABEL_1": 1
44
+ },
45
+ "layer_types": [
46
+ "full_attention",
47
+ "full_attention",
48
+ "full_attention",
49
+ "full_attention"
50
+ ],
51
+ "max_position_embeddings": 40960,
52
+ "max_window_layers": 2,
53
+ "model_type": "qwen3",
54
+ "num_attention_heads": 8,
55
+ "num_hidden_layers": 4,
56
+ "num_key_value_heads": 4,
57
+ "output_attentions": false,
58
+ "output_hidden_states": false,
59
+ "pad_token_id": null,
60
+ "problem_type": null,
61
+ "return_dict": true,
62
+ "rms_norm_eps": 1e-06,
63
+ "rope_parameters": {
64
+ "rope_theta": 1000000,
65
+ "rope_type": "default"
66
+ },
67
+ "sliding_window": null,
68
+ "tie_word_embeddings": true,
69
+ "use_cache": true,
70
+ "use_sliding_window": false,
71
+ "vocab_size": 151676
72
+ },
73
+ "model_type": "omnivoice",
74
+ "num_audio_codebook": 8,
75
+ "pad_token_id": 151643,
76
+ "transformers_version": "5.5.0"
77
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d626de7b879f2d8cf439f1627481afbfd67ad288a67d59cc54dce41bcea5fc62
3
+ size 5495352
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:408f669b7e2b045fdf54201d815bd364e6667dbd845115da81239c40bc6dcfd1
3
+ size 11423986
tokenizer_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|im_end|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<|denoise|>",
10
+ "<|lang_start|>",
11
+ "<|lang_end|>",
12
+ "<|instruct_start|>",
13
+ "<|instruct_end|>",
14
+ "<|text_start|>",
15
+ "<|text_end|>"
16
+ ],
17
+ "is_local": false,
18
+ "model_max_length": 131072,
19
+ "pad_token": "<|endoftext|>",
20
+ "split_special_tokens": false,
21
+ "tokenizer_class": "Qwen2Tokenizer",
22
+ "unk_token": null
23
+ }