Pengwei Li committed on
Commit
64fcc17
1 Parent(s): d217001

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +91 -0
README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: fairseq
3
+ task: text-to-speech
4
+ tags:
5
+ - fairseq
6
+ - audio
7
+ - text-to-speech
8
+ language: en
9
+ datasets:
10
+ - mtedx
11
+ - covost2
12
+ - europarl_st
13
+ - voxpopuli
14
+ widget:
15
+ - example_title: Common Voice sample 1
16
+ src: https://huggingface.co/facebook/xm_transformer_600m-es_en-multi_domain/resolve/main/common_voice_es_19966634.flac
17
+ ---
18
+ ## unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur
19
+
20
+ Speech-to-speech translation model from fairseq S2UT ([paper](https://arxiv.org/abs/2204.02967)/[code](https://github.com/facebookresearch/fairseq/blob/main/examples/speech_to_speech/docs/enhanced_direct_s2st_discrete_units.md)):
21
+ - Spanish-English
22
+ - Trained on mTEDx, CoVoST 2, Europarl-ST and VoxPopuli
23
+
24
+ ## Usage
25
+
26
+ ```python
27
# End-to-end usage example: Spanish speech -> discrete units (S2UT model)
# -> English speech (unit HiFi-GAN vocoder).
import json
import os
from pathlib import Path

import IPython.display as ipd
import torchaudio
from huggingface_hub import snapshot_download

from fairseq import hub_utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.text_to_speech import CodeHiFiGANVocoder
from fairseq.models.text_to_speech.hub_interface import VocoderHubInterface

library_name = "fairseq"
cache_dir = os.getenv("HUGGINGFACE_HUB_CACHE")

# --- Step 1: speech-to-unit translation (S2UT) ---
# This section was previously commented out, which left `unit` undefined and
# made the example crash with a NameError at the vocoder step below.
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/xm_transformer_s2ut_800m-es-en-st-asr-bt_h1_2022",
    arg_overrides={"config_yaml": "config.yaml", "task": "speech_to_text"},
    cache_dir=cache_dir,
)
model = models[0].cpu()
cfg["task"].cpu = True
generator = task.build_generator([model], cfg)

# Requires 16000 Hz, mono-channel audio. Replace with the path to your own
# input file (e.g. a .wav or .flac recording of Spanish speech).
audio, _ = torchaudio.load("/path/to/a/16kHz/mono/audio/file.flac")

sample = S2THubInterface.get_model_input(task, audio)
unit = S2THubInterface.get_prediction(task, model, generator, sample)

# --- Step 2: unit-to-speech synthesis with this vocoder ---
cache_dir = cache_dir or (Path.home() / ".cache" / library_name).as_posix()
cache_dir = snapshot_download(
    "facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur",
    cache_dir=cache_dir,
    library_name=library_name,
)

x = hub_utils.from_pretrained(
    cache_dir,
    "model.pt",
    ".",
    archive_map=CodeHiFiGANVocoder.hub_models(),
    config_yaml="config.json",
    fp16=False,
    is_vocoder=True,
)

with open(f"{x['args']['data']}/config.json") as f:
    vocoder_cfg = json.load(f)
assert (
    len(x["args"]["model_path"]) == 1
), "Too many vocoder models in the input"

vocoder = CodeHiFiGANVocoder(x["args"]["model_path"][0], vocoder_cfg)
tts_model = VocoderHubInterface(vocoder_cfg, vocoder)

tts_sample = tts_model.get_model_input(unit)
wav, sr = tts_model.get_prediction(tts_sample)

# Play the synthesized English speech in a notebook.
ipd.Audio(wav, rate=sr)
91
+ ```