"""
CosyVoice gRPC back-end, updated to mirror the FastAPI logic:

* loads CosyVoice2 with TRT / FP16 first (falls back to CosyVoice)
* inference_zero_shot  - adds stream=False + speed
* inference_instruct   - keeps the original speaker-ID path
* inference_instruct2  - new: prompt-audio + speed (no speaker-ID)
"""

import argparse
import logging
import os
import sys
import tempfile
import urllib.parse
from concurrent import futures

import grpc
import numpy as np
import requests
import torch
import torchaudio

import cosyvoice_pb2
import cosyvoice_pb2_grpc

logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s %(message)s")

# Make the CosyVoice repo and its Matcha-TTS submodule importable.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([
    f"{ROOT_DIR}/../../..",
    f"{ROOT_DIR}/../../../third_party/Matcha-TTS",
])

from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2


def _bytes_to_tensor(wav_bytes: bytes) -> torch.Tensor:
    """
    Convert int16 little-endian PCM bytes to a torch.FloatTensor in [-1, 1].
    """
    speech = torch.from_numpy(
        np.frombuffer(wav_bytes, dtype=np.int16)
    ).unsqueeze(0).float() / (2 ** 15)
    return speech


def _yield_audio(model_output):
    """
    Generator that converts CosyVoice output segments into protobuf Response
    messages carrying int16 PCM bytes.
    """
    for seg in model_output:
        pcm16 = (seg["tts_speech"].numpy() * (2 ** 15)).astype(np.int16)
        yield cosyvoice_pb2.Response(tts_audio=pcm16.tobytes())
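
# Round-trip note (illustrative, not executed): _bytes_to_tensor and
# _yield_audio are the inverse int16 <-> float32 conversions, e.g.
#
#   pcm = np.zeros(16_000).astype(np.int16).tobytes()   # 1 s of silence
#   assert _bytes_to_tensor(pcm).shape == (1, 16_000)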


def _load_prompt_from_url(url: str, target_sr: int = 16_000) -> torch.Tensor:
    """
    Download *url* (wav / mp3 / flac) and return a mono torch.FloatTensor of
    shape [1, T] at target_sr. The temp file is removed before return.
    """
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()

    ext = os.path.splitext(urllib.parse.urlparse(url).path)[1] or ".tmp"
    with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
        f.write(resp.content)
        tmp_path = f.name

    try:
        wav, sr = torchaudio.load(tmp_path)
        if wav.size(0) > 1:  # torchaudio returns [channels, T]; down-mix to mono
            wav = wav.mean(dim=0, keepdim=True)
        if sr != target_sr:
            wav = torchaudio.functional.resample(wav, sr, target_sr)
        return wav
    finally:
        try:
            os.remove(tmp_path)
        except OSError as e:
            logging.warning("Could not delete temp file %s: %s", tmp_path, e)


class CosyVoiceServiceImpl(cosyvoice_pb2_grpc.CosyVoiceServicer):
    def __init__(self, args):
        # Prefer CosyVoice2 with TensorRT / FP16; fall back to plain CosyVoice,
        # as the module docstring promises.
        try:
            self.cosyvoice = CosyVoice2(args.model_dir,
                                        load_jit=False,
                                        load_trt=True,
                                        fp16=True)
            logging.info("Loaded CosyVoice2 (TRT / FP16).")
        except Exception:
            try:
                self.cosyvoice = CosyVoice(args.model_dir)
                logging.info("Loaded CosyVoice (fallback).")
            except Exception:
                raise TypeError("No valid CosyVoice model found!")

    def Inference(self, request, context):
        """Route to the correct model call based on the oneof field present."""
        # ---------- SFT: pre-trained speaker ID ----------
        if request.HasField("sft_request"):
            logging.info("Received SFT inference request")
            mo = self.cosyvoice.inference_sft(
                request.sft_request.tts_text,
                request.sft_request.spk_id
            )
            yield from _yield_audio(mo)
            return

        # ---------- zero-shot: clone a voice from prompt audio + transcript ----------
        if request.HasField("zero_shot_request"):
            logging.info("Received zero-shot inference request")
            zr = request.zero_shot_request

            # prompt_audio is either raw int16 PCM bytes or a URL to an audio
            # file; _load_prompt_from_url cleans up its own temp file.
            if zr.prompt_audio.startswith(b"http"):
                prompt = _load_prompt_from_url(zr.prompt_audio.decode("utf-8"))
            else:
                prompt = _bytes_to_tensor(zr.prompt_audio)

            speed = getattr(zr, "speed", 1.0)
            mo = self.cosyvoice.inference_zero_shot(
                zr.tts_text,
                zr.prompt_text,
                prompt,
                stream=False,
                speed=speed,
            )
            yield from _yield_audio(mo)
            return

        # ---------- cross-lingual: prompt audio only, no transcript ----------
        if request.HasField("cross_lingual_request"):
            logging.info("Received cross-lingual inference request")
            cr = request.cross_lingual_request

            if cr.prompt_audio.startswith(b"http"):
                prompt = _load_prompt_from_url(cr.prompt_audio.decode("utf-8"))
            else:
                prompt = _bytes_to_tensor(cr.prompt_audio)

            mo = self.cosyvoice.inference_cross_lingual(
                cr.tts_text,
                prompt
            )
            yield from _yield_audio(mo)
            return

        # ---------- instruct / instruct-2 ----------
        if request.HasField("instruct_request"):
            ir = request.instruct_request

            # instruct-2: the proto defines a prompt_audio field and it is set,
            # so take the prompt-audio + speed path (no speaker ID).
            if ('prompt_audio' in ir.DESCRIPTOR.fields_by_name
                    and ir.HasField("prompt_audio")):
                logging.info("Received instruct-2 inference request")

                if ir.prompt_audio.startswith(b"http"):
                    prompt = _load_prompt_from_url(ir.prompt_audio.decode("utf-8"))
                else:
                    prompt = _bytes_to_tensor(ir.prompt_audio)

                speed = getattr(ir, "speed", 1.0)
                mo = self.cosyvoice.inference_instruct2(
                    ir.tts_text,
                    ir.instruct_text,
                    prompt,
                    stream=False,
                    speed=speed
                )
            else:
                # original instruct path: pre-trained speaker ID + instruct text
                logging.info("Received instruct inference request")
                mo = self.cosyvoice.inference_instruct(
                    ir.tts_text,
                    ir.spk_id,
                    ir.instruct_text
                )

            yield from _yield_audio(mo)
            return

        context.abort(grpc.StatusCode.INVALID_ARGUMENT,
                      "Unsupported request type in oneof field.")


def serve(args):
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=args.max_conc),
        maximum_concurrent_rpcs=args.max_conc
    )
    cosyvoice_pb2_grpc.add_CosyVoiceServicer_to_server(
        CosyVoiceServiceImpl(args), server
    )
    server.add_insecure_port(f"0.0.0.0:{args.port}")
    server.start()
    logging.info("CosyVoice gRPC server listening on 0.0.0.0:%d", args.port)
    server.wait_for_termination()
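

# Example invocation (assuming this file is saved as server.py and the model
# directory below has been downloaded locally):
#
#   python server.py --port 8000 --max_conc 4 \
#       --model_dir pretrained_models/CosyVoice2-0.5B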

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--max_conc", type=int, default=4,
                        help="maximum concurrent requests / threads")
    parser.add_argument("--model_dir", type=str,
                        default="pretrained_models/CosyVoice2-0.5B",
                        help="local path or ModelScope repo id")
    serve(parser.parse_args())