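"""Command-line entry point for the WhisperLive transcription server.

Parses CLI options, validates TensorRT-specific requirements, and starts a
TranscriptionServer on the configured WebSocket port.
"""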
import argparse
import os


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p',
                        type=int,
                        default=9090,
                        help="WebSocket port to run the server on.")
    parser.add_argument('--backend', '-b',
                        type=str,
                        default='faster_whisper',
                        help='Backend to use: one of "tensorrt", "faster_whisper", or "openvino".')
    parser.add_argument('--faster_whisper_custom_model_path', '-fw',
                        type=str, default=None,
                        help="Path to a custom Faster Whisper model.")
    parser.add_argument('--trt_model_path', '-trt',
                        type=str,
                        default=None,
                        help='Whisper TensorRT model path.')
    parser.add_argument('--trt_multilingual', '-m',
                        action="store_true",
                        help='TensorRT backend only. Set this flag if the model is multilingual.')
    parser.add_argument('--trt_py_session',
                        action="store_true",
                        help='TensorRT backend only. Use the Python session instead of the default C++ session.')
    parser.add_argument('--omp_num_threads', '-omp',
                        type=int,
                        default=1,
                        help="Number of threads to use for OpenMP.")
    parser.add_argument('--no_single_model', '-nsm',
                        action='store_true',
                        help='Set this if every connection should instantiate its own model. '
                             'Only relevant for custom models passed with -trt or -fw.')
    args = parser.parse_args()

    if args.backend == "tensorrt":
        if args.trt_model_path is None:
            raise ValueError("Please provide a valid TensorRT model path.")

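    # Honor an externally set OMP_NUM_THREADS; otherwise apply the --omp_num_threads value.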
if "OMP_NUM_THREADS" not in os.environ: |
|
os.environ["OMP_NUM_THREADS"] = str(args.omp_num_threads) |
|
|
|
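    # Deferred import: OMP_NUM_THREADS is set above, before the backend loads.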
    from whisper_live.server import TranscriptionServer

    server = TranscriptionServer()
    server.run(
        "0.0.0.0",
        port=args.port,
        backend=args.backend,
        faster_whisper_custom_model_path=args.faster_whisper_custom_model_path,
        whisper_tensorrt_path=args.trt_model_path,
        trt_multilingual=args.trt_multilingual,
        trt_py_session=args.trt_py_session,
        single_model=not args.no_single_model,
    )
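
# Example invocations (illustrative; the script name run_server.py and the
# model paths are assumptions, not taken from this file):
#   python run_server.py --port 9090 --backend faster_whisper
#   python run_server.py -b faster_whisper -fw /path/to/custom_model
#   python run_server.py -b tensorrt -trt /path/to/whisper_trt_engine -m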