{
    "models": [
        // Configuration for the built-in models. You can remove any of these 
        // if you don't want to use the default models.
        {
            "name": "tiny",
            "url": "tiny" 
        },
        {
            "name": "base",
            "url": "base"
        },
        {
            "name": "small",
            "url": "small"
        },
        {
            "name": "medium",
            "url": "medium"
        },
        {
            "name": "large",
            "url": "large"
        },
        {
            "name": "large-v2",
            "url": "large-v2"
        },
        // Uncomment to add custom models, e.g. a fine-tuned HuggingFace model, a local path, or a remote URL
        //{
        //    "name": "whisper-large-v2-mix-jp",
        //    "url": "vumichien/whisper-large-v2-mix-jp",
        //    // The type of the model. Can be "huggingface" or "whisper" - "whisper" is the default.
        //    // HuggingFace models are loaded using the HuggingFace transformers library and then converted to Whisper models.
        //    "type": "huggingface",
        //},
        //{
        //    "name": "local-model",
        //    "url": "path/to/local/model",
        //},
        //{
        //    "name": "remote-model",
        //    "url": "https://example.com/path/to/model",
        //}
    ],
    // Configuration options that will be used if they are not specified in the command line arguments.

    // * WEBUI options *

    // Maximum audio file length in seconds, or -1 for no limit. Ignored by CLI.
    "input_audio_max_duration": 600,
    // True to create a publicly shareable link for the app (Gradio's share option).
    "share": false,
    // The host or IP to bind to. If None, bind to localhost.
    "server_name": null,
    // The port to bind to.
    "server_port": 7860,
    // The number of workers to use for the web server. Use -1 to disable queueing.
    "queue_concurrency_count": 1,
    // Whether or not to automatically delete all uploaded files, to save disk space
    "delete_uploaded_files": true,

    // * General options *

    // The default implementation to use for Whisper. Can be "whisper" or "faster-whisper".
    "whisper_implementation": "whisper",

    // The default model name.
    "default_model_name": "medium",
    // The default VAD.
    "default_vad": "silero-vad",
    // A comma-delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.
    "vad_parallel_devices": "",
    // The number of CPU cores to use for VAD pre-processing.
    "vad_cpu_cores": 1,
    // The number of seconds before inactive processes are terminated. Use 0 to close processes immediately, or None for no timeout.
    "vad_process_timeout": 1800,
    // True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.
    "auto_parallel": false,
    // Directory to save the outputs (CLI will use the current directory if not specified)
    "output_dir": null,
    // The path to save model files; uses ~/.cache/whisper by default
    "model_dir": null,
    // Device to use for PyTorch inference, or null to use the default device
    "device": null,
    // Whether to print out the progress and debug messages
    "verbose": true,
    // Whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')
    "task": "transcribe",
    // Language spoken in the audio, specify None to perform language detection
    "language": null,
    // The window size (in seconds) to merge voice segments
    "vad_merge_window": 5,
    // The maximum size (in seconds) of a voice segment
    "vad_max_merge_size": 30,
    // The padding (in seconds) to add to each voice segment
    "vad_padding": 1,
    // The window size of the prompt to pass to Whisper
    "vad_prompt_window": 3,
    // Temperature to use for sampling
    "temperature": 0,
    // Number of candidates when sampling with non-zero temperature
    "best_of": 5,
    // Number of beams in beam search, only applicable when temperature is zero
    "beam_size": 5,
    // Optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search
    "patience": 1,
    // Optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default
    "length_penalty": null,
    // Comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuation
    "suppress_tokens": "-1",
    // Optional text to provide as a prompt for the first window
    "initial_prompt": null,
    // If True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop
    "condition_on_previous_text": true,
    // Whether to perform inference in fp16; True by default
    "fp16": true,
    // The compute type used by faster-whisper. Can be "int8", "int16" or "float16".
    "compute_type": "float16",
    // Temperature to increase when falling back when the decoding fails to meet either of the thresholds below
    "temperature_increment_on_fallback": 0.2,
    // If the gzip compression ratio is higher than this value, treat the decoding as failed
    "compression_ratio_threshold": 2.4,
    // If the average log probability is lower than this value, treat the decoding as failed
    "logprob_threshold": -1.0,
    // If the probability of the <no-speech> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence
    "no_speech_threshold": 0.6
}