File size: 26,634 Bytes
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80b2033
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a9f4026
 
 
10d4c29
a9f4026
 
 
 
 
 
10d4c29
 
 
 
 
 
 
 
 
 
a9f4026
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10d4c29
a9f4026
 
 
 
 
10d4c29
a9f4026
946de3e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10d4c29
 
 
b162b43
 
 
 
 
 
 
 
 
a9f4026
10d4c29
 
 
 
a9f4026
 
 
 
 
10d4c29
 
 
 
a9f4026
 
10d4c29
b162b43
 
10d4c29
b162b43
10d4c29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
deca852
10d4c29
b162b43
 
 
10d4c29
 
 
 
 
 
 
 
 
deca852
 
b162b43
 
10d4c29
 
 
 
 
 
 
 
deca852
 
10d4c29
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10d4c29
b162b43
a9f4026
10d4c29
 
 
 
 
a9f4026
10d4c29
 
 
 
 
 
 
b162b43
10d4c29
 
 
 
 
 
 
 
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
946de3e
b162b43
 
 
946de3e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
deca852
 
946de3e
b162b43
946de3e
 
 
 
 
 
 
deca852
 
b162b43
 
 
 
946de3e
b162b43
 
 
946de3e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
deca852
 
946de3e
b162b43
946de3e
 
 
 
 
 
 
 
deca852
b162b43
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
"""
# WebAPI文档

` python api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml `

## 执行参数:
    `-a` - `绑定地址, 默认"127.0.0.1"`
    `-p` - `绑定端口, 默认9880`
    `-c` - `TTS配置文件路径, 默认"GPT_SoVITS/configs/tts_infer.yaml"`

## 调用:

### 推理

endpoint: `/tts`
GET:
```
http://127.0.0.1:9880/tts?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_lang=zh&ref_audio_path=archive_jingyuan_1.wav&prompt_lang=zh&prompt_text=我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可&text_split_method=cut5&batch_size=1&media_type=wav&streaming_mode=true
```

POST:
```json
{
    "text": "",                   # str.(required) text to be synthesized
    "text_lang": "",              # str.(required) language of the text to be synthesized
    "ref_audio_path": "",         # str.(required) reference audio path
    "aux_ref_audio_paths": [],    # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
    "prompt_text": "",            # str.(optional) prompt text for the reference audio
    "prompt_lang": "",            # str.(required) language of the prompt text for the reference audio
    "top_k": 5,                   # int. top k sampling
    "top_p": 1,                   # float. top p sampling
    "temperature": 1,             # float. temperature for sampling
    "text_split_method": "cut0",  # str. text split method, see text_segmentation_method.py for details.
    "batch_size": 1,              # int. batch size for inference
    "batch_threshold": 0.75,      # float. threshold for batch splitting.
    "split_bucket": True,         # bool. whether to split the batch into multiple buckets.
    "speed_factor":1.0,           # float. control the speed of the synthesized audio.
    "streaming_mode": False,      # bool. whether to return a streaming response.
    "seed": -1,                   # int. random seed for reproducibility.
    "parallel_infer": True,       # bool. whether to use parallel inference.
    "repetition_penalty": 1.35,   # float. repetition penalty for T2S model.
    "sample_steps": 32,           # int. number of sampling steps for VITS model V3.
    "super_sampling": False,       # bool. whether to use super-sampling for audio when using VITS model V3.
}
```

RESP:
成功: 直接返回 wav 音频流, http code 200
失败: 返回包含错误信息的 json, http code 400

### 命令控制

endpoint: `/control`

command:
"restart": 重新运行
"exit": 结束运行

GET:
```
http://127.0.0.1:9880/control?command=restart
```
POST:
```json
{
    "command": "restart"
}
```

RESP: 无


### 切换GPT模型

endpoint: `/set_gpt_weights`

GET:
```
http://127.0.0.1:9880/set_gpt_weights?weights_path=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
```
RESP: 
成功: 返回"success", http code 200
失败: 返回包含错误信息的 json, http code 400


### 切换Sovits模型

endpoint: `/set_sovits_weights`

GET:
```
http://127.0.0.1:9880/set_sovits_weights?weights_path=GPT_SoVITS/pretrained_models/s2G488k.pth
```

RESP: 
成功: 返回"success", http code 200
失败: 返回包含错误信息的 json, http code 400
    
"""
import os
import sys
import traceback
from typing import Generator
import torch

now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))

import argparse
import subprocess
import wave
import signal
import numpy as np
import soundfile as sf
from fastapi import FastAPI, Request, HTTPException, Response
from fastapi.responses import StreamingResponse, JSONResponse
from fastapi import FastAPI, UploadFile, File
import uvicorn
from io import BytesIO
from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
# print(sys.path)
i18n = I18nAuto()
cut_method_names = get_cut_method_names()

import os
import sys
import traceback
from typing import Generator, Tuple
import requests
import tempfile
import urllib.parse
from pathlib import Path

# Function to check if a path is a URL and download it if needed
# Function to check if a path is a URL and download it if needed
def process_audio_path(audio_path) -> Tuple[str, bool]:
    """
    Resolve an audio path, downloading it to a local temp file when it is a URL.

    Args:
        audio_path (str): Local path or http(s)/s3 URL of an audio file.

    Returns:
        Tuple[str, bool]: (local_path, is_temporary). ``is_temporary`` is True
        only when the file was downloaded here, so the caller must delete it.

    Raises:
        Exception: when the download fails (wraps the underlying error).
    """
    # Local paths (and empty/None) need no processing.
    if not audio_path or not audio_path.startswith(("http://", "https://", "s3://")):
        return audio_path, False  # Not a temporary file

    try:
        # Keep all downloads under one temp directory next to the app.
        temp_dir = os.path.join(now_dir, "temp_audio")
        os.makedirs(temp_dir, exist_ok=True)

        # Derive a filename from the URL path; fall back to a hash-based name.
        parsed_url = urllib.parse.urlparse(audio_path)
        filename = os.path.basename(parsed_url.path)
        if not filename:
            filename = f"temp_audio_{hash(audio_path)}.wav"

        local_path = os.path.join(temp_dir, filename)

        if audio_path.startswith('s3://'):
            # Placeholder: S3 support would use boto3, e.g.
            #   s3_client.download_file(parsed_url.netloc, parsed_url.path.lstrip('/'), local_path)
            print(f"Downloading from S3: {audio_path}")
            raise NotImplementedError("S3 download not implemented. Add boto3 library and implementation.")

        # HTTP/HTTPS download, streamed to disk in chunks.
        print(f"Downloading from URL: {audio_path}")
        # timeout added so a dead/slow server cannot hang the request forever
        response = requests.get(audio_path, stream=True, timeout=30)
        response.raise_for_status()
        with open(local_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        print(f"Downloaded to: {local_path}")
        return local_path, True  # Temporary file: caller is responsible for cleanup
    except Exception as e:
        print(f"Error downloading audio file: {e}")
        # Chain the cause so the original traceback is preserved.
        raise Exception(f"Failed to download audio from URL: {e}") from e

# Function to process weight files (similar to process_audio_path)
def process_weights_path(weights_path) -> Tuple[str, bool]:
    """
    Process a weights path, downloading it if it's a URL.
    
    Args:
        weights_path (str): Path or URL to weights file
        
    Returns:
        Tuple[str, bool]: (local_path, is_temporary)
    """
    if weights_path and (weights_path.startswith('http://') or weights_path.startswith('https://') or 
                        weights_path.startswith('s3://')):
        try:
            # Create temp directory if it doesn't exist
            temp_dir = os.path.join(now_dir, "temp_weights")
            os.makedirs(temp_dir, exist_ok=True)
            
            # Generate a filename from the URL
            parsed_url = urllib.parse.urlparse(weights_path)
            filename = os.path.basename(parsed_url.path)
            if not filename:
                filename = f"temp_weights_{hash(weights_path)}.pth"
            
            # Full path for downloaded file
            local_path = os.path.join(temp_dir, filename)
            
            # Download file
            if weights_path.startswith('s3://'):
                # S3 implementation placeholder
                print(f"Downloading from S3: {weights_path}")
                raise NotImplementedError("S3 download not implemented. Add boto3 library and implementation.")
            else:
                # HTTP/HTTPS download
                print(f"Downloading weights from URL: {weights_path}")
                response = requests.get(weights_path, stream=True)
                response.raise_for_status()
                with open(local_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
            
            print(f"Downloaded weights to: {local_path}")
            return local_path, True  # Return path and flag indicating it's temporary
        except Exception as e:
            print(f"Error downloading weights file: {e}")
            raise Exception(f"Failed to download weights from URL: {e}")
    
    # If not a URL or download failed, return the original path
    return weights_path, False  # Not a temporary file

# ---- CLI arguments & global TTS pipeline setup ----
parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="tts_infer路径")
parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
# Default was the string "9880"; pass the int directly so type/default agree.
parser.add_argument("-p", "--port", type=int, default=9880, help="default: 9880")
args = parser.parse_args()
config_path = args.tts_config
# device = args.device
port = args.port
host = args.bind_addr
argv = sys.argv  # saved so the /control "restart" command can re-exec with identical args

if config_path in [None, ""]:
    # Fallback config path. Fixed: previously used "GPT-SoVITS" (hyphen),
    # inconsistent with the "GPT_SoVITS" directory used everywhere else.
    config_path = "GPT_SoVITS/configs/tts_infer.yaml"

tts_config = TTS_Config(config_path)
print(tts_config)
tts_pipeline = TTS(tts_config)

APP = FastAPI()
class TTS_Request(BaseModel):
    """Pydantic body schema for POST /tts; fields mirror the GET query params."""
    text: str = None                      # (required) text to synthesize
    text_lang: str = None                 # (required) language code of `text`
    ref_audio_path: str = None            # (required) reference audio path or URL
    aux_ref_audio_paths: list = None      # optional extra references for multi-speaker tone fusion
    prompt_lang: str = None               # (required) language of `prompt_text`
    prompt_text: str = ""                 # optional transcript of the reference audio
    top_k:int = 5                         # top-k sampling
    top_p:float = 1                       # top-p (nucleus) sampling
    temperature:float = 1                 # sampling temperature
    text_split_method:str = "cut5"        # see text_segmentation_method.py for options
    batch_size:int = 1                    # inference batch size
    batch_threshold:float = 0.75          # threshold for batch splitting
    split_bucket:bool = True              # whether to split the batch into buckets
    speed_factor:float = 1.0              # speed of the synthesized audio
    fragment_interval:float = 0.3         # interval between fragments — presumably seconds; confirm in TTS pipeline
    seed:int = -1                         # RNG seed; -1 means random
    media_type:str = "wav"                # one of: wav / raw / ogg / aac
    streaming_mode:bool = False           # stream chunks instead of a single response
    parallel_infer:bool = True            # whether to use parallel inference
    repetition_penalty:float = 1.35       # repetition penalty for the T2S model
    sample_steps:int = 32                 # sampling steps (VITS model V3)
    super_sampling:bool = False           # audio super-sampling (VITS model V3)

### modify from https://github.com/RVC-Boss/GPT-SoVITS/pull/894/files
def pack_ogg(io_buffer:BytesIO, data:np.ndarray, rate:int):
    with sf.SoundFile(io_buffer, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
        audio_file.write(data)
    return io_buffer


def pack_raw(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Append bare PCM bytes (no container) to *io_buffer* and return it.

    *rate* is unused; it is kept so all pack_* helpers share one signature.
    """
    pcm_bytes = data.tobytes()
    io_buffer.write(pcm_bytes)
    return io_buffer


def pack_wav(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Encode PCM samples as a complete WAV file into *io_buffer* and return it.

    Fix: the previous version rebound ``io_buffer`` to a fresh BytesIO and
    silently discarded the caller's buffer — inconsistent with every other
    pack_* helper. The provided buffer is now written to directly.
    """
    sf.write(io_buffer, data, rate, format='wav')
    return io_buffer

def pack_aac(io_buffer: BytesIO, data: np.ndarray, rate: int):
    """Encode PCM samples to an ADTS/AAC stream by piping through ffmpeg; returns *io_buffer*."""
    ffmpeg_cmd = [
        'ffmpeg',
        '-f', 's16le',     # input: signed 16-bit little-endian PCM
        '-ar', str(rate),  # input sample rate
        '-ac', '1',        # mono
        '-i', 'pipe:0',    # read PCM from stdin
        '-c:a', 'aac',     # encode with the AAC codec
        '-b:a', '192k',    # target bitrate
        '-vn',             # audio only, no video stream
        '-f', 'adts',      # ADTS (raw AAC) container
        'pipe:1',          # write encoded output to stdout
    ]
    encoder = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    encoded, _ = encoder.communicate(input=data.tobytes())
    io_buffer.write(encoded)
    return io_buffer

def pack_audio(io_buffer: BytesIO, data: np.ndarray, rate: int, media_type: str):
    """Encode *data* into *io_buffer* per *media_type* ("ogg"/"aac"/"wav";
    anything else falls back to raw PCM), rewind the buffer, and return it."""
    if media_type == "ogg":
        packed = pack_ogg(io_buffer, data, rate)
    elif media_type == "aac":
        packed = pack_aac(io_buffer, data, rate)
    elif media_type == "wav":
        packed = pack_wav(io_buffer, data, rate)
    else:
        packed = pack_raw(io_buffer, data, rate)
    packed.seek(0)  # callers read the encoded bytes from the start
    return packed



# from https://huggingface.co/spaces/coqui/voice-chat-with-mistral/blob/main/app.py
def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000):
    """Build a RIFF/WAVE header (optionally followed by initial frames) for a
    streaming WAV response.

    Only the first chunk of a stream should carry this header; subsequent
    chunks must be raw PCM, otherwise each chunk boundary produces an audible
    artifact.
    """
    header_buf = BytesIO()
    writer = wave.open(header_buf, "wb")
    try:
        writer.setnchannels(channels)
        writer.setsampwidth(sample_width)
        writer.setframerate(sample_rate)
        writer.writeframes(frame_input)
    finally:
        writer.close()

    header_buf.seek(0)
    return header_buf.read()


def handle_control(command: str):
    """Execute a /control command: "restart" re-execs this process with the
    saved argv; "exit" sends SIGTERM to the current process. Unknown commands
    are silently ignored."""
    if command == "exit":
        os.kill(os.getpid(), signal.SIGTERM)
        exit(0)
    if command == "restart":
        os.execl(sys.executable, sys.executable, *argv)


def check_params(req: dict):
    """Validate a /tts request dict.

    Returns a 400 JSONResponse describing the first problem found, or None
    when the request is acceptable.
    """
    def bad(message: str):
        # Uniform 400 error payload used by every validation failure below.
        return JSONResponse(status_code=400, content={"message": message})

    text: str = req.get("text", "")
    text_lang: str = req.get("text_lang", "")
    ref_audio_path: str = req.get("ref_audio_path", "")
    streaming_mode: bool = req.get("streaming_mode", False)
    media_type: str = req.get("media_type", "wav")
    prompt_lang: str = req.get("prompt_lang", "")
    text_split_method: str = req.get("text_split_method", "cut5")

    if ref_audio_path in [None, ""]:
        return bad("ref_audio_path is required")
    if text in [None, ""]:
        return bad("text is required")
    if text_lang in [None, ""]:
        return bad("text_lang is required")
    if text_lang.lower() not in tts_config.languages:
        return bad(f"text_lang: {text_lang} is not supported in version {tts_config.version}")
    if prompt_lang in [None, ""]:
        return bad("prompt_lang is required")
    if prompt_lang.lower() not in tts_config.languages:
        return bad(f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}")
    if media_type not in ["wav", "raw", "ogg", "aac"]:
        return bad(f"media_type: {media_type} is not supported")
    if media_type == "ogg" and not streaming_mode:
        return bad("ogg format is not supported in non-streaming mode")
    if text_split_method not in cut_method_names:
        return bad(f"text_split_method:{text_split_method} is not supported")

    return None

async def tts_handle(req:dict):
    """
    Text to speech handler.

    Validates `req` (see check_params), resolves any URL reference-audio
    paths to local temp files, runs the TTS pipeline, and returns either a
    StreamingResponse (streaming_mode) or a single Response with the encoded
    audio. On validation failure or any exception, returns a 400 JSONResponse.
    Downloaded temp files are always removed — after streaming completes, after
    one-shot generation, or on error.
    """
    
    streaming_mode = req.get("streaming_mode", False)
    return_fragment = req.get("return_fragment", False)
    media_type = req.get("media_type", "wav")
    temp_files = []  # Track temporary files for cleanup

    print(f"----------现在使用的模型版本是:{tts_config.version}----------")

    check_res = check_params(req)
    if check_res is not None:
        # Validation failed: check_res is already a 400 JSONResponse.
        return check_res

    if streaming_mode or return_fragment:
        req["return_fragment"] = True
    
    try:
        # Process ref_audio_path (download if it's a URL)
        ref_path, is_temp = process_audio_path(req["ref_audio_path"])
        req["ref_audio_path"] = ref_path
        if is_temp:
            temp_files.append(ref_path)
        
        # Process aux_ref_audio_paths (download if they're URLs)
        if req.get("aux_ref_audio_paths"):
            aux_paths = []
            for aux_path in req["aux_ref_audio_paths"]:
                local_path, is_temp = process_audio_path(aux_path)
                aux_paths.append(local_path)
                if is_temp:
                    temp_files.append(local_path)
            req["aux_ref_audio_paths"] = aux_paths
        
        # The pipeline yields (sample_rate, audio_chunk) tuples.
        tts_generator = tts_pipeline.run(req)
        
        if streaming_mode:
            async def streaming_generator(tts_generator:Generator, media_type:str):
                # Only the first chunk of a wav stream carries the RIFF header;
                # after that we switch to raw PCM to avoid per-chunk artifacts.
                if_frist_chunk = True  # NOTE: "frist" typo kept (local name only)
                try:
                    for sr, chunk in tts_generator:
                        if if_frist_chunk and media_type == "wav":
                            yield wave_header_chunk(sample_rate=sr)
                            media_type = "raw"
                            if_frist_chunk = False
                        yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue()
                finally:
                    # Clean up temporary files after streaming completes
                    for temp_file in temp_files:
                        try:
                            if os.path.exists(temp_file):
                                os.remove(temp_file)
                                print(f"Removed temporary file: {temp_file}")
                        except Exception as e:
                            print(f"Error removing temporary file {temp_file}: {e}")
            torch.cuda.empty_cache()
            return StreamingResponse(streaming_generator(tts_generator, media_type), media_type=f"audio/{media_type}")
        else:
            # One-shot mode: take the single (sr, audio) item from the generator.
            sr, audio_data = next(tts_generator)
            audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue()
            
            # Clean up temporary files after generation completes
            for temp_file in temp_files:
                try:
                    if os.path.exists(temp_file):
                        os.remove(temp_file)
                        print(f"Removed temporary file: {temp_file}")
                except Exception as e:
                    print(f"Error removing temporary file {temp_file}: {e}")

            torch.cuda.empty_cache()
            return Response(audio_data, media_type=f"audio/{media_type}")
    except Exception as e:
        # Clean up temporary files in case of error
        for temp_file in temp_files:
            try:
                if os.path.exists(temp_file):
                    os.remove(temp_file)
                    print(f"Removed temporary file: {temp_file}")
            except Exception as cleanup_error:
                print(f"Error removing temporary file {temp_file}: {cleanup_error}")

        torch.cuda.empty_cache()
        return JSONResponse(status_code=400, content={"message": f"tts failed", "Exception": str(e)})    





@APP.get("/control")
async def control(command: str = None):
    """Restart or shut down the server process; see handle_control for the commands."""
    if command is not None:
        handle_control(command)
        return None
    return JSONResponse(status_code=400, content={"message": "command is required"})



@APP.get("/tts")
async def tts_get_endpoint(
                        text: str = None,
                        text_lang: str = None,
                        ref_audio_path: str = None,
                        aux_ref_audio_paths:list = None,
                        prompt_lang: str = None,
                        prompt_text: str = "",
                        top_k:int = 5,
                        top_p:float = 1,
                        temperature:float = 1,
                        text_split_method:str = "cut0",
                        batch_size:int = 1,
                        batch_threshold:float = 0.75,
                        split_bucket:bool = True,
                        speed_factor:float = 1.0,
                        fragment_interval:float = 0.3,
                        seed:int = -1,
                        media_type:str = "wav",
                        streaming_mode:bool = False,
                        parallel_infer:bool = True,
                        repetition_penalty:float = 1.35,
                        sample_steps:int =32,
                        super_sampling:bool = False
                        ):
    """GET /tts: build a request dict from the query parameters and delegate
    to tts_handle (which validates and runs the pipeline).

    Fix: .lower() is now only applied when the language params are present —
    previously a missing text_lang/prompt_lang raised AttributeError (HTTP 500)
    instead of letting check_params return a clean 400.
    """
    req = {
        "text": text,
        "text_lang": text_lang.lower() if text_lang else text_lang,
        "ref_audio_path": ref_audio_path,
        "aux_ref_audio_paths": aux_ref_audio_paths,
        "prompt_text": prompt_text,
        "prompt_lang": prompt_lang.lower() if prompt_lang else prompt_lang,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "text_split_method": text_split_method,
        "batch_size": int(batch_size),
        "batch_threshold": float(batch_threshold),
        "speed_factor": float(speed_factor),
        "split_bucket": split_bucket,
        "fragment_interval": fragment_interval,
        "seed": seed,
        "media_type": media_type,
        "streaming_mode": streaming_mode,
        "parallel_infer": parallel_infer,
        "repetition_penalty": float(repetition_penalty),
        "sample_steps": int(sample_steps),
        "super_sampling": super_sampling
    }
    return await tts_handle(req)
                

@APP.post("/tts")
async def tts_post_endpoint(request: TTS_Request):
    """POST /tts: the JSON body (TTS_Request) mirrors the GET query parameters."""
    payload = request.dict()
    return await tts_handle(payload)


@APP.get("/set_refer_audio")
async def set_refer_aduio(refer_audio_path: str = None):
    """Set the default reference audio for subsequent /tts calls.

    Accepts a local path or a URL (downloaded via process_audio_path). A
    downloaded temp file is removed right after the pipeline loads it.
    NOTE: the handler name keeps the original "aduio" typo; the route path
    itself is spelled correctly and is what callers depend on.
    """
    temp_file = None
    try:
        # Process the path (download if it's a URL)
        local_path, is_temp = process_audio_path(refer_audio_path)
        if is_temp:
            temp_file = local_path
        
        # Store reference to the audio
        tts_pipeline.set_ref_audio(local_path)
        
        # If temporary, remove after setting (since TTS pipeline should load the audio into memory)
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file)
            print(f"Removed temporary file: {temp_file}")
            
        return JSONResponse(status_code=200, content={"message": "success"})
    except Exception as e:
        # Clean up temp file in case of error
        if temp_file and os.path.exists(temp_file):
            try:
                os.remove(temp_file)
                print(f"Removed temporary file: {temp_file}")
            except Exception as cleanup_error:
                print(f"Error removing temporary file {temp_file}: {cleanup_error}")
                
        return JSONResponse(status_code=400, content={"message": f"set refer audio failed", "Exception": str(e)})


# @APP.post("/set_refer_audio")
# async def set_refer_aduio_post(audio_file: UploadFile = File(...)):
#     try:
#         # 检查文件类型,确保是音频文件
#         if not audio_file.content_type.startswith("audio/"):
#             return JSONResponse(status_code=400, content={"message": "file type is not supported"})
        
#         os.makedirs("uploaded_audio", exist_ok=True)
#         save_path = os.path.join("uploaded_audio", audio_file.filename)
#         # 保存音频文件到服务器上的一个目录
#         with open(save_path , "wb") as buffer:
#             buffer.write(await audio_file.read())
            
#         tts_pipeline.set_ref_audio(save_path)
#     except Exception as e:
#         return JSONResponse(status_code=400, content={"message": f"set refer audio failed", "Exception": str(e)})
#     return JSONResponse(status_code=200, content={"message": "success"})

@APP.get("/set_gpt_weights")
async def set_gpt_weights(weights_path: str = None):
    """Hot-swap the GPT (text-to-semantic) weights.

    Accepts a local path or a URL (downloaded via process_weights_path).
    Returns 200 {"message": "success"} on success, 400 with the exception
    text on failure. A downloaded temp file is deleted after loading.
    """
    temp_file = None
    try:
        if weights_path in ["", None]:
            return JSONResponse(status_code=400, content={"message": "gpt weight path is required"})
        
        # Process the path (download if it's a URL)
        local_path, is_temp = process_weights_path(weights_path)
        if is_temp:
            temp_file = local_path
            
        # Load the weights
        tts_pipeline.init_t2s_weights(local_path)
        
        # Clean up if it was a temporary file
        # Note: Depending on how init_t2s_weights works, you might need to keep the file
        # If the function loads the file into memory, you can delete it right away
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file)
            print(f"Removed temporary weights file: {temp_file}")
        
        torch.cuda.empty_cache()    
        return JSONResponse(status_code=200, content={"message": "success"})
    except Exception as e:
        # Clean up temp file in case of error
        if temp_file and os.path.exists(temp_file):
            try:
                os.remove(temp_file)
                print(f"Removed temporary weights file: {temp_file}")
            except Exception as cleanup_error:
                print(f"Error removing temporary file {temp_file}: {cleanup_error}")

        torch.cuda.empty_cache()
        return JSONResponse(status_code=400, content={"message": f"change gpt weight failed", "Exception": str(e)})

@APP.get("/set_sovits_weights")
async def set_sovits_weights(weights_path: str = None):
    """Hot-swap the SoVITS (vocoder/VITS) weights.

    Accepts a local path or a URL (downloaded via process_weights_path).
    Returns 200 {"message": "success"} on success, 400 with the exception
    text on failure. A downloaded temp file is deleted after loading.
    """
    temp_file = None
    try:
        if weights_path in ["", None]:
            return JSONResponse(status_code=400, content={"message": "sovits weight path is required"})
        
        # Process the path (download if it's a URL)
        local_path, is_temp = process_weights_path(weights_path)
        if is_temp:
            temp_file = local_path
            
        # Load the weights
        tts_pipeline.init_vits_weights(local_path)
        
        # Clean up if it was a temporary file
        # Note: Depending on how init_vits_weights works, you might need to keep the file
        # If the function loads the file into memory, you can delete it right away
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file)
            print(f"Removed temporary weights file: {temp_file}")

        torch.cuda.empty_cache()
        return JSONResponse(status_code=200, content={"message": "success"})
    except Exception as e:
        # Clean up temp file in case of error
        if temp_file and os.path.exists(temp_file):
            try:
                os.remove(temp_file)
                print(f"Removed temporary weights file: {temp_file}")
            except Exception as cleanup_error:
                print(f"Error removing temporary file {temp_file}: {cleanup_error}")
                
        torch.cuda.empty_cache()       
        return JSONResponse(status_code=400, content={"message": f"change sovits weight failed", "Exception": str(e)})



if __name__ == "__main__":
    try:
        # Passing "-a None" on the command line binds both IPv4 and IPv6 (dual-stack).
        if host == 'None':
            host = None
        uvicorn.run(app=APP, host=host, port=port, workers=1)
    except Exception:
        # `e` was bound but never used; print the traceback and force the
        # process to terminate so no worker lingers after a startup failure.
        traceback.print_exc()
        os.kill(os.getpid(), signal.SIGTERM)
        exit(0)