import torch
import torchaudio
from torchaudio.pipelines import MMS_FA as bundle
from typing import List
import utils
import os
import csv

# Load the MMS forced-alignment pipeline: pick a device, then fetch the
# acoustic model, tokenizer, and aligner from the bundle.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = bundle.get_model()
model.to(device)  # move weights onto the selected device; inference happens there

tokenizer = bundle.get_tokenizer()
aligner = bundle.get_aligner()

# Forced-alignment helper
def compute_alignments(waveform: torch.Tensor, transcript: List[str]):
    """Run the acoustic model and align the transcript to its frames.

    Args:
        waveform: Audio tensor; moved onto the module-level ``device``.
        transcript: Transcript words to align against the audio.

    Returns:
        A ``(emission, token_spans)`` pair: the frame-wise emission tensor
        and the aligner's per-word token spans for the first batch item.
    """
    with torch.inference_mode():
        # Forward pass; the second output (lengths) is not needed here.
        frames, _ = model(waveform.to(device))
        spans = aligner(frames[0], tokenizer(transcript))
    return frames, spans

def spliter_to_pharse(audio_url, text, save_path_sph, save_path_stm):
    """Split one audio file into incremental phrase segments plus a CSV index.

    The transcript is divided into thirds. For each incremental position
    returned by ``utils.get_split_pos`` one WAV clip is saved — starting at
    the first word of the third the position falls in and ending at that
    position — and one row is written to the CSV.

    Args:
        audio_url: Path of the source audio file; its sample rate must equal
            ``bundle.sample_rate``.
        text: Normalized transcript, words separated by whitespace.
        save_path_sph: Directory receiving ``increment_segement_{i}.wav``
            clips (created if missing).
        save_path_stm: Path of the output CSV with columns
            ``path, prompt, transcript`` (its directory is created if missing).

    Raises:
        ValueError: If the audio's sample rate differs from the model's.
    """
    # Make sure both output locations exist.
    os.makedirs(save_path_sph, exist_ok=True)
    save_dir_stm = os.path.dirname(save_path_stm)
    if save_dir_stm:  # dirname is '' for a bare filename; makedirs('') raises
        os.makedirs(save_dir_stm, exist_ok=True)

    # Load the audio and keep only the first channel.
    waveform, sample_rate = torchaudio.load(audio_url)
    waveform = waveform[0:1]
    # Raise instead of assert: asserts are stripped under `python -O`.
    if sample_rate != bundle.sample_rate:
        raise ValueError(
            f"sample rate {sample_rate} != expected {bundle.sample_rate}"
        )

    # Split once; reused both for alignment and for building CSV rows.
    words = text.split()

    # Incremental split positions (1-based word indices) over the transcript.
    increment_pos = utils.get_split_pos(text, jump=3)

    # Align the audio with the transcript.
    emission, token_spans = compute_alignments(waveform, words)
    num_frames = emission.size(1)
    # Conversion factor from emission-frame index to raw-sample index.
    samples_per_frame = waveform.size(1) / num_frames

    # Word positions ending the first and second thirds (hoisted out of the loop).
    first_third = increment_pos[len(increment_pos) // 3]
    second_third = increment_pos[2 * len(increment_pos) // 3]

    # Write the CSV index alongside the audio clips.
    with open(save_path_stm, 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        # CSV header.
        writer.writerow(['path', 'prompt', 'transcript'])

        for i, pos in enumerate(increment_pos):
            # Clamp so we never index past the aligned spans.
            pos = min(pos, len(token_spans))

            # The clip starts at the first word of the third `pos` falls in.
            if pos <= first_third:
                _pos = 1
            elif pos <= second_third:
                _pos = first_third + 1
            else:
                _pos = second_third + 1

            # NOTE(review): the start offset reads the *last* token of word
            # `_pos` ([-1].start); if a word spans several tokens this skips
            # its leading tokens — confirm whether [0].start was intended.
            start = int(samples_per_frame * token_spans[_pos - 1][-1].start)
            end = int(samples_per_frame * token_spans[pos - 1][-1].end)

            # Save the cropped audio segment.
            out_audio = os.path.join(save_path_sph, f'increment_segement_{i}.wav')
            torchaudio.save(out_audio, waveform[:, start:end], sample_rate)

            if _pos == 1:
                # First third: no prompt, the whole prefix is the transcript.
                writer.writerow([out_audio, "", ' '.join(words[:pos])])
            else:
                # Insert a marker at the clip boundary, then split the joined
                # text into prompt (before the cut) and transcript (after it).
                text_out = words[:pos]
                text_out.insert(_pos - 1, "<cut>")
                parts = ' '.join(text_out).split("<cut>")
                writer.writerow([out_audio, parts[0], parts[1]])

    print(f"音频文件:{audio_url}增量化处理完毕，保存于:{save_path_sph}")
