
from pydub import AudioSegment
from pydub.silence import detect_silence
from snownlp import SnowNLP
import pandas as pd
import sys,os,shutil
import json
sys.path.append(r"E:\python-knowledge\6项目实战\AI工具\Ollama")   
from OllamaAPI import OllamaAPI
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
import librosa
import soundfile as sf
import subprocess
import re

def split_audio(file_path, output_folder, segment_length=15000):
    """Split an audio file into fixed-length segments.

    Parameters:
        file_path: path of the source audio file
        output_folder: directory the segments are written to (created if missing)
        segment_length: segment duration in milliseconds (default 15000 = 15 s)
    """
    try:
        audio = AudioSegment.from_file(file_path)
        os.makedirs(output_folder, exist_ok=True)

        # Keep the source file's extension for every exported segment.
        extension = os.path.splitext(file_path)[1][1:]
        duration = len(audio)
        for index, start in enumerate(range(0, duration, segment_length), start=1):
            # Clamp the final slice so it never runs past the end of the audio.
            chunk = audio[start:min(start + segment_length, duration)]
            target = os.path.join(output_folder, f"segment_{index}.{extension}")
            chunk.export(target, format=extension)
        print("音频切分完成。")
    except Exception as e:
        print(f"发生错误: {e}")

def mp3_to_wav(mp3_path, wav_path):
    """Convert an MP3 file to a WAV file.

    :param mp3_path: path of the input MP3 file
    :param wav_path: path of the output WAV file
    """
    # Decode the MP3, then re-encode with pydub's WAV defaults
    # (44100 Hz, 16-bit, stereo).
    AudioSegment.from_mp3(mp3_path).export(wav_path, format="wav")

    print(f"转换完成：{wav_path}")

# 示例用法
# mp3_to_wav(r"E:\test\音频资源\佟湘玉.MP3", r"E:\test\音频资源\佟湘玉.wav")

def change_volume(save_path, target_dbfs: int = -22):
    """Normalize an audio file's loudness to a target dBFS level, in place.

    :param save_path: path of the audio file (mp3 or wav); overwritten on export
    :param target_dbfs: desired average loudness in dBFS (default -22)
    """
    # Decide the codec from the actual file extension, not from a substring of
    # the whole path (a directory named "x.mp3" must not trigger MP3 decoding),
    # and lower-case it so "FILE.MP3" exports with a valid format name.
    extension = os.path.splitext(save_path)[1][1:].lower()
    if extension == "mp3":
        audio = AudioSegment.from_mp3(save_path)
    else:
        audio = AudioSegment.from_wav(save_path)
    dBFS_delta = target_dbfs - audio.dBFS
    print(f"调整音量： {dBFS_delta}")
    # apply_gain shifts the whole signal by the computed delta.
    normalized_audio = audio.apply_gain(dBFS_delta)

    normalized_audio.export(save_path, format=extension)



def shorten_silence(input_file, output_file):
    """Collapse long pauses in an audio file.

    Silent stretches longer than 3 s are shortened to exactly 1 s; shorter
    silences are kept unchanged.

    :param input_file: path of the source audio file
    :param output_file: path the processed audio is written to
    """
    audio = AudioSegment.from_file(input_file)
    # Silence = at least 100 ms below the -40 dBFS threshold.
    silence_ranges = detect_silence(audio, min_silence_len=100, silence_thresh=-40)
    new_audio = AudioSegment.empty()
    start_point = 0
    for start, end in silence_ranges:
        if (end - start) > 3000:
            # Keep the audio leading up to the pause, then only 1 s of silence.
            new_audio += audio[start_point:start]
            new_audio += AudioSegment.silent(duration=1000)
        else:
            # Short pause: keep it as-is.
            new_audio += audio[start_point:end]
        start_point = end
    new_audio += audio[start_point:]
    # Bug fix: derive the export format from the *output* file name — the
    # original used the input extension, mislabeling e.g. wav->mp3 conversions.
    new_audio.export(output_file, format=os.path.splitext(output_file)[1][1:])

# 示例调用
# input_file = 'input.wav'
# output_file = 'output.wav'
# shorten_silence(input_file, output_file)

def add_silence_with_pydub(input_file, output_file, silence_duration=2):
    """Append a stretch of silence to the end of an audio file using pydub.

    Parameters:
        input_file: path of the source audio file
        output_file: path of the resulting audio file
        silence_duration: length of the appended silence in seconds (default 2)

    Returns:
        True on success, False if any step failed.
    """
    try:
        source = AudioSegment.from_file(input_file)
        # pydub measures durations in milliseconds.
        padded = source + AudioSegment.silent(duration=silence_duration * 1000)
        padded.export(output_file, format=output_file.split('.')[-1])
        print(f"成功添加 {silence_duration} 秒静音到 {input_file}，输出至 {output_file}")
        return True
    except Exception as e:
        print(f"发生错误: {e}")
        return False


def analyze_tone_and_speed(text):
    """Heuristically estimate pitch (音调) and speaking speed (语速) for a line.

    Both values are integers on a 1-5 scale derived from SnowNLP sentiment,
    punctuation, and sentence length.

    :param text: the sentence to analyze
    :return: dict with keys 角色/音调/语速/感情 (role is always 无, emotion 正常)
    """
    # Sentiment score in [0, 1]: high -> positive, low -> negative.
    sentiment = SnowNLP(text).sentiments
    if sentiment > 0.7:
        tone = 4
    elif sentiment < 0.3:
        tone = 2
    else:
        tone = 3

    # Speed heuristics; exclamations and questions also override the tone.
    if len(text) <= 5 or "！" in text:
        speed, tone = 5, 5
    elif "？" in text:
        speed, tone = 4, 4
    elif "……" in text:
        speed = 1  # trailing ellipsis reads slowly
    elif len(text) > 10 and "。" not in text:
        speed = 4  # long run-on clause without a full stop
    elif len(text) < 10:
        speed = 4
    else:
        speed = 3

    return {"角色": "无", "音调": tone, "语速": speed, "感情": "正常"}

def analyze_tone_and_speed2(text: str, pretext, actor=False):
    """Use an Ollama-hosted LLM to infer speaker, speed, pitch, and emotion.

    :param text: the script line to analyze
    :param pretext: the full script text supplied to the model as context
    :param actor: kept for backward compatibility; currently unused
    :return: dict with keys 角色/语句/语速/音调/感情, where 语速 and 音调 are
             ints 1-5; falls back to neutral defaults when the model reply
             cannot be parsed.
    """
    ollama_api = OllamaAPI(model_name="qwen3:8b")
    prompt = """请结合剧本原文，分析剧本台词的语速、音调、感情色彩，并结合场景设定、前后文对话逻辑、角色动作提示、人物关系网推断说话者。若缺乏直接指向性线索（如无动作提示 / 对话承接 / 角色在场证明），请返回「无」。
    剧本原文为:\n %s \n
    分析的剧本台词为：\n%s \n。
    不要返回分析，只返回内容为JSON格式如下：
    {"角色":"（角色名/无）","语句":"（分析的语句）","语速":"(1-5)","音调":"（1-5）","感情":"（正常/愤怒/悲伤/高兴/惊讶/恐惧/其他）"} 的内容。"""%(pretext,text)
    options = {"temperature": 0.7, "top_p": 0.9}
    response = ollama_api.generate_text(prompt, options)
    # Drop the model's chain-of-thought; keep only what follows </think>.
    result = response.split("</think>")[-1]
    try:
        # Raw string fixes the invalid "\{" escape of the original; checking
        # for a missing match explicitly replaces the accidental AttributeError.
        match = re.search(r"\{.+?\}", result, re.DOTALL)
        if match is None:
            raise ValueError("no JSON object found in model reply")
        voice_dict = json.loads(match.group(0))
        voice_dict["语速"] = int(voice_dict["语速"])
        voice_dict["音调"] = int(voice_dict["音调"])
        voice_dict["语句"] = "“"+voice_dict["语句"] +"”"
    except Exception as e:
        print(e)
        print(">>>>>>>>>>>>>推理异常：",result)
        voice_dict = {"角色":"无","语句":text,"语速":3,"音调":3,"感情":"正常"}

    return voice_dict

def adjust_speed_and_pitch(input_file, output_file, speed_factor=1.0, pitch_factor=0):
    """Re-encode an audio file at a different speed via ffmpeg's atempo filter.

    Parameters:
        input_file (str): input audio file path
        output_file (str): output audio file path
        speed_factor (float): tempo multiplier (1.0 = unchanged, >1 faster, <1 slower)
        pitch_factor (int/float): accepted for API compatibility but currently
            ignored — no suitable ffmpeg pitch filter has been wired up yet

    Returns:
        bool: True on success, False if ffmpeg failed or is not installed.
    """
    # Pass an argument list with shell=False so paths containing spaces or
    # shell metacharacters cannot break (or inject into) the command line.
    cmd = [
        "ffmpeg", "-loglevel", "quiet", "-y",
        "-i", input_file,
        "-filter:a", f"atempo={speed_factor}",
        output_file,
    ]
    try:
        result = subprocess.run(
            cmd,
            capture_output=True,  # capture stdout and stderr
            text=True,            # return output as text
        )
    except FileNotFoundError:
        # Without a shell, a missing ffmpeg binary raises instead of
        # returning exit code 127; keep the original False-return contract.
        print("命令执行失败！返回码: 127")
        print("错误信息: ffmpeg 未找到，请确认已安装并加入 PATH。")
        return False

    if result.returncode != 0:
        print(f"命令执行失败！返回码: {result.returncode}")
        print(f"错误信息: {result.stderr}")
        print(" ".join(cmd))
        return False
    else:
        return True
        

def wav_to_aac(wav_path, img_path, output_path, duration=None, fps=24):
    """Render a WAV file over a static image as an H.264/AAC video file.

    :param wav_path: path of the input WAV audio
    :param img_path: path of the still image shown for the whole video
    :param output_path: path of the resulting video file (e.g. .mp4)
    :param duration: video length in seconds; defaults to the audio's duration
    :param fps: output video frame rate (default 24)
    """
    # Load the audio track.
    audio_clip = AudioFileClip(wav_path)

    # Default the video length to the audio length.
    if duration is None:
        duration = audio_clip.duration

    # Bug fix: the original hard-coded durations=10 and fps=24 here, ignoring
    # the computed duration and the fps parameter. Show the single image for
    # the full clip length instead.
    image_clip = ImageSequenceClip([img_path], durations=[duration])

    # Attach the audio track.
    video_clip = image_clip.with_audio(audio_clip)

    # Apply the requested length and frame rate.
    video_clip = video_clip.with_duration(duration)
    video_clip = video_clip.with_fps(fps)

    # H.264 video encoding, AAC audio encoding.
    video_clip.write_videofile(output_path, codec="libx264", audio_codec="aac")

    # Release all clip resources.
    audio_clip.close()
    image_clip.close()
    video_clip.close()

    print(f"视频已成功保存到: {output_path}")

if __name__ == "__main__":
    # Render every WAV file onto a static image and export each one as an
    # MP4 (H.264 video + AAC audio).
    input_dir = r"E:\txt\网游之无限\wav"
    output_dir = r"E:\txt\网游之无限\aac"
    img_path = r"e:\txt\ComfyUI_00013_.png"
    for file_name in os.listdir(input_dir):
        wav_path = os.path.join(input_dir, file_name)
        acc_path = os.path.join(output_dir, file_name.replace(".wav", ".mp4"))
        wav_to_aac(wav_path=wav_path, img_path=img_path, output_path=acc_path, duration=None, fps=24)