import os
import yaml

import whisper
from openai import OpenAI


def set_env():
    """Load OpenAI credentials from conf/conf.yaml into the process environment.

    Keys absent from the config file are skipped: ``dict.get`` returns
    ``None`` for them, and ``os.environ[key] = None`` would raise
    ``TypeError`` (environment values must be strings).
    """
    # Read the configuration file; put your personal token in conf/conf.yaml.
    dic = read_yaml("conf.yaml")
    for key in ("OPENAI_API_KEY", "OPENAI_BASE_URL", "OPENAI_API_BASE"):
        value = dic.get(key)
        if value is not None:
            os.environ[key] = value


def read_yaml(yaml_file):
    """Parse a YAML file from the ``conf`` directory next to this module.

    Parameters
    ----------
    yaml_file : str
        File name inside the ``conf`` sub-directory, e.g. ``"conf.yaml"``.

    Returns
    -------
    The parsed YAML document (typically a dict; ``None`` for an empty file).
    """
    # os.path.join is the portable, idiomatic path join (was os.sep.join).
    conf_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf")
    file = os.path.join(conf_path, yaml_file)
    # ``with`` guarantees the handle is closed; the original leaked it.
    with open(file, encoding="utf-8") as f:
        return yaml.safe_load(f)


def test_openai_whisper():
    """Transcribe two local mp3 files via the OpenAI Whisper API and translate one.

    Reads the endpoint/key from conf/conf.yaml, sends ``demo1.mp3`` and
    ``demo2.mp3`` for transcription, then translates ``demo2.mp3`` to English,
    printing all three results.
    """
    dic = read_yaml("conf.yaml")
    # Build the OpenAI client directly from the config values.
    client = OpenAI(base_url=dic.get("OPENAI_BASE_URL"), api_key=dic.get("OPENAI_API_KEY"))
    # ``with`` closes the audio handles even if a request fails
    # (the original opened both files and never closed them).
    with open(r"demo1.mp3", 'rb') as audio_file1:
        # Pick the model and transcribe the audio content.
        res1 = client.audio.transcriptions.create(model="whisper-1", file=audio_file1)
    with open(r"demo2.mp3", 'rb') as audio_file2:
        res2 = client.audio.transcriptions.create(model="whisper-1", file=audio_file2)
        # The transcription call consumed the stream; rewind so the
        # translation request re-reads the full file instead of EOF.
        audio_file2.seek(0)
        # Translate to English.
        res3 = client.audio.translations.create(model="whisper-1", file=audio_file2)
    print(f"audio1转录结果为：{res1.text}")
    print(f"audio2转录结果为：{res2.text}")
    print(f"audio2翻译结果为：{res3.text}")


def test_with_whisper():
    """Run the local Whisper "base" model on demo2.mp3 and print the text."""
    # Initialize a base model.
    base_model = whisper.load_model("base")
    # Feed the audio file in; the recognized text is under the "text" key
    # of the returned result.
    result = base_model.transcribe(r"demo2.mp3")
    print(result["text"])
