# coding:utf-8
import os
import sys
import torch
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
if project_root not in sys.path:
    sys.path.append(project_root)
import streamlit as st
import librosa
import pandas as pd
from ipex_llm.transformers import AutoModelForCausalLM, AutoModelForSpeechSeq2Seq
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, AutoProcessor, WhisperProcessor
import time

def parse_plan_to_table(plan_text: str) -> pd.DataFrame:
    """Parse a pipe-delimited Markdown travel-plan table into a DataFrame.

    The first two lines (header row and ``|---|`` separator row) are skipped;
    each remaining line is split on ``|`` and the empty leading/trailing
    fields produced by the outer pipes are dropped.  Cells are stripped of
    the padding whitespace Markdown tables usually carry, and blank lines
    inside or after the table are tolerated.

    :param plan_text: Markdown table text as produced by the LLM.
    :return: DataFrame with the fixed Chinese column names used by the UI;
             empty when *plan_text* contains no data rows.
    """
    columns = ["日期", "地点", "行程计划", "交通方式", "餐饮安排", "住宿安排", "费用估算", "备注"]
    lines = plan_text.strip().split('\n')
    table_data = []
    for line in lines[2:]:  # skip header and separator rows
        if not line.strip():
            continue  # ignore blank lines rather than emitting junk rows
        cells = [cell.strip() for cell in line.split('|')[1:-1]]
        if cells:
            table_data.append(cells)
    return pd.DataFrame(table_data, columns=columns)


def load_whisper_model(config):
    """Cache the Whisper model/processor pair in Streamlit session state.

    Reads the pair off ``config.audio_to_text_model``, clears any forced
    decoder ids, and stores each piece in ``st.session_state`` only when it
    is not cached yet, so repeated reruns reuse the loaded model.
    """
    whisper, whisper_processor = config.audio_to_text_model
    # Let generate() pick decoder ids per call instead of a baked-in prompt.
    whisper.config.forced_decoder_ids = None
    if "whisper_model" not in st.session_state:
        st.success(f"语音模型正在加载...")
        st.session_state.whisper_model = whisper
    if "whisper_processor" not in st.session_state:
        st.session_state.whisper_processor = whisper_processor


def audio_text_local(mp3_output):
    """Transcribe *mp3_output* to Chinese text with the cached Whisper model.

    Expects ``load_whisper_model()`` to have populated ``st.session_state``
    beforehand.  Prints timing and the transcription, then returns the
    decoded string.
    """
    whisper = st.session_state.whisper_model
    whisper_processor = st.session_state.whisper_processor
    whisper.config.forced_decoder_ids = None

    # Load the audio resampled to Whisper's expected 16 kHz rate.
    audio, rate = librosa.load(mp3_output, sr=16000)

    # Decoder prompt that forces Chinese transcription output.
    decoder_ids = whisper_processor.get_decoder_prompt_ids(language="Chinese", task="transcribe")

    with torch.inference_mode():
        features = whisper_processor(audio, sampling_rate=rate, return_tensors="pt").input_features
        started = time.time()
        token_ids = whisper.generate(features, forced_decoder_ids=decoder_ids)
        elapsed = time.time() - started
        transcript = whisper_processor.batch_decode(token_ids, skip_special_tokens=True)[0]

        print(f'Inference time: {elapsed} s')
        print('-' * 20, 'Chinese Transcription', '-' * 20)
        print(transcript)
    return transcript


def audio_text_api(mp3_output):
    """Transcribe *mp3_output* via the iFlytek Spark speech-to-text API.

    Imports the ``dwspark`` SDK lazily so the local-only code path never
    needs it installed.  Prints and returns the recognized text.
    """
    from dwspark.config import Config
    from dwspark.models import Audio2Text

    # NOTE(review): credentials are blank placeholders — supply the app id /
    # key / secret here or via the environment variables SPARKAI_APP_ID,
    # SPARKAI_API_KEY, SPARKAI_API_SECRET before calling.
    spark_config = Config('', '', '') # 科大讯飞星火模型api
    transcriber = Audio2Text(spark_config)

    transcript = transcriber.gen_text(mp3_output)
    print('-' * 20, 'API返回结果', '-' * 20)
    print(transcript)
    return transcript


def audio_text(mp3_output, config, mode):
    """Dispatch speech-to-text to the cloud API or the local Whisper model.

    ``config.set_mode(mode)`` determines what ``config.use_audio_to_text_api``
    reports; the local path lazily loads Whisper into session state first.
    Returns the transcription string from the chosen backend.
    """
    config.set_mode(mode)
    if not config.use_audio_to_text_api:
        load_whisper_model(config)
        return audio_text_local(mp3_output)
    return audio_text_api(mp3_output)
