import json
import sys
sys.path.append('/usr/lib/python3.8/site-packages')
from flask import Flask, request, jsonify, redirect, send_from_directory,send_file
import requests
import base64
from transformers import SpeechT5Processor, SpeechT5ForSpeechToText
from transformers import VitsModel, AutoTokenizer,pipeline
from transformers import WhisperProcessor, WhisperForConditionalGeneration,AutoModelForSpeechSeq2Seq,QuantoConfig
import numpy as np
import torch
import io
import os
import base64
import soundfile as sf
import edge_tts
import asyncio
import aiofiles
import time
app = Flask(__name__)

# Interview questions produced by /v1/chat/create_quesion and read by the
# scoring/listing endpoints. (Original spelling kept: handlers below
# reference this exact name.)
quesiton_array = []

device = ['cpu']
# Flipped by the before_request hook after one-time model initialization.
has_initialized = False

# exist_ok avoids the race in the original check-then-create: a concurrent
# creation between os.path.exists() and os.makedirs() no longer crashes.
os.makedirs('/tmp/uploads', exist_ok=True)

# Model initialization is hooked in via a before-first-request mechanism
# (see first_request below).

def load_model():
    """Load the Chinese Whisper ASR model (int8-quantized, CPU-only) and
    attach a ready-to-use transcription pipeline to the app as
    ``app.transcriber``.

    Called once from initialize() before the first request is served.
    Downloads the model on first use, so start-up may be slow.
    """
    model_id = "xmzhu/whisper-tiny-zh"
    # int8 weight quantization keeps the CPU memory footprint small.
    quanto_config = QuantoConfig(weights="int8")

    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id,
        device_map="cpu",
        quantization_config=quanto_config,
    )

    processor = WhisperProcessor.from_pretrained(model_id)
    # torch.compile trades a one-time warm-up cost for faster inference.
    model = torch.compile(model)

    app.transcriber = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
    )
    # Force Chinese transcription output regardless of auto-detected language.
    app.transcriber.model.config.forced_decoder_ids = (
        app.transcriber.tokenizer.get_decoder_prompt_ids(language="zh", task="transcribe")
    )
    

def initialize():
    """One-time start-up hook: load the ASR model before serving requests."""
    load_model()
    

@app.before_request
def first_request():
    """Lazily run one-time initialization on the first incoming request.

    NOTE(review): the flag check is not thread-safe — with a multi-threaded
    WSGI server, two early concurrent requests could both call initialize().
    Confirm the deployment is single-threaded or add a lock.
    """
    global has_initialized
    if not has_initialized:
        initialize()
        has_initialized = True


@app.route('/v1/audio/transcriptions', methods=['POST'])
def upload_audio():
    """Transcribe an uploaded audio file with the Whisper pipeline.

    Expects a multipart form field ``audio_file``. Returns the pipeline's
    output as JSON; the temporary copy is always removed afterwards.
    """
    # .get() so a missing field reaches our 400 JSON error — the original
    # request.files['audio_file'] raised before the None check could run.
    audio_file = request.files.get('audio_file')
    if audio_file is None or audio_file.filename == '':
        return jsonify({'error': 'Audio file not provided'}), 400

    # basename() strips any directory components a client might smuggle into
    # the filename (path-traversal hardening).
    file_path = os.path.join('/tmp/uploads', os.path.basename(audio_file.filename))
    audio_file.save(file_path)

    try:
        transcription = app.transcriber(file_path)
        print(transcription)
        return jsonify({'transcription': transcription}), 200
    finally:
        # Clean up even when transcription raises.
        if os.path.exists(file_path):
            os.remove(file_path)
        
        
@app.route('/v1/chat/completions', methods=['POST'])
def upload_completions():
    """Transparent proxy: forward the JSON body to the local LLM backend
    and return its JSON response unchanged.

    Returns the backend's JSON on success, or ``{"error": ...}`` with
    HTTP 500 on any request failure.
    """
    target_url = 'http://127.0.0.1:8090/v1/chat/completions'
    data = request.get_json()
    try:
        # Timeout so a hung backend cannot pin this worker forever; the
        # Timeout exception is a RequestException and lands in the 500 path.
        response = requests.post(target_url, json=data, timeout=120)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        return jsonify({'error': str(e)}), 500
    
  
        
@app.route('/v1/generate_voice', methods=['POST'])
def generate_voice():
    """Synthesize Chinese speech for ``content`` with Edge TTS and return
    the resulting MP3 audio."""
    import tempfile  # local import: only this handler needs it

    request_data = request.get_json()
    content = request_data.get('content', '')  # empty string if absent

    voice = 'zh-CN-YunxiNeural'
    rate = '-4%'
    volume = '+0%'

    # Unique temp file per request: the original wrote a shared '4.mp3' in
    # the CWD, so concurrent requests could clobber each other's output,
    # and the file was never cleaned up.
    fd, output_path = tempfile.mkstemp(suffix='.mp3')
    os.close(fd)

    async def _synthesize():
        tts = edge_tts.Communicate(text=content, voice=voice, rate=rate, volume=volume)
        await tts.save(output_path)

    try:
        # asyncio.run creates and tears down the event loop correctly,
        # replacing the manual new_event_loop/run_until_complete/close dance.
        asyncio.run(_synthesize())
        with open(output_path, 'rb') as f:
            audio_bytes = f.read()
    finally:
        if os.path.exists(output_path):
            os.remove(output_path)

    # Serve from memory so the temp file can be deleted immediately.
    return send_file(io.BytesIO(audio_bytes), mimetype='audio/mpeg')


@app.route('/v1/audio/speech', methods=['POST'])
def text_to_audio():
    """Synthesize English speech for ``content`` with MMS-TTS and return it
    as base64-encoded WAV in ``{"audio": ...}``."""
    request_data = request.get_json()
    content = request_data.get('content', '')  # empty string if absent

    # NOTE(review): the model is reloaded on every request; caching it at
    # startup (like app.transcriber) would avoid the repeated load cost.
    model = VitsModel.from_pretrained("facebook/mms-tts-eng").cpu()
    tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

    inputs = tokenizer(content, return_tensors="pt")
    with torch.no_grad():
        waveform = model(**inputs).waveform

    # Encode the WAV entirely in memory. The original wrote "techno.wav"
    # into the CWD but then read '/tmp/techno.wav' — that mismatch fails
    # unless the process happens to run with CWD=/tmp, and the shared
    # filename also broke concurrent requests.
    buf = io.BytesIO()
    sf.write(buf, np.ravel(waveform.float()), model.config.sampling_rate, format='WAV')
    base64_audio = base64.b64encode(buf.getvalue()).decode('utf-8')
    return jsonify({'audio': base64_audio})
    


@app.route('/v1/embeddings', methods=['POST'])
def generate_embedding():
    """Proxy an embedding request to the local backend.

    Forwards the JSON body to the backend's /embedding endpoint and wraps
    its response; returns ``{"error": ...}`` with HTTP 500 on failure.
    """
    target_url = 'http://127.0.0.1:8090/embedding'
    data = request.get_json()
    try:
        # Timeout so a hung backend cannot pin this worker forever.
        response = requests.post(target_url, json=data, timeout=120)
        response.raise_for_status()
        result = response.json()
        return jsonify({'message': 'Data submitted successfully', 'result': result}), 200
    except requests.exceptions.RequestException as e:
        return jsonify({'error': str(e)}), 500

@app.route('/v1/start', methods=['GET'])
def start():
    """Liveness probe: confirms the service is up and responding."""
    body = {'message': 'STAR successfully'}
    return jsonify(body), 200



# Directory that /doc/<filename> serves files from. NOTE(review): 'tmp' is
# a path relative to the process CWD — presumably '/tmp' (used elsewhere in
# this file) or a local ./tmp folder was intended; confirm against the
# deployment layout.
static_folder = 'tmp'
@app.route('/doc/<path:filename>')
def download(filename):
    # send_from_directory resolves the path safely (rejects traversal) and
    # as_attachment=True forces a download rather than inline display.
    return send_from_directory(static_folder, filename, as_attachment=True)


@app.route('/v1/chat/get_fenshu', methods=['POST'])
def get_fenshu():
    """Score an interviewee's answer (1-10) for a previously generated
    question by asking the local LLM backend.

    Body: ``{"text": <answer>, "index": <question index>}``.
    Returns ``{"content": <LLM feedback>}``, 400 for a bad index, or 500
    with ``{"error": ...}`` when the backend request fails.
    """
    global quesiton_array
    # Parse the body once instead of calling get_json() per field.
    body = request.get_json()
    user_input = body.get('text')        # interviewee's answer
    question_index = body.get('index')   # which generated question

    # Guard against a missing/out-of-range index instead of crashing with
    # an unhandled IndexError (which surfaced as a generic 500).
    if not isinstance(question_index, int) or not (0 <= question_index < len(quesiton_array)):
        return jsonify({'error': 'Invalid question index'}), 400
    question = quesiton_array[question_index]

    user_input = f"你是一个面试官,你根据面试者的答案给出1到10的分数,并说明理由,面试问题是:{question}。面试者给的答案是:“”“{user_input}”“”"
    messages = [
        {"role": "system", "content": "你根据面试者的答案给出1到10的分数."},
        {"role": "user", "content": user_input},
    ]
    payload = json.dumps({
        "model": "qwen1_5-0_5b-chat-q5_k_m.gguf",  # default local model
        "frequency_penalty": 1.5,
        "presence_penalty": 1,
        "temperature": 0.8,
        "max_tokens": 512,
        "top_k": 10,
        "messages": messages,
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer YOUR_API_KEY',  # replace with a real API key
    }

    target_url = 'http://127.0.0.1:8090/v1/chat/completions'
    try:
        start_time = time.time()
        # Timeout so a hung backend cannot pin this worker forever.
        response = requests.post(target_url, headers=headers, data=payload, timeout=120)
        response.raise_for_status()
        result = response.json()
        print(f'Inference time: {time.time() - start_time} s')
        first_choice = result.get('choices', [{}])[0]
        content = first_choice.get('message', {}).get('content', 'No content found')
        print(content)
        return jsonify({'content': content})
    except requests.exceptions.RequestException as e:
        return jsonify({'error': str(e)}), 500

@app.route('/v1/chat/create_quesion', methods=['POST'])
def crete_qustion():
    """Generate 10 interview questions from the supplied material via the
    local LLM and cache them in the module-level ``quesiton_array``.

    Body: ``{"text": <source material>}``.
    Returns ``{"content": <raw LLM text>}`` or 500 with ``{"error": ...}``.
    """
    global quesiton_array
    user_input = request.get_json().get('text')
    print(user_input)

    user_input = f"你是一个面试官,你会根据内容说10道相关的面试题不用说答案,每个问题要换行输出，只需要说面试题,不用输出其他不相干的内容。内容是：“”“{user_input}”“”"
    messages = [
        {"role": "system", "content": "你会根据内容出面试题目"},
        {"role": "user", "content": user_input},
    ]
    payload = json.dumps({
        "model": "qwen1_5-0_5b-chat-q5_k_m.gguf",  # default local model
        "frequency_penalty": 1.5,
        "presence_penalty": 1,
        "temperature": 0.8,
        "max_tokens": 1024,
        "top_k": 10,
        "messages": messages,
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer YOUR_API_KEY',  # replace with a real API key
    }

    target_url = 'http://127.0.0.1:8090/v1/chat/completions'
    try:
        # Timeout so a hung backend cannot pin this worker forever.
        response = requests.post(target_url, headers=headers, data=payload, timeout=120)
        response.raise_for_status()
        result = response.json()
        first_choice = result.get('choices', [{}])[0]
        content = first_choice.get('message', {}).get('content', 'No content found')
        print(content)
        # One question per non-empty line of the LLM's output.
        quesiton_array = [s.strip() for s in content.split('\n') if s.strip() != ""]
        return jsonify({'content': content})
    except requests.exceptions.RequestException as e:
        return jsonify({'error': str(e)}), 500



# @app.route('/v1/chat/get_quesion_of_index', methods=['POST'])
# def get_quesion_of_index():
#     global quesiton_array
#     index = request.get_json().get('index')  # 要转为语音的文字 
#     # 将模型的输出添加至聊天记录中
#     #chat_history1.append((input_str, output_str))
#     result= quesiton_array[index]
   
#     return jsonify({'content': result})

@app.route('/v1/chat/get_all_quesions', methods=['POST'])
def get_all_quesions():
    """Return the cached list of generated interview questions."""
    global quesiton_array
    return jsonify({'quesiton_array': quesiton_array})


if __name__ == '__main__':
    # Listen on all interfaces, port 8070, using Flask's built-in
    # development server.
    app.run(host='0.0.0.0', port=8070)