import asyncio
import datetime
import os
import edge_tts
from flask import Flask, request, jsonify,send_file
from bigdl.llm.transformers import AutoModel
import torch
from transformers import AutoTokenizer
from bigdl.llm.transformers import AutoModelForCausalLM
from bigdl.llm.transformers import AutoModelForSpeechSeq2Seq
import librosa
import time
from transformers import WhisperProcessor
#model_path = "E:\hugginface_model\huggingface\hub\models--THUDM--chatglm3-6b\snapshots\9addbe01105ca1939dd60a0e5866a1812be9daea"
#model_in_4bit = AutoModel.from_pretrained(model_path,load_in_4bit=True,trust_remote_code=True)

#tokenizer = AutoTokenizer.from_pretrained(model_path,trust_remote_code=True)


# Shared multi-turn conversation state used by /v1/chat/completions.
chat_history = []
# History for the one-shot scoring endpoint (/v1/chat/get_culculate_fenshu);
# it is cleared at the start of every request.
chat_history1=[]
# Cached interview questions produced by /v1/chat/create_quesion and read by
# the get_* question endpoints. NOTE: the name keeps the original "quesiton"
# typo because other endpoints reference it by this spelling.
quesiton_array=[]

# while True:
#     with torch.inference_mode():
#         user_input = input("Input:")
#         if user_input == "stop": # 当用户输入 "stop" 时停止对话
#           print("Stream Chat with Llama 2 (7B) stopped.")
#           break
#         stream_chat(model=model_in_4bit,
#                     tokenizer=tokenizer,
#                     input_str=user_input,
#                     chat_history=chat_history)
        

app = Flask(__name__)
# Models are loaded below at module import time (ChatGLM3) or per use (Whisper).





@app.route('/v1/audio/transcriptions', methods=['POST'])
def upload_audio():
    """Transcribe an uploaded audio file with the local low-bit Whisper model.

    Expects a multipart/form-data POST with an ``audio_file`` part.
    Returns JSON ``{"text": [...]}`` (Whisper's batch-decoded output list),
    or a 400 error when no file was supplied.
    """
    # Use .get(): the original indexed request.files['audio_file'], which
    # raises before the None check ever ran, so the 400 path was dead code.
    audio_file = request.files.get('audio_file')
    if audio_file is None:
        return jsonify({'error': 'Audio file not provided'}), 400

    # Save the upload under a timestamped name, resample to 16 kHz (the rate
    # Whisper expects), then remove the temp file — the original leaked one
    # file per request.
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    file_path = timestamp + audio_file.filename
    print(file_path)
    audio_file.save(file_path)
    try:
        data_en, sample_rate_en = librosa.load(file_path, sr=16000)
    finally:
        try:
            os.remove(file_path)
        except OSError:
            pass  # best effort; don't fail the request over cleanup

    # Load the model and processor once and cache them on the function —
    # the original reloaded both from disk on every single request.
    if not hasattr(upload_audio, '_asr'):
        upload_audio._asr = (
            AutoModelForSpeechSeq2Seq.load_low_bit(
                pretrained_model_name_or_path="./whisper-medium"),
            WhisperProcessor.from_pretrained(
                pretrained_model_name_or_path="./whisper-medium"),
        )
    audio_model, audio_processor = upload_audio._asr

    # Force Chinese transcription, matching the original task setup.
    forced_decoder_ids = audio_processor.get_decoder_prompt_ids(
        language="chinese", task="transcribe")

    with torch.inference_mode():
        # Extract Whisper input features from the raw audio.
        input_features = audio_processor(
            data_en, sampling_rate=sample_rate_en,
            return_tensors="pt").input_features

        # Predict token ids for the transcription.
        st = time.time()
        predicted_ids = audio_model.generate(
            input_features, forced_decoder_ids=forced_decoder_ids)
        end = time.time()

        # Decode token ids back to text.
        transcribe_str = audio_processor.batch_decode(
            predicted_ids, skip_special_tokens=True)

        print(f'Inference time: {end-st} s')
        print('-'*20, 'English Transcription', '-'*20)
        print(transcribe_str)
        return jsonify({'text': transcribe_str})


@app.route('/v1/generate_voice', methods=['POST'])
def generate_voice():
    """Synthesize the posted ``content`` text to speech via edge-tts.

    Returns the generated MP3 file as the response body.
    """
    request_data = request.get_json()
    content = request_data.get('content', '')  # default to empty string

    voice = 'zh-CN-YunxiNeural'
    # NOTE(review): a fixed output filename means concurrent requests clobber
    # each other's audio — consider a per-request temp name.
    output = '4.mp3'
    rate = '-4%'
    volume = '+0%'

    async def synthesize():
        tts = edge_tts.Communicate(text=content, voice=voice,
                                   rate=rate, volume=volume)
        await tts.save(output)

    # asyncio.run creates AND always closes a fresh event loop; the original
    # new_event_loop()/close() sequence leaked the loop if tts raised.
    asyncio.run(synthesize())
    return send_file(output)

# Directory holding the INT8 low-bit quantized ChatGLM3-6B checkpoint.
save_directory='./chatglm3-6b-INT8'
#modeldir='./chatglm3-6b-INT4/model.safetensors'
#model_in_4bit.save_low_bit(save_directory)
#del(model_in_4bit)
# Load the pre-quantized chat model and its tokenizer once at startup.
# NOTE: the name says "4bit" but the checkpoint directory is INT8.
model_in_4bit=AutoModelForCausalLM.load_low_bit(save_directory,trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(save_directory,
                                          trust_remote_code=True)
# Interviewer persona system prompts (Chinese, kept verbatim — they are sent
# to the model at runtime). SYSTEM_PROMPT: ask one question per turn and
# score answers; SYSTEM_PROMPT1: emit 10 questions as a JSON array;
# SYSTEM_PROMPT2: score an answer and output the score as JSON.
SYSTEM_PROMPT = "你是一个面试官,你会根据用户的简历或者JD说一道相关的面试题目,每一轮只说一道面试题,然后等待面试者的回答,打分并且说出原因.请不要回答多余的句子,你需要先告诉用户先输入JD或者简历"
SYSTEM_PROMPT1 = "你是一个面试官,你会根据内容说10道面试题目,并json数组的格式输出,只需要面试题不需要答案"
SYSTEM_PROMPT2 = "你是一个面试官,你会根据面试者的答案给出评分,并说明理由,评分会以json的形式输出"

@app.route('/v1/chat/completions', methods=['POST'])
def completions():
    """Run one chat turn against the quantized model using shared history.

    Returns JSON ``{"content": <model reply>}``.
    """
    user_input = request.get_json().get('text')  # user's chat message
    if user_input == "stop":  # user asked to end the conversation
        print("Stream Chat stopped.")
        # Bug fix: the original printed but fell through and still sent
        # "stop" to the model; return immediately instead.
        return jsonify({'content': 'Stream Chat stopped.'})

    result = chat(model=model_in_4bit,
                  tokenizer=tokenizer,
                  input_str=user_input,
                  chat_history=chat_history)
    return jsonify({'content': result['text']})


@app.route('/v1/chat/get_fenshu', methods=['POST'])
def get_fenshu():
    """Score the candidate's answer to cached question ``index``.

    Expects JSON ``{"text": <answer>, "index": <question index>}``;
    returns JSON ``{"content": <model's score + reasoning>}``.
    """
    payload = request.get_json()
    user_input = payload.get('text')       # candidate's answer
    questionIndex = payload.get('index')   # which cached question to score
    # Validate the index up front — the original indexed unconditionally
    # and returned a 500 on any out-of-range value.
    if questionIndex is None or not (0 <= questionIndex < len(quesiton_array)):
        return jsonify({'error': 'question index out of range'}), 400
    question = quesiton_array[questionIndex]
    if user_input == "stop":  # user asked to end the conversation
        print("Stream Chat stopped.")
        # Bug fix: return instead of falling through to generation.
        return jsonify({'content': 'Stream Chat stopped.'})

    # Scoring prompt (kept byte-for-byte from the original).
    prompt = f"你是一个面试官,你会根据面试者的答案给出评分,如果面试者没有说具体的答案或者离题则给零分。并说明理由,问题是:{question}。面试者给的答案是:“”“{user_input}”“”"
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate the verdict; inference_mode avoids gradient bookkeeping.
    with torch.inference_mode():
        output_ids = model_in_4bit.generate(input_ids,
                                            max_new_tokens=4096, temperature=0.8)

    # Decode only the newly generated tokens, skipping the prompt.
    output_str = tokenizer.decode(output_ids[0][len(input_ids[0]):],
                                  skip_special_tokens=True)
    print(f"Response: {output_str.strip()}")

    return jsonify({'content': output_str})


@app.route('/v1/chat/create_quesion', methods=['POST'])
def crete_qustion():
    """Generate 10 interview questions from the posted text and cache them.

    Rebinds the module-level ``quesiton_array`` with one entry per non-empty
    output line; returns JSON ``{"content": <raw model output>}``.
    """
    global quesiton_array
    user_input = request.get_json().get('text')  # resume / JD text
    if user_input == "stop":  # user asked to end the conversation
        print("Stream Chat stopped.")
        # Bug fix: return instead of falling through to generation.
        return jsonify({'content': 'Stream Chat stopped.'})

    # Question-generation prompt (kept byte-for-byte from the original).
    prompt = f"你是一个面试官,你会根据内容说10道相关的面试题不用说答案,每个问题要换行输出，只需要说面试题,不用输出其他不相干的内容。内容是：“”“{user_input}”“”"
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate the questions; inference_mode avoids gradient bookkeeping.
    with torch.inference_mode():
        output_ids = model_in_4bit.generate(input_ids,
                                            max_new_tokens=4096, temperature=0.8)

    # Decode only the newly generated tokens, skipping the prompt.
    output_str = tokenizer.decode(output_ids[0][len(input_ids[0]):],
                                  skip_special_tokens=True)
    print(f"Response: {output_str.strip()}")

    # One question per non-empty line. (The original's .clear() immediately
    # before rebinding was a no-op and is dropped.)
    quesiton_array = [s.strip() for s in output_str.split('\n') if s.strip()]
    return jsonify({'content': output_str})


@app.route('/v1/chat/get_quesion_of_index', methods=['POST'])
def get_quesion_of_index():
    """Return the cached interview question at the posted ``index``.

    Returns JSON ``{"content": <question>}`` or a 400 on a bad index.
    """
    index = request.get_json().get('index')
    # Validate instead of letting IndexError surface as a 500.
    if index is None or not (0 <= index < len(quesiton_array)):
        return jsonify({'error': 'question index out of range'}), 400
    return jsonify({'content': quesiton_array[index]})

@app.route('/v1/chat/get_all_quesions', methods=['POST'])
def get_all_quesions():
    """Return every currently cached interview question as a JSON array."""
    return jsonify({'quesiton_array': quesiton_array})




@app.route('/v1/chat/get_culculate_fenshu', methods=['POST'])
def get_culculate_fenshu():
    """Score a full Q&A transcript; the model replies with a JSON verdict.

    Returns JSON ``{"content": <model's score + reasoning>}``.
    """
    chat_history1.clear()  # single-turn endpoint: discard any prior scoring turn
    user_input = request.get_json().get('text')  # transcript to score
    if user_input == "stop":  # user asked to end the conversation
        print("Stream Chat stopped.")
        # Bug fix: return instead of falling through to generation.
        return jsonify({'content': 'Stream Chat stopped.'})

    # Scoring prompt (kept byte-for-byte from the original).
    prompt = f"你是一个面试官,你会根据面试者的答案给出评分,并说明理由,评分会以json的形式输出：“”“{user_input}”“”答："
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate the verdict; inference_mode avoids gradient bookkeeping.
    with torch.inference_mode():
        output_ids = model_in_4bit.generate(input_ids,
                                            max_new_tokens=4096, temperature=0.8)

    # Decode only the newly generated tokens, skipping the prompt.
    output_str = tokenizer.decode(output_ids[0][len(input_ids[0]):],
                                  skip_special_tokens=True)
    print(f"Response: {output_str.strip()}")

    # Record this turn (prompt + reply), as the original did.
    chat_history1.append((prompt, output_str))
    return jsonify({'content': output_str})
    
#model_in_4bit=AutoModel.load_low_bit(save_directory)
#tokenizer.save_pretrained(save_directory)

def format_prompt1(input_str, chat_history):
    """Build a Llama-2 style ``[INST]`` prompt from SYSTEM_PROMPT1 + history.

    The first user message in the history is kept verbatim; every later one
    (including the new input when history is non-empty) is stripped.
    """
    parts = [f'<s>[INST] <<SYS>>\n{SYSTEM_PROMPT1}\n<</SYS>>\n\n']
    for turn, (past_input, past_response) in enumerate(chat_history):
        # Same effect as the original do_strip flag: strip all but turn 0.
        if turn > 0:
            past_input = past_input.strip()
        parts.append(f'{past_input} [/INST] {past_response.strip()} </s><s>[INST] ')
    if chat_history:
        input_str = input_str.strip()
    parts.append(f'{input_str} [/INST]')
    return ''.join(parts)
from transformers import TextIteratorStreamer

def format_prompt(input_str, chat_history):
    """Build a Llama-2 style ``[INST]`` prompt from SYSTEM_PROMPT + history.

    The first user message in the history is kept verbatim; every later one
    (including the new input when history is non-empty) is stripped.
    """
    parts = [f'<s>[INST] <<SYS>>\n{SYSTEM_PROMPT}\n<</SYS>>\n\n']
    for turn, (past_input, past_response) in enumerate(chat_history):
        # Same effect as the original do_strip flag: strip all but turn 0.
        if turn > 0:
            past_input = past_input.strip()
        parts.append(f'{past_input} [/INST] {past_response.strip()} </s><s>[INST] ')
    if chat_history:
        input_str = input_str.strip()
    parts.append(f'{input_str} [/INST]')
    return ''.join(parts)
from transformers import TextIteratorStreamer
def chat(model, tokenizer, input_str, chat_history):
    """Run one non-streaming chat turn and append it to ``chat_history``.

    Returns ``{'text': <model reply>}``.
    """
    # Fold the history plus the new user message into a single prompt.
    prompt = format_prompt(input_str, chat_history)
    encoded = tokenizer.encode(prompt, return_tensors="pt")

    # Generate up to 4096 new tokens with the model's default settings.
    generated = model.generate(encoded, max_new_tokens=4096)

    # Decode only the freshly generated tokens, skipping the prompt.
    reply = tokenizer.decode(generated[0][len(encoded[0]):],
                             skip_special_tokens=True)
    print(f"Response: {reply.strip()}")

    # Record this turn so later calls see the full conversation.
    chat_history.append((input_str, reply))
    return {'text': reply}

def newchat(model, tokenizer, input_str, chat_history):
    """Run one short chat turn (max 32 new tokens) and record it in history.

    Returns ``{'text': <model reply>}`` — the original returned None, which
    was inconsistent with ``chat``; returning the reply is backward
    compatible for callers that ignored the result.
    """
    # Format the conversation context into a prompt using the chat history.
    prompt = format_prompt(input_str, chat_history)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Short generation budget distinguishes this from chat().
    output_ids = model.generate(input_ids,
                                max_new_tokens=32)

    # Decode only the freshly generated tokens, skipping the prompt.
    output_str = tokenizer.decode(output_ids[0][len(input_ids[0]):],
                                  skip_special_tokens=True)
    print(f"Response: {output_str.strip()}")

    # Append the model output to the chat history.
    chat_history.append((input_str, output_str))
    return {'text': output_str}
def stream_chat(model, tokenizer, input_str, chat_history):
    """Stream one chat turn token-by-token to stdout and record it in history."""
    # Format the conversation context into a prompt using the chat history.
    prompt = format_prompt(input_str, chat_history)
    input_ids = tokenizer([prompt], return_tensors='pt')

    streamer = TextIteratorStreamer(tokenizer,
                                    skip_prompt=True, # skip the prompt in the generated tokens
                                    skip_special_tokens=True)

    # input_ids is a mapping (BatchEncoding), so dict(input_ids, ...) merges
    # its entries with the extra generate() keyword arguments.
    generate_kwargs = dict(
        input_ids,
        streamer=streamer,
        max_new_tokens=128
    )
    
    # Generation must run in a separate thread so this thread can consume
    # the streamer without blocking.
    from threading import Thread
    
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    output_str = []
    print("Response: ", end="")
    for stream_output in streamer:
        output_str.append(stream_output)
        print(stream_output, end="")

    # Append the model output to the chat history.
    chat_history.append((input_str, ''.join(output_str)))



if __name__ == '__main__':
    # Bind on all interfaces. The port must be an int — the original passed
    # the string "3060", which Werkzeug's server rejects/mishandles.
    app.run(host="0.0.0.0", port=3060)