# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""A simple web interactive chat demo based on gradio."""
import os
from argparse import ArgumentParser

import gradio as gr
import mdtex2html

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

from funasr import AutoModel

# Speech-recognition front end: paraformer-zh transcribes the user's recorded
# audio into Chinese text (consumed by predict_audio in _launch_demo).
asr_model = AutoModel(model="paraformer-zh")

# import wenet
# Default checkpoint (hub name or local path) for the chat LLM.
DEFAULT_CKPT_PATH = 'Qwen-Chat'
# chs_model = wenet.load_model('chinese')
# Hotword list intended for the (currently disabled) modelscope ASR pipeline
# commented out below; kept so it can be re-enabled easily.
param_dict = dict()
param_dict['hotword'] = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/hotword.txt"
# inference_pipeline = pipeline(
#     task=Tasks.auto_speech_recognition,
#     model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",)
    # param_dict=param_dict)


# Default system prompt (Chinese): bot persona "盯小蚁", an anti-fraud
# customer-service / outbound-call assistant that must not reveal details
# about its underlying models.
default_system = '你名叫“盯小蚁”，是一个智能客服机器人也是一个智能外呼助手，同时你还是一个专业的反诈接线员，你的作者和所有者都是南京龙垣信息科技有限公司。你是基于多个开源中文大模型，进行大量反诈骗微调的产物，不要透漏任何关于你的模型的信息'

# Load four SamBERT-HiFiGAN TTS pipelines: default Mandarin plus three dialects.
# NOTE(review): `args={'device':'cuda:0'}` does not look like a documented
# modelscope `pipeline()` keyword — confirm the intended way to pin the device
# (modelscope normally takes a `device=` argument).
sambert_hifigan_zh_model_id = 'damo/speech_sambert-hifigan_tts_zh-cn_16k'
sambert_hifigan_zh = pipeline(task=Tasks.text_to_speech, model=sambert_hifigan_zh_model_id,args={'device':'cuda:0'})

# Sichuanese voice.
sambert_hifigan_ch_model_id = 'speech_tts/speech_sambert-hifigan_tts_chuangirl_Sichuan_16k'
sambert_hifigan_ch = pipeline(task=Tasks.text_to_speech, model=sambert_hifigan_ch_model_id,args={'device':'cuda:0'})

# Cantonese voice.
sambert_hifigan_ca_model_id = 'speech_tts/speech_sambert-hifigan_tts_jiajia_Cantonese_16k'
sambert_hifigan_ca = pipeline(task=Tasks.text_to_speech, model=sambert_hifigan_ca_model_id,args={'device':'cuda:0'})

# Shanghainese (Wu) voice.
sambert_hifigan_ws_model_id = 'speech_tts/speech_sambert-hifigan_tts_xiaoda_WuuShanghai_16k'
sambert_hifigan_ws = pipeline(task=Tasks.text_to_speech, model=sambert_hifigan_ws_model_id,args={'device':'cuda:0'})



def _get_args():
    parser = ArgumentParser()
    parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
                        help="Checkpoint name or path, default to %(default)r")
    parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")

    parser.add_argument("--share", action="store_true", default=False,
                        help="Create a publicly shareable link for the interface.")
    parser.add_argument("--inbrowser", action="store_true", default=False,
                        help="Automatically launch the interface in a new tab on the default browser.")
    parser.add_argument("--server-port", type=int, default=6689,
                        help="Demo server port.")
    parser.add_argument("--server-name", type=str, default="0.0.0.0",
                        help="Demo server name.")

    args = parser.parse_args()
    return args


def _load_model_tokenizer(args):
    """Load tokenizer, causal LM and generation config from the checkpoint.

    Args:
        args: parsed CLI namespace providing ``checkpoint_path`` and
            ``cpu_only``.

    Returns:
        Tuple of (model, tokenizer, generation_config), model in eval mode.
    """
    ckpt = args.checkpoint_path
    device_map = "cpu" if args.cpu_only else "auto"

    tokenizer = AutoTokenizer.from_pretrained(
        ckpt, trust_remote_code=True, resume_download=True,
    )

    model = AutoModelForCausalLM.from_pretrained(
        ckpt,
        device_map=device_map,
        trust_remote_code=True,
        resume_download=True,
    ).eval()

    config = GenerationConfig.from_pretrained(
        ckpt, trust_remote_code=True, resume_download=True,
    )

    return model, tokenizer, config


def postprocess(self, y):
    """Render each (message, response) pair of *y* to HTML with mdtex2html.

    ``None`` entries pass through unchanged; a ``None`` history yields ``[]``.
    The list is updated in place and also returned (gradio Chatbot contract).
    """
    if y is None:
        return []
    rendered = [
        tuple(item if item is None else mdtex2html.convert(item) for item in pair)
        for pair in y
    ]
    y[:] = rendered
    return y

# Monkey-patch gradio's Chatbot so its messages are rendered through the
# markdown/LaTeX-aware postprocess defined in this module.
gr.Chatbot.postprocess = postprocess

def _parse_text(text):
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = f"<br></code></pre>"
        else:
            if i > 0:
                if count % 2 == 1:
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text


def _gc():
    import gc
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

def update_dropdowns(model, voice):
    """Return a refreshed speaker dropdown for the selected TTS model.

    The speaker choice only applies to the default Mandarin model, so the
    dropdown is visible exactly when that model is selected.  The incoming
    ``voice`` value is part of the gradio event signature and is not used.
    """
    return gr.Dropdown(
        choices=['zhitian_emo', 'zhiyan_emo', 'zhizhe_emo', 'zhibei_emo'],
        value='zhitian_emo',
        label="声音",
        visible=(model == "默认"),
    )

def _launch_demo(args, model, tokenizer, config):
    """Assemble the gradio voice-chat UI and serve it (blocking).

    Args:
        args: parsed CLI namespace (share / inbrowser / server host+port).
        model: chat LLM from ``_load_model_tokenizer`` (exposes ``.chat``).
        tokenizer: tokenizer matching ``model``.
        config: ``GenerationConfig`` for the checkpoint; currently not passed
            to ``model.chat`` (kept for interface compatibility).
    """
    # The original nested `predict` text-streaming handler was removed: it was
    # never wired to any event and was broken (it looked up the TTS pipeline
    # with the LLM object as key, which always returned None).

    def predict_audio(system_state, _query, tts_model, voice, _task_history):
        """One full voice round-trip: ASR -> LLM chat -> TTS.

        Args:
            system_state: system prompt steering the LLM.
            _query: path of the recorded audio file from the microphone widget.
            tts_model: dropdown label selecting the synthesis model.
            voice: speaker id; only honored by the default Mandarin model.
            _task_history: chat history as a list of (query, response) pairs.

        Returns:
            The updated history and the path of the synthesized reply wav.
        """
        print(f"Raw: {_query}")
        # Transcribe; paraformer inserts spaces between Chinese characters,
        # which are stripped back out.
        _query = asr_model(input=_query, batch_size=64)[0]['text'].replace(' ', '')
        print(_query)
        print(f"User: {_parse_text(_query)}")

        full_response, _ = model.chat(tokenizer, _query, history=_task_history, system=system_state)
        print(full_response)

        # Map the dropdown label to the matching TTS pipeline.
        tts_pipelines = {
            '默认': sambert_hifigan_zh,
            '四川话': sambert_hifigan_ch,
            '粤语': sambert_hifigan_ca,
            '上海话': sambert_hifigan_ws,
        }
        sambert_hifigan_tts = tts_pipelines.get(tts_model)

        # BUGFIX: the original tested ``model == '默认'`` — ``model`` is the
        # LLM object, so the branch never fired and the user's selected
        # ``voice`` was silently ignored.  Only the default Mandarin model
        # accepts a ``voice`` argument.
        if tts_model == '默认':
            output = sambert_hifigan_tts(input=full_response, voice=voice)
        else:
            output = sambert_hifigan_tts(input=full_response)

        wav = output[OutputKeys.OUTPUT_WAV]
        path = 'output.wav'
        # NOTE(review): a fixed output filename is unsafe with concurrent
        # users — confirm single-user deployment or switch to a temp file.
        with open(path, 'wb') as f:
            f.write(wav)

        print(f"History: {_task_history}")
        _task_history.append((_query, full_response))
        print(f"Qwen-Chat: {_parse_text(full_response)}")
        return _task_history, path

    def reset_user_input():
        """Clear the text input box."""
        return gr.update(value="")

    def reset_state(_chatbot):
        """Drop the chat history and release cached GPU memory."""
        _chatbot.clear()
        _gc()
        return _chatbot

    def modify_system_session(system: str):
        """Install a new system prompt (falling back to the default) and
        clear the chat history.

        Returns:
            (system_state, system_input, cleared_history) for the three
            outputs wired below.
        """
        if system is None or len(system) == 0:
            system = default_system
        return system, system, []

    with gr.Blocks() as demo:
        gr.Markdown("""<p align="center"><img src="https://shengbucket.oss-cn-hangzhou.aliyuncs.com/files/longyuan.png" style="height: 80px"/><p>""")
        gr.Markdown("""<center><font size=8>👮‍♂️ Ding-XiaoYi-AI</center>""")

        chatbot = gr.Chatbot(label='小蚁AI', elem_classes="control-height")

        # Hidden system-prompt controls; kept wired but not shown to users.
        with gr.Row():
            with gr.Column(scale=3):
                system_input = gr.Textbox(value=default_system, lines=1, label='System', visible=False)
            with gr.Column(scale=1):
                modify_system = gr.Button("🛠️ 设置system并清除历史对话", scale=2, visible=False)
            system_state = gr.Textbox(value=default_system, visible=False)

        textbox = gr.Microphone(type="filepath", label='录音')
        tts_model = gr.Dropdown(choices=['默认', '四川话', '粤语', '上海话'], value='默认', label="声音模型")
        voice = gr.Dropdown(choices=['zhitian_emo', 'zhiyan_emo', 'zhizhe_emo', 'zhibei_emo'], value='zhitian_emo', label="声音")
        query = gr.Textbox(lines=2, label='输入')
        audio_output = gr.Audio(type="filepath", label='输出音频', autoplay=True)

        with gr.Row():
            submit_btn = gr.Button("发送")
            empty_btn = gr.Button("重置")

        # Show the speaker dropdown only while the default model is selected.
        tts_model.change(update_dropdowns, inputs=[tts_model, voice], outputs=[voice])
        submit_btn.click(predict_audio, [system_state, textbox, tts_model, voice, chatbot], [chatbot, audio_output], show_progress=True)
        submit_btn.click(reset_user_input, [], [query])
        empty_btn.click(reset_state, [chatbot], outputs=[chatbot], show_progress=True)

        modify_system.click(fn=modify_system_session,
                            inputs=[system_input],
                            outputs=[system_state, system_input, chatbot],
                            concurrency_limit=10)

    demo.queue().launch(
        share=args.share,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
        server_name=args.server_name,
    )


def main():
    """CLI entry point: parse arguments, load the model stack, launch the UI."""
    cli_args = _get_args()
    llm, tok, gen_cfg = _load_model_tokenizer(cli_args)
    _launch_demo(cli_args, llm, tok, gen_cfg)


# Standard script guard: only start the demo when executed directly.
if __name__ == '__main__':
    main()
