import os
import time
import json
import torch
import requests
import gradio as gr
import pandas as pd
from argparse import ArgumentParser
from flask import Flask, request, jsonify

from rich.console import Console
from rich.table import Table
from rich import print

from dagent_llm import LLM
# LLM client backed by an Ollama server.
# NOTE(review): `redundancy = 2` presumably controls retry/verbosity in
# dagent_llm — confirm against that library's docs.
llm = LLM("ollama")
llm.redundancy = 2

# Dialogue script table: one row per (question, answer) transition.
# All cells are read as strings; NaN cells become the sentinel "空"
# ("empty") so later equality checks never compare against NaN.
speech_df = pd.read_csv("speech.csv", encoding='utf-8', dtype=str)
speech_df = speech_df.fillna("空")
print(f"speech_df: \n{speech_df}")

def next_speech(now_q_id, now_a_id, speech_df, redundance=0):
    """Look up the next question in the dialogue script.

    Args:
        now_q_id: Current question id (string, matched against `q_id`).
        now_a_id: Classified answer id for the current question.
        speech_df: Script table with string columns q_id, a_id, next_q_id,
            prefix, prefix_first, text ("空" means empty).
        redundance: When > 0, print debug information.

    Returns:
        (next_text, next_q_id, same): the prefixed next question text, its
        id, and whether the dialogue stays on the same question.
        Returns ("", "", False) when any lookup fails.
    """
    same = False
    if redundance > 0:
        print(f"now_q_id: {now_q_id}, now_a_id: {now_a_id}")

    # Find the transition row: exact (q_id, a_id) match first, then the
    # wildcard row a_id == "-1" ("any answer").
    row = speech_df.loc[(speech_df['q_id'] == now_q_id) & (speech_df['a_id'] == now_a_id)]
    if row.empty:
        row = speech_df.loc[(speech_df['q_id'] == now_q_id) & (speech_df['a_id'] == "-1")]
    if row.empty:
        print(f"Not found next_q_id, now_q_id: {now_q_id}, now_a_id: {now_a_id}")
        return "", "", same
    next_q_id = row['next_q_id'].values[0]

    if redundance > 0:
        print(f"next_q_id: {next_q_id}")

    # BUG FIX: take the prefix from the SAME transition row that produced
    # next_q_id. The old code re-queried with (now_q_id, now_a_id) only, so
    # whenever the wildcard ("-1") row was used, the prefix lookup found
    # nothing and the function always bailed out with ("", "", False).
    next_q_prefix = row['prefix'].values[0]

    # Default prefix attached to the next question itself.
    next_q_prefix_first_r = speech_df.loc[(speech_df['q_id'] == next_q_id), 'prefix_first'].values
    if len(next_q_prefix_first_r) == 0:
        print(f"Not found next_q_prefix_first, next_q_id: {next_q_id}")
        return "", "", same
    next_q_prefix_first = next_q_prefix_first_r[0]

    if next_q_prefix == "空":
        # No per-transition prefix: fall back to the question's own default.
        next_q_prefix = next_q_prefix_first
    else:
        # A non-empty prefix field is itself a q_id whose `text` is used.
        prefix_text_r = speech_df.loc[(speech_df['q_id'] == next_q_prefix), 'text'].values
        if len(prefix_text_r) == 0:
            print(f"Not found next_q_prefix, next_q_id: {next_q_id}")
            return "", "", same
        next_q_prefix = prefix_text_r[0]

    if next_q_prefix == "空":
        next_q_prefix = ""
    if redundance > 0:
        print(f"next_q_prefix: {next_q_prefix}")

    # BUG FIX: guard the text lookup instead of crashing with IndexError
    # when next_q_id has no row in the script.
    next_q_text_r = speech_df.loc[(speech_df['q_id'] == next_q_id), 'text'].values
    if len(next_q_text_r) == 0:
        print(f"Not found text, next_q_id: {next_q_id}")
        return "", "", same
    next_q_text = next_q_text_r[0]

    if next_q_id == now_q_id:
        same = True
    next_text = f"{next_q_prefix}{next_q_text}"
    return next_text, next_q_id, same

def check_stop(q_id, speech_df):
    """Decide whether the dialogue ends after question `q_id`.

    Returns:
        (stopped, success):
            (True, False)  -- no outgoing transition: unexpected termination
            (True, True)   -- the next question is a closing line
            (False, False) -- the conversation continues
    """
    transitions = speech_df.loc[speech_df['q_id'] == q_id, 'next_q_id'].values
    if len(transitions) == 0:
        # Unexpected termination: the script has no row for this question.
        return True, False
    following_answers = speech_df.loc[speech_df['q_id'] == transitions[0], 'a_id'].values
    if len(following_answers) == 0 or following_answers[0] == "-1":
        # Closing line reached: the dialogue completed successfully.
        return True, True
    return False, False

def get_aid_from_input(q_text, q_id, a_input, speech_df):
    """Classify a free-text reply into one of the scripted answer ids.

    Builds a multiple-choice prompt from the question/reply pair, lets the
    LLM pick the closest scripted answer for question `q_id`, and maps the
    chosen answer text back to its `a_id`.

    Returns:
        (a_id, answer_text) for the selected scripted answer.
    """
    prompt = (
        "请你阅读下面对话，然后告诉我他针对这个的回答结果：\n"
        f"反诈民警: {q_text}\n说话人:{a_input}\n"
        "问题：说话人的回答表达了什么意思？请根据下面的选项选择一个最符合的答案。"
    )
    candidates = speech_df.loc[speech_df['q_id'] == q_id, 'a'].values
    selected_a = llm_choose(prompt, candidates, history=None, need_reason=True, multiple=False, add_to_history=False)
    selected_a_index = speech_df.loc[(speech_df['q_id'] == q_id) & (speech_df['a'] == selected_a), 'a_id'].values[0]
    print(f"selected_a: {selected_a}, selected_a_index: {selected_a_index}")
    return selected_a_index, selected_a

# Multiple-choice classification helper.
def llm_choose(prompt, options, history=None, need_reason=False, multiple=False, add_to_history=False):
    """Ask the global LLM to pick one of `options` for `prompt`.

    `history` and `add_to_history` are accepted for interface compatibility
    but are not forwarded to the underlying call.
    """
    result = llm.choose(options, prompt, "说话人回答类别", need_reason=need_reason, multiple=multiple)
    choice = result["choice"]
    reason = result["reason"]
    print(f"choice: {choice}")
    if need_reason:
        print(f"reason: {reason}")
    return choice

def predict(chat_history, user_input, q_id):
    """Gradio submit handler: classify the reply and advance the script.

    Args:
        chat_history: List of (user, bot) message pairs; appended to in place.
        user_input: The user's latest free-text reply.
        q_id: Id of the question the user is answering.

    Returns:
        (chat_history, q_id, result_text) matching the Gradio outputs
        [chatbot_display, q_id, now_result].
    """
    # The question the bot asked last, and the user's reply to it.
    last_q = chat_history[-1][1]
    last_a = user_input
    print(f"Now Q id: {q_id}")
    print(f"Last Q: {last_q}")
    print(f"Last A: {last_a}")

    a_id, selected_a = get_aid_from_input(last_q, q_id, user_input, speech_df)
    print(f"a_id: {a_id}")

    stop, success = check_stop(q_id, speech_df)
    if stop and not success:
        # BUG FIX: the old code assigned `result` here but then read the
        # never-defined `next_text` below, raising NameError whenever the
        # dialogue terminated unexpectedly. Use the termination message as
        # the bot's response instead.
        print("对话中止。")
        bot_response = f"对话中止。"
    else:
        next_text, q_id, same = next_speech(q_id, a_id, speech_df)
        bot_response = next_text

    # Record the exchange and report the classification result.
    chat_history.append((user_input, bot_response))
    print(f"Output QID: {q_id}")
    return chat_history, q_id, f"识别结果：{selected_a}\n识别编号： {a_id}"

def reset():
    """Reset handler: restore chat display, state, and all output boxes.

    Returns the initial values for
    [chatbot_display, chat_history, now_result, q_id, Q_emo_response,
    all_context_response].
    """
    Q_first = "喂，你好这个是常州市反诈骗中心，你最近是否有接到自称购物平台客服或者物流快递人员的电话，以“你购买的商品有问题”“快递丢失”或“注销会员”等理由要给您赔偿、退款、注销得？"
    opening = [(None, "您正在参与常州小蚁AI测试 👮‍♂️"), (None, Q_first)]
    # Display and state each get their own list so Gradio never aliases them.
    return opening, list(opening), "", "1", "", ""

def emotion_predict(chat_history):
    """Summarize the speaker's emotional state over the whole conversation.

    Flattens the (bot, user) turn pairs into a transcript and asks the LLM
    whether the speaker shows irritation, anger, or annoyance.
    """
    transcript = "".join(
        f"\n反诈民警:{turn[0]}\n说话人{turn[1]}\n" for turn in chat_history
    )
    prompt = f"请你阅读下面对话，然后告诉我他针对这个的回答结果：{transcript} 问题：说话人的情绪如何？告诉我是否有暴躁、愤怒或者厌烦的情绪即可（尽可能可能简短）。"
    return chat(prompt, history=None)

def all_context_predict(chat_history):
    """Assess fraud risk over the whole conversation.

    Flattens the (bot, user) turn pairs into a transcript and asks the LLM
    whether the speaker performed any fraud-sensitive operation or appears
    at risk of being scammed.
    """
    transcript = "".join(
        f"\n反诈民警:{turn[0]}\n说话人{turn[1]}\n" for turn in chat_history
    )
    prompt = f"请你阅读下面对话，然后告诉我他针对这个的回答结果：{transcript} 问题：根据上述说话人是否有进行可能被诈骗的敏感操作或者有被诈骗可能？（尽可能可能简短）"
    return chat(prompt, history=None)

def chat(prompt, history=None):
    """Single-turn chat with the global LLM; `history` is accepted but unused."""
    response = llm.chat(prompt)
    return response.content

def csv_file_changed(csv_file):
    """Reload the dialogue script from `csv_file` and install it globally.

    BUG FIX: the old code only bound a *local* `speech_df` while the
    comment and print claimed it was set globally, so an uploaded CSV
    never took effect. Declare the global so the module-level table is
    actually replaced. Still returns the new DataFrame for callers.
    """
    global speech_df
    new_df = pd.read_csv(csv_file, encoding='utf-8', dtype=str)
    # Same NaN sentinel as the startup load, so lookups stay consistent.
    speech_df = new_df.fillna("空")

    print(f"Set speech_df to global: {speech_df}")
    return speech_df

# Gradio UI wiring.
def setup_gradio_interface():
    """Build and launch the Gradio chat interface.

    Lays out the chatbot, text input, hidden state, and evaluation widgets,
    wires each button to its handler (predict / reset / emotion_predict /
    all_context_predict), then launches a shared server on 0.0.0.0:6698.
    Blocks until the server exits.
    """
    with gr.Blocks() as demo:
        gr.Markdown("""<p align="center"><img src="https://shengbucket.oss-cn-hangzhou.aliyuncs.com/files/longyuan.png" style="height: 80px"/><p>""")
        # fileupload
        # with gr.Row():
            # file_upload_button = gr.Button("📁上传")
        gr.Markdown("""<center><font size=8>👮‍♂️ Ding-XiaoYi-AI</center>""")
        Q_first = "喂，你好这个是常州市反诈骗中心，你最近是否有接到自称购物平台客服或者物流快递人员的电话，以“你购买的商品有问题”“快递丢失”或“注销会员”等理由要给您赔偿、退款、注销得？"
        # Server-side conversation state, separate from the display widget.
        chat_history = gr.State(value=[(None, Q_first)])
        # csv_file = gr.File(label="上传文件", type="filepath") 
        with gr.Row():
            user_input = gr.Textbox(label="文字输入您的回复", placeholder="请在这里输入...")
            with gr.Column():
                submit_button = gr.Button("📞发送")
                reset_button = gr.Button("🔨重置")

        chatbot_display = gr.Chatbot(label='小蚁AI', elem_classes="control-height",value=[(None, "您正在参与常州小蚁AI测试 👮‍♂️"),(None, Q_first)])
        # q_id is an editable string textbox holding the current question id.
        q_id = gr.Textbox(label="问题ID", placeholder="在这里输入...",value="1")
        now_result = gr.Textbox(label="当前内容判断结果", placeholder="在这里输入...",value="")
        with gr.Row():
            emo_button = gr.Button("🤬情绪评估")
            all_button = gr.Button("💯风险评估")
        Q_emo_response = gr.Textbox(label="情绪评估", placeholder="等待输入中...",value="")
        all_context_response = gr.Textbox(label="风险评估", placeholder="等待输入中...",value="")
        # df = ""
        # file_upload_button.click(
        #     fn=csv_file_changed,
        #     inputs=[csv_file], 
        #     # outputs=[df]
        # )
        # print(f"In main: {df}")
        submit_button.click(
            fn=predict,
            inputs=[chat_history, user_input,q_id], 
            outputs=[chatbot_display,q_id,now_result]
        )
        reset_button.click(
            fn=reset,
            inputs=[], 
            outputs=[chatbot_display,chat_history,now_result,q_id,Q_emo_response,all_context_response]
        )
        emo_button.click(
            fn=emotion_predict,
            inputs=[chat_history], 
            outputs=[Q_emo_response]
        )
        all_button.click(
            fn=all_context_predict,
            inputs=[chat_history], 
            outputs=[all_context_response]
        )
    demo.launch(share=True,inbrowser=True,server_port=6698,server_name="0.0.0.0")

# Launch the Gradio app when run as a script.
if __name__ == '__main__':
    setup_gradio_interface()