# coding = utf-8
# @Time    : 2024-10-20  10:02:51
# @Author  : zhaosheng@nuaa.edu.cn
# @Describe: AI Customer Service Evaluation System DEMO

# ========================================================================================
# Example:
# python ./app/customer_service_evaluation_system/main.py --input_dir /datasets_hdd/customer_downloadwavs/20241015/ --output_dir 20241015
# ========================================================================================

import os
import csv
import argparse
import subprocess
import pandas as pd
from tqdm import tqdm
from datetime import datetime
from rich.table import Table
from rich.console import Console
# rich's print (imported below) shadows the builtin print for styled output
from rich import print
console = Console()

import torchaudio

from dspeech import STT
from dagent_llm import LLM
from dguard import DguardModel as dm
from utils import analyze_emotion
from cfg import NOTES, OPTIONS

# Command-line interface: input audio directory, output directory, and the
# CSV table that defines the rating dimensions.
parser = argparse.ArgumentParser(description="AI Customer Service Evaluation System")
parser.add_argument("--input_dir", type=str, required=True, help="Directory containing audio files")
parser.add_argument("--output_dir", type=str, required=True, help="Output directory for results")
parser.add_argument("--basis_csv_path", type=str, default="rating_basis_simple.csv", help="Path to the evaluation basis table")
args = parser.parse_args()

INPUT_DIR = args.input_dir
OUTPUT_DIR = args.output_dir
BASIS_CSV_PATH = args.basis_csv_path

# Initialize models
# Speech-to-text model (dspeech); also used below for emotion classification.
stt_model = STT(model_name="paraformer-zh") # , hotwords="为临服务"
# Speaker-diarization model (dguard). NOTE(review): `channel` is mutated at
# runtime by get_diarization_content(); diar_max_num_spks presumably caps
# speakers per file — confirm against dguard documentation.
dm_model = dm(
    embedding_model_names=["eres2net_cn_common_200k", "campp_cn_common_200k"],
    device="cuda",
    channel=0,
    start_time=0,
    diar_max_num_spks=4,
)
# LLM client (dagent_llm) backed by an Ollama server; performs the rating.
dagent = LLM("ollama")

def init_folder(input_dir, output_dir, basis_csv_path, print_table=True):
    """Prepare the output folder layout and load the rating-basis table.

    Args:
        input_dir: directory that must already contain the audio files.
        output_dir: root directory for results; created if missing.
        basis_csv_path: CSV file describing the evaluation dimensions.
        print_table: when True, render the loaded dimensions as a rich table.

    Returns:
        (tmp_wav_dir, txt_dir, rating_basic_list): the temp-wav folder, the
        transcript folder, and the basis table as a list of dict records.

    Exits the process when input_dir or basis_csv_path does not exist.
    """
    if not os.path.exists(input_dir):
        console.print(f"[red]Directory {input_dir} not found![/red]")
        exit()
    txt_dir = os.path.join(output_dir, "txt")
    tmp_wav_dir = os.path.join(output_dir, "tmp_wav")
    # Create the output root and both sub-folders (idempotent).
    for folder in (output_dir, tmp_wav_dir, txt_dir):
        os.makedirs(folder, exist_ok=True)
    if not os.path.exists(basis_csv_path):
        console.print(f"[red]File {basis_csv_path} not found![/red]")
        exit()
    # Blank cells become "" so downstream string handling never sees NaN.
    rating_basic_list = (
        pd.read_csv(basis_csv_path, encoding="utf-8", header=0)
        .fillna("")
        .to_dict(orient="records")
    )
    if print_table:
        console.print(f"Rating basic list #{len(rating_basic_list)} Dimensions.")
        table = Table(show_header=True, header_style="bold magenta")
        for column_name in ("监控维度", "监控标准", "标签", "选项"):
            table.add_column(column_name, style="dim", width=12)
        for record in rating_basic_list:
            table.add_row(record["监控维度"], record["监控标准"], record["标签"], record["选项"])
        console.print(table)
    console.rule("Start processing files...")
    return tmp_wav_dir, txt_dir, rating_basic_list

# Function to get diarization and transcribed content from a wav file
def get_diarization_content(file_path, emotion_time_threshold=2, emotion_threshold=0.8,
        # New in Version 2
        speaker_label_in_channels=["Speaker 1"]):
    """Diarize, transcribe and emotion-tag each labelled channel of a wav file.

    Args:
        file_path: path of the wav file to process.
        emotion_time_threshold: minimum segment duration (seconds) before
            emotion classification is attempted.
        emotion_threshold: minimum classifier score for the emotion label
            to be kept.
        speaker_label_in_channels: one display label per audio channel
            (e.g. ["客服", "客户"]); channel i is attributed to label i.
            NOTE: mutable default argument — harmless here because the list
            is only read, never mutated.

    Returns:
        List of [start_time, end_time, text, speaker_label, emotion_label,
        emotion_label2] entries, sorted by start_time. A channel that raises
        is logged and silently dropped.
    """
    # First get the channel count of <file_path>; there must be at least as
    # many channels as speaker labels.
    data,sr = torchaudio.load(file_path)
    channel_num = data.size(0)
    assert len(speaker_label_in_channels) <= channel_num, \
        f"len(speaker_label_in_channels):{len(speaker_label_in_channels)} must be less than or equal to channel_num:{channel_num}"
    # For each labelled channel, run diarization and collect the transcript.
    full_content = []
    for i in range(min(len(speaker_label_in_channels),channel_num)):
        # Point the diarizer at the current channel (mutates the module-level
        # dm_model in place).
        dm_model.channel = i
        # NOTE(review): this local is never used — it looks like the intent
        # was `dm_model.diar_max_num_spks = 1` (one speaker per channel);
        # confirm before changing.
        diar_max_num_spks = 1
        speaker_label = speaker_label_in_channels[i]
        try:
            r = dm_model.diarize(file_path)
            # Each diarization entry carries start/end at indices 1 and 2.
            # (This loop variable shadows the `data` tensor loaded above.)
            for data in r:
                # spk_label = data[3]
                start_time = data[1]
                end_time = data[2]
                generate_text = stt_model.transcribe_file(file_path, start=start_time, end=end_time, channel=i)
                # Only classify emotion on segments long enough to be reliable.
                if end_time - start_time > emotion_time_threshold:
                    emotion = stt_model.emo_classify_file(file_path, start=start_time, end=end_time, channel=i)
                    # Keep the highest-scoring label; strip any "/" suffix.
                    emotion_label = emotion["labels"][emotion["scores"].index(max(emotion["scores"]))]
                    emotion_label = emotion_label.split("/")[0] # happy, neutral, sad, angry, surprised, fearful
                    # Classifier label set, for reference:
                    # 0: angry, 1: disgusted, 2: fearful, 3: happy,
                    # 4: neutral, 5: other, 6: sad, 7: surprised, 8: unknown
                    emotion_score = max(emotion["scores"])
                    if emotion_score > emotion_threshold:
                        emotion_text = f"(emotion：{emotion_label} with score: {emotion_score:.2f})"
                    else:
                        # Low-confidence prediction: drop the label.
                        emotion_text = ""
                        emotion_label = ""

                    # Second, independent emotion opinion from utils.analyze_emotion.
                    emotion_label2 = analyze_emotion(file_path,start=start_time,end=end_time,language="zh")
                else:
                    emotion_text = ""
                    emotion_label = ""
                    emotion_label2 = ""
                # emotion_text is currently not appended to the transcript text.
                now_content = f"{generate_text}" #+ emotion_text # \nSpeaker {spk_label}: 
                full_content.append([start_time, end_time, now_content, speaker_label, emotion_label, emotion_label2])
        except Exception as e:
            console.print(f"[red]Error processing file {file_path}: {str(e)}[/red]")
            # full_content.append([0, 0, "", speaker_label])
    # Merge all channels into one chronological stream.
    full_content = sorted(full_content,key=lambda x:x[0])
    return full_content

# Function to process a single wav file and return all evaluation results
def evaluate_wav_file(content, dimension_list):
    """Rate a conversation transcript against every dimension, one LLM call each.

    Args:
        content: the conversation transcript to evaluate.
        dimension_list: records from the rating-basis CSV; each has the keys
            监控维度 (dimension), 监控标准 (standard), 标签 (label), 选项 (options).

    Returns:
        Dict mapping "<dimension>_<label>" to the chosen option and
        "<dimension>_<label>_原因" to the reason; on an LLM failure the value
        is "出错了" and the reason is the exception text.
    """
    output = {}
    for item in dimension_list:
        key = item["监控维度"] + "_" + item["标签"]
        try:
            # Use per-dimension options when the CSV provides more than one,
            # otherwise fall back to the global OPTIONS list.
            # (str.split always returns at least one element, so only the
            # length check matters.)
            options = item["选项"].split("|")
            now_options = options if len(options) > 1 else OPTIONS
            result, reason = dagent.analyze_content(content, item["监控维度"], item["监控标准"],
                                            options=now_options,
                                            multiple=False, add_to_history=False, notes=NOTES)
            output[key] = result
            output[key + "_原因"] = reason
        except Exception as e:
            # Record the failure in the output instead of aborting the file.
            console.print(f"[red]Error evaluating content: {str(e)}[/red]")
            output[key] = "出错了"
            output[key + "_原因"] = str(e)
    return output

# Function to process a single wav file and return all evaluation results
def evaluate_wav_file_v2(content, dimension_list, num_processes=6):
    """Rate a conversation transcript against every dimension in parallel.

    Builds one prompt per dimension and submits the whole batch to the LLM
    through dagent.choose_multi_process.

    Args:
        content: the conversation transcript to evaluate.
        dimension_list: records from the rating-basis CSV (keys: 监控维度,
            监控标准, 标签, 选项).
        num_processes: worker processes for the LLM calls. Default is 6 —
            the original hard-coded 6 inside the body, silently ignoring
            this parameter; it is now honored while preserving the old
            effective behavior for callers that pass nothing.

    Returns:
        Dict mapping "<dimension>_<label>" to the chosen option and
        "<dimension>_<label>_原因" to the reason.
    """
    output = {}
    options_list = []
    prompt_list = []
    option_type_list = []
    need_reason_list = []
    multiple_list = []
    max_try_list = []
    examples_list = []
    notes_list = []
    for item in dimension_list:
        # Use per-dimension options when the CSV provides more than one,
        # otherwise fall back to the global OPTIONS list.
        options = item["选项"].split("|")
        now_options = options if len(options) > 1 else OPTIONS
        dimension = item["监控维度"]
        standard = item["监控标准"]
        # NOTE: the continuation lines' leading whitespace is part of the
        # prompt literal and is kept exactly as before.
        prompt = f"<Content to be evaluated>\n{content}<Content to be evaluated>\nBased on the above content, \
                please evaluate whether it meets the requirements for the <{dimension}> dimension, \
                    \nThe evaluation criteria are:\n{standard}>"
        options_list.append(now_options)
        prompt_list.append(prompt)
        option_type_list.append(f"Does the content meet the requirements for the {dimension} dimension?")
        need_reason_list.append(True)
        multiple_list.append(False)
        notes_list.append(NOTES)
        examples_list.append("")
        max_try_list.append(3)

    # One batched, multi-process call covering every dimension.
    result_list_multi = dagent.choose_multi_process(
        options_list=options_list,
        prompt_list=prompt_list,
        option_type_list=option_type_list,
        need_reason_list=need_reason_list,
        multiple_list=multiple_list,
        max_try_list=max_try_list,
        examples_list=examples_list,
        notes_list=notes_list,
        num_processes=num_processes
    )

    for i, result in enumerate(result_list_multi):
        _name = dimension_list[i]["监控维度"]
        _label = dimension_list[i]["标签"]
        choice = result["choice"]
        reason = result["reason"]
        # The LLM client may return either a scalar or a single-element list.
        output[f"{_name}_{_label}"] = choice[0] if isinstance(choice, list) else choice
        output[f"{_name}_{_label}_原因"] = reason[0] if isinstance(reason, list) else reason

    return output

# def add_emo(label):
#     # 生气\厌恶恐惧
#     # 开心
#     # 中立
#     # 其他
#     # 难过
# 检测异常情绪
def check_emotion(label):
    """Return True when *label* names an abnormal (negative) emotion.

    Recognizes both the Chinese labels (生气/厌恶/恐惧/难过) and their
    English counterparts (angry/disgusted/fearful/sad); anything else —
    including the empty string — is considered normal.
    """
    abnormal_labels = {
        "生气", "厌恶", "恐惧", "难过",
        "angry", "disgusted", "fearful", "sad",
    }
    return label in abnormal_labels

def concat_conversation(list_data):
    """Flatten diarized segments into one dialogue transcript.

    Args:
        list_data: segments shaped [start, end, text, speaker, emo1, emo2],
            as produced by get_diarization_content().

    Returns:
        (conversation, total_unnormal_emotion, total_emotion): the transcript
        string ("speaker: text" per line, abnormal emotions tagged inline),
        the number of abnormal-emotion hits, and the number of non-empty
        emotion labels seen (each of the two emotion models counts
        separately per segment).
    """
    total_unnormal_emotion = 0
    total_emotion = 0
    conversation = ""
    # BUG FIX: the original iterated the module-global `content` instead of
    # the `list_data` parameter; it only worked by accident because the sole
    # caller happened to have a global named `content` in scope.
    for seg in list_data:
        conversation += f"{seg[3]}: {seg[2]} "
        # Tag each model's label independently: index 4 is model 1, index 5
        # is model 2; empty strings mean "no label for this segment".
        for model_idx, label in ((1, seg[4]), (2, seg[5])):
            if label:
                total_emotion += 1
                if check_emotion(label):
                    conversation += f"<情绪异常>[模型{model_idx}]:{label}</情绪异常> "
                    total_unnormal_emotion += 1
        conversation += "\n"
    return conversation, total_unnormal_emotion, total_emotion

if __name__ == "__main__":
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # INIT FOLDER: create output/result/<OUTPUT_DIR> and load the basis table.
    output_dir = os.path.join(base_dir, "output/result", OUTPUT_DIR)
    tmp_wav_dir, txt_dir, rating_basic_list = init_folder(INPUT_DIR, output_dir, os.path.join(base_dir, BASIS_CSV_PATH))

    # OUTPUT CSV FILE (output_dir is already absolute, so no extra join needed).
    csv_file = os.path.join(output_dir, f"evaluation_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv")

    # START PROCESSING
    with open(csv_file, mode="w", newline="", encoding="utf-8") as file:
        writer = csv.writer(file)
        # Header: one result column plus one reason column per dimension.
        # Built as a list (not join/split) so dimension names containing
        # commas cannot corrupt the header.
        header = ["ID"]
        for _ in rating_basic_list:
            header.append(f"{_['监控维度']}({_['标签']})")
            header.append(f"{_['监控维度']}原因({_['标签']})")
        header += ["总情绪数", "异常情绪数", "异常情绪比例"]
        writer.writerow(header)
        for filename in tqdm(os.listdir(INPUT_DIR)):
            # dguard generates helper files with "channel" in the name next
            # to the originals; skip those and anything that is not a .wav.
            if not (filename.endswith(".wav") and "channel" not in filename):
                continue
            try:
                file_path = os.path.join(INPUT_DIR, filename)
                file_id = os.path.splitext(filename)[0]
                txt_output_path = os.path.join(txt_dir, f"{file_id}.txt")
                if os.path.exists(txt_output_path) and os.path.getsize(txt_output_path) > 0:
                    # A non-empty transcript already exists: treat the file
                    # as processed and skip it.
                    continue
                # Convert the audio to 16 kHz with ffmpeg. List-form argv
                # (shell=False) avoids quoting problems with spaces or
                # special characters in filenames.
                file_path_new = os.path.join(tmp_wav_dir, f"{file_id}.wav")
                subprocess.run(["ffmpeg", "-y", "-i", file_path, "-ar", "16000", file_path_new])
                file_path = file_path_new
                # Heuristic channel assignment: long file ids (>19 chars)
                # put the agent (客服) on channel 0, otherwise the customer.
                # NOTE(review): presumably tied to the recorder's naming
                # scheme — confirm against the upstream file source.
                if len(file_path.split("/")[-1].split(".")[0]) > 19:
                    speaker_label_in_channels = ["客服", "客户"]
                else:
                    speaker_label_in_channels = ["客户", "客服"]
                segments = get_diarization_content(file_path, speaker_label_in_channels=speaker_label_in_channels)
                content, total_unnormal_emotion, total_emotion = concat_conversation(segments)
                # Persist the transcript so reruns can skip this file.
                if content:
                    with open(txt_output_path, "w", encoding="utf-8") as txt_file:
                        txt_file.write(content)
                if len(content) < 5:
                    # Transcript too short to be worth rating.
                    continue
                # Get evaluation results from the LLM.
                results = evaluate_wav_file_v2(content, rating_basic_list)
                if results:
                    # Guard the ratio against division by zero (the original
                    # raised ZeroDivisionError here when no emotion labels
                    # were produced, silently dropping the row).
                    ratio = f"{total_unnormal_emotion / total_emotion:.2f}" if total_emotion else "0.00"
                    writer.writerow([file_id] + list(results.values()) + [total_emotion, total_unnormal_emotion, ratio])
                    file.flush()  # Flush so partial results survive a crash.
                    console.rule(f"[bold blue]Wrote row: {file_id} - {results}[/bold blue]")
            except Exception as e:
                # Include the actual filename (the original printed "(unknown)").
                console.print(f"[red]Error processing file {filename}: {str(e)}[/red]")
    console.print(f"[bold green]Process completed! Results are saved in {output_dir}[/bold green]")