import pandas as pd
import arrow, re, glob, json
from playwright.sync_api import sync_playwright, Playwright
from pathlib import Path

""" 读取Excel文件,筛选role列值为'来访者'的content列的数据,筛选role列值为'治疗师'的content列的数据
Keyword arguments:
file -- 输入Excel文件路径
Return
df -- pandas.DataFrame
"""

def formate_ipt_data(file: str):
    # 读取Excel文件
    df = pd.read_excel(file, sheet_name='Sheet1')

    # 检查数据结构
    print(f"原始数据形状: {df.shape}")
    print(f"角色分布:\n{df['role'].value_counts()}")

    # 校验df第一行的role是否为来访者
    if df.iloc[0]['role'] != '来访者':
        raise Exception('df第一行的role不是来访者')

    # 创建一个新的DataFrame来存储配对的对话
    paired_data = []
    user_message = None

    for i, row in df.iterrows():
        if row['role'] == '来访者' or row['role'] == '患者':
            # 如果当前行是来访者，保存消息
            user_message = row['content']
        elif (row['role'] == '治疗师' or row['role'] == '治疗师') and user_message is not None:
            # 如果当前行是治疗师/治疗师，且有前面的来访者消息，则形成一对
            paired_data.append({
                'user': user_message,
                'counselor': row['content']
            })
            user_message = None  # 重置来访者消息

    # 创建新的DataFrame
    result_df = pd.DataFrame(paired_data)

    # 检查是否有未配对的消息
    if user_message is not None:
        print("警告: 有未配对的来访者消息")

    # 检查结果
    print(f"处理后数据形状: {result_df.shape}")
    print(f"NaN值数量: {result_df.isna().sum()}")

    return result_df

"""导出txt文件
Keyword arguments:
df -- pandas.DataFrame
output_txt_file -- 输出txt文件路径
Return -- txt文件
"""
def to_txt(df,output_txt_file):
    # 将问答对转化写入.txt文档中用换行符分割
    with open(output_txt_file, 'w', encoding='utf-8') as f:
        for i in range(len(df)):
            row = df.iloc[i]
            f.write(f"第{i+1}轮对话:\n")
            if i == 0:
                f.write(f"上下文：\n")
            else:
                f.write(f"上下文：" + f"来访者：{df.iloc[i-1]['user'].replace('\n', '')}" + f"\\n"+ f"治疗师：{df.iloc[i-1]['counselor'].replace('\n', '')}\n")
            f.write(f"用户输入（来访者）：{row['user'].replace('\n', '')}\n")
            f.write(f"参考回复（治疗师）：{row['counselor'].replace('\n', '')}\n")
            f.write(f"来访者意图：\n")
            f.write(f"治疗师意图：\n")
            f.write(f"应答策略：\n\n")

"""将标注好的excel数据转化成txt文档用于RAG导入
Keyword arguments:
file -- excel文件路径
output_txt_file -- 输出txt文件路径
chunk -- 输出格式（1：上下文+用户输入+ 参考回复+ 应答策略，2：上下文 + 意图+（来访者说的话/治疗师说的话），3：上下文 + 意图+（来访者说的话/治疗师说的话）+ 应答策略）
"""
def excel_to_txt(file:str,output_txt_file:str,chunk: int = 1):
    # 读取Excel文件
    df = pd.read_excel(file)

    with open(output_txt_file, 'w', encoding='utf-8') as f:
        for i in range(len(df)):
            row = df.iloc[i]
            if chunk == 1:
                if i == 0:
                    f.write(f"上下文：\n")
                else:
                    f.write(f'上下文：{re.sub(r'\\n', '', row['context'].replace('\n', ''))}\n')
                f.write(f"用户输入（来访者）：{row['user'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"参考回复（治疗师）：{row['counselor'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"来访者意图：{row['user_intentions'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"治疗师意图：{row['counselor_intentions'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"应答策略：{row['response_strategies'].replace('\\n', '').replace('\n', '')}\n\n")
            elif chunk == 2:
                if i == 0:
                    f.write(f"上下文：\n")
                else:
                    f.write(f'上下文：{re.sub(r'\\n', '', row['context'].replace('\n', ''))}\n')
                f.write(f"[{row['user_intentions'].replace('\\n', '').replace('\n', '')}]{row['user'].replace('\\n', '')}\n")
                f.write(f"[{row['counselor_intentions'].replace('\\n', '').replace('\n', '')}]{row['counselor'].replace('\\n', '')}\n\n")
            elif chunk == 3:
                if i == 0:
                    f.write(f"上下文：\n")
                else:
                    f.write(f'上下文：{re.sub(r'\\n', '', row['context'].replace('\n', ''))}\n')
                f.write(f"用户输入（来访者）：{row['user'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"应答策略：{row['response_strategies'].replace('\\n', '').replace('\n', '')}\n\n")
            elif chunk == 4:
                f.write(f"来访者意图：{row['user_intentions'].replace('\\n', '').replace('\n', '')}\n")
                f.write(f"应答策略：{row['response_strategies'].replace('\\n', '').replace('\n', '')}\n\n")


"""将标注好的excel数据转化成csv格式
Keyword arguments:
file -- excel文件路径
output_csv_file -- 输出txt文件路径
chunk -- 输出格式（1：上下文+用户输入+ 参考回复+ 应答策略，2：上下文 + 意图+（来访者说的话/治疗师说的话），3：上下文 + 意图+（来访者说的话/治疗师说的话）+ 应答策略）
"""
def excel_to_csv(file: str,output_csv_file: str,chunk: int = 1):
    # 读取Excel文件
    df = pd.read_excel(file, sheet_name='Sheet1')
    contents = []
    for i in range(len(df)):
        row = df.iloc[i]
        if i == 0:
            context = "上下文：" + '\n'
        else:
            context = "上下文：" + re.sub(r'\\n', '   ', row['context'])
        user = "用户输入（来访者）：" + row['user'].replace('\\n', '').replace('\n', '') + '\n'
        counselor = "参考回复（治疗师）：" + row['counselor'].replace('\\n', '').replace('\n', '') + '\n'
        user_intentions = "来访者意图：" + row['user_intentions'].replace('\\n', '').replace('\n', '') + '\n'
        counselor_intentions = "治疗师意图：" + row['counselor_intentions'].replace('\\n', '').replace('\n', '') + '\n'
        response_strategies = "应答策略：" + row['response_strategies'].replace('\\n', '').replace('\n', '') + '\n'
        if chunk == 1:
            content = context + user + counselor + user_intentions + counselor_intentions + response_strategies
        elif chunk == 2:
            content = context + f"[{row['user_intentions'].replace('\\n', '')}]{row['user'].replace('\\n', '')}\n" + f"[{row['counselor_intentions'].replace('\\n', '')}]{row['counselor'].replace('\\n', '')}\n"
        elif chunk == 3:
            content = context + f"[{row['user_intentions'].replace('\\n', '')}]{row['user'].replace('\\n', '')}\n" + f"[{row['counselor_intentions'].replace('\\n', '')}]{row['counselor'].replace('\\n', '')}\n" + response_strategies
        else:
            content = ''
        contents.append(content)
    result = pd.DataFrame({
        '分段内容': contents
        })
    result.to_csv(output_csv_file, index=False)


""" 使用llm改写RAG查询
Keyword arguments:
p -- playwright.sync_api
df -- dataframe 
Return -- dataframe
"""
def run1(p: Playwright, df: pd.DataFrame):
    """Rewrite each client utterance with an LLM web app driven via Playwright.

    Row 0 is kept verbatim; for every later row, up to the 5 previous turns
    are pasted as context and the remote agent rewrites the client utterance.

    Keyword arguments:
    p -- playwright.sync_api Playwright instance
    df -- DataFrame with 'user' and 'counselor' columns
    Return:
    DataFrame with columns 'user' (rewritten) and 'counselor' (unchanged)
    """
    chromium = p.chromium  # or "firefox" or "webkit".
    browser = chromium.launch(
        headless=False,
        slow_mo=50,
    )
    # NOTE(review): this `context` (browser context) is later shadowed by the
    # string `context` built inside the loop, and the browser is never
    # closed — confirm both are intentional.
    context = browser.new_context(
        storage_state="./output/login.json",
        # viewport={ 'width': 1920, 'height': 1080 }
    )
    # Open an empty tab.
    page = context.new_page()

    domain = "http://172.16.12.243" # login address (239 per original note)
    url = "/app/9e9ee3f5-bd30-486f-aa93-b9952a6f36ae/configuration"  # query-rewriting agent
    page.goto(domain + url)

    # Accumulators for rewritten client text and the untouched replies.
    users = []
    counselors = []


    for i in range(len(df)):
        row = df.iloc[i]
        user_content = row["user"]
        counselor_content = row["counselor"]

        if i == 0:
            # The first turn has no history, so it is not rewritten.
            rewrite_content = user_content
        else:
            # Build the context from at most the 5 previous turns.
            context = ''
            hist_turns = 5
            start = max(0, i - hist_turns)
            print(f"start: {start}")
            for x in range(start, i):
                context = context + "来访者：" + df.iloc[x]["user"].replace('\n','') +  "\n治疗师：" + df.iloc[x]["counselor"].replace('\n','') + '\n'

            # Click the clear button.
            page.get_by_role("button", name="清空").click()
            # Fill in the context.
            page.get_by_placeholder("上下文").fill(context)
            # Fill in the client utterance.
            page.get_by_placeholder("来访者输入").fill(user_content)
            # Click run.
            page.get_by_role("button", name="运行").click()

            # Wait for the span containing "个字符" (character count) to appear.
            page.wait_for_selector('span:has-text("个字符")')

            while True:
                # Read the current character count.
                last_token_txt = page.query_selector('span:has-text("个字符")').inner_text()

                # Wait 1000 ms before sampling the counter again.
                page.wait_for_timeout(1000)

                token_txt = page.query_selector('span:has-text("个字符")').inner_text()

                # Counter unchanged -> the rewrite has finished streaming.
                if last_token_txt == token_txt:
                    p_tags = page.query_selector(".markdown-body").query_selector_all("p")
                    rewrite_content = ''.join([p_tag.inner_text() for p_tag in p_tags])
                    rewrite_content = rewrite_content.replace('\n', '').replace('重写来访者输入：', '')
                    print(f"上下文：\n{context}")
                    print(f'来访者：{user_content}')
                    print(f"rewrite_content: {rewrite_content}")
                    print(f"------------------------")
                    page.wait_for_timeout(2000)
                    break

        users.append(rewrite_content)
        counselors.append(counselor_content)

    # Alternative output shape kept for reference: long role/content format.
    # result  = pd.DataFrame({
    #     'role': ['来访者' if i % 2 == 0 else '治疗师' for i in range(len(users)*2)],
    #     'content': [item for pair in zip(users, counselors) for item in pair]
    #     })
    # Wide format: one column of client text, one of therapist text.
    result = pd.DataFrame({
        'user': users,
        'counselor': counselors
        })
    return result


""" 使用llm进行标注
Keyword arguments:
p -- playwright.sync_api
df  -- dataframe 标注数据
Return -- dataframe
"""
def run2(p: Playwright,df:  pd.DataFrame):
    chromium = p.chromium  # or "firefox" or "webkit".
    browser = chromium.launch(
        headless=False,
        slow_mo=50,
    )
    context = browser.new_context(
        storage_state="./output/login.json",
        # viewport={ 'width': 1920, 'height': 1080 }
    )
    # 开启一个空Tab页
    page = context.new_page()

    domain = "http://172.16.12.243" # 登录地址243
    url = "/app/e0696211-f394-4b40-8570-537c736a8d38/configuration"  # 数据标注的智能体
    page.goto(domain + url)

    # 初始化标签数组
    areas = []
    emotional_states = []
    communication_patterns = []
    core_interpersonal_beliefs = []
    stages = []
    therapist_interventions = []
    therapist_questions = []
    focuses = []
    session_structures = []

    for  i in range(len(df)):
        row = df.iloc[i]
        user_content = row["user"]
        counselor_content = row["counselor"]

        # 点击清空按钮
        page.get_by_role("button", name="清空").click()
        # 输入来访者输入内容
        page.get_by_placeholder("来访者").fill(user_content)
        # 输入治疗师输入内容
        page.get_by_placeholder("治疗师").fill(counselor_content)
        # 点击运行按钮
        page.get_by_role("button", name="运行").click()

        # 等待span标签中包含“个字符”这几个子元素出现
        page.wait_for_selector('span:has-text("个字符")')


        while True:
            # 等待1000毫秒
            page.wait_for_timeout(1000)

            # 获取span标签中包含“个字符”的元素的文本内容
            last_token_txt =  page.query_selector('span:has-text("个字符")').inner_text()

            # 等待500毫秒
            page.wait_for_timeout(1000)

            token_txt = page.query_selector('span:has-text("个字符")').inner_text()

            # 如果两个文本内容相同，则跳出循环
            if last_token_txt == token_txt:
                # 循环次数为10次
                count = 0
                while count  < 10:
                    # 获取最新的code标签的内容
                    code_loc = page.query_selector('div[node] code')

                    if code_loc:
                        # 获取最新的code标签的文本内容
                        code_loc_txt = code_loc.inner_text()
                        # 把行号那列干掉（正则或简单，split）每行前面都是 “数字+空格” 直接删掉
                        no_line_txt = re.sub(r'^\s*\d+\s*', '', code_loc_txt, flags=re.M)
                        break
                    else:
                        page.wait_for_timeout(1000)
                        count += 1
                        print(f"第{count}次获取不到数据")
                else:
                    print(f"获取不到数据")
                    # 再次点击运行按钮
                    page.get_by_role("button", name="运行").click()
                    continue

                try:
                    json_data = json.loads(no_line_txt)
                except json.JSONDecodeError as e:
                    print(f"JSON解析失败: {e}")
                    print(f"原始数据: {token_txt}")
                    continue

                # 获取标签内容
                if "area" in  json_data.get("metadata", {}):
                    area = json_data["metadata"]["area"]
                else:
                    area = ""

                if "emotional_state" in  json_data.get("metadata", {}):
                    emotional_state = json_data["metadata"]["emotional_state"]
                else:
                    emotional_state = ""

                if "communication_pattern" in  json_data.get("metadata", {}):
                    communication_pattern = json_data["metadata"]["communication_pattern"]
                else:
                    communication_pattern = ""

                if "core_interpersonal_belief" in  json_data.get("metadata", {}):
                    core_interpersonal_belief = json_data["metadata"]["core_interpersonal_belief"]
                else:
                    core_interpersonal_belief = ""

                if "stage" in  json_data.get("metadata", {}):
                    stage = json_data["metadata"]["stage"]
                else:
                    stage = ""

                if "therapist_intervention" in  json_data.get("metadata", {}):
                    therapist_intervention = json_data["metadata"]["therapist_intervention"]
                else:
                    therapist_intervention = ""

                if "therapist_question" in  json_data.get("metadata", {}):
                    therapist_question = json_data["metadata"]["therapist_question"]
                else:
                    therapist_question = ""

                if "focus" in  json_data.get("metadata", {}):
                    focus = json_data["metadata"]["focus"]
                else:
                    focus = ""

                if "session_structure" in  json_data.get("metadata", {}):
                    session_structure = json_data["metadata"]["session_structure"]
                else:
                    session_structure = ""

                # 将标签内容添加到数组中
                areas.append(area)
                emotional_states.append(emotional_state)
                communication_patterns.append(communication_pattern)
                core_interpersonal_beliefs.append(core_interpersonal_belief)
                stages.append(stage)
                therapist_interventions.append(therapist_intervention)
                therapist_questions.append(therapist_question)
                focuses.append(focus)
                session_structures.append(session_structure)
                break

    # 将对话的内容转化成角色”和对话内容的DataFram
    result  = pd.DataFrame({
        'user': df["user"],
        'counselor': df["counselor"],
        'area':  areas,
        'emotional_state': emotional_states,
        'communication_pattern': communication_patterns,
        'core_interpersonal_belief': core_interpersonal_beliefs,
        'stage': stages,
        'therapist_intervention': therapist_interventions,
        'therapist_question': therapist_questions,
        'focus': focuses,
        'session_structure': session_structures
        })
    return result


""" 使用llm进行标注
Keyword arguments:
p -- playwright.sync_api
df  -- dataframe 标注数据
Return -- dataframe
"""
def run3(p: Playwright,df:  pd.DataFrame):
    chromium = p.chromium  # or "firefox" or "webkit".
    browser = chromium.launch(
        headless=False,
        slow_mo=50,
    )
    context = browser.new_context(
        storage_state="./output/login.json",
        # viewport={ 'width': 1920, 'height': 1080 }
    )
    # 开启一个空Tab页
    page = context.new_page()

    domain = "http://172.16.12.243" # 登录地址243
    url = "/app/e0696211-f394-4b40-8570-537c736a8d38/configuration"  # 数据标注的智能体
    page.goto(domain + url)

    for  i in range(len(df)):
        row = df.iloc[i]

        if any(pd.isna(row[col]) for col in ["question_type", "skill", "focus"]):
            user_content = row["user"]
            counselor_content = row["counselor"]

            input_content = "来访者：" + user_content + "\n" + "治疗师：" + counselor_content

            # 点击清空按钮
            page.get_by_role("button", name="清空").click()
            # 输入查询内容
            page.get_by_placeholder("Query").fill(input_content)
            # 点击运行按钮
            page.get_by_role("button", name="运行").click()

            # 等待span标签中包含“个字符”这几个子元素出现
            page.wait_for_selector('span:has-text("个字符")')

            while True:
                # 获取span标签中包含“个字符”的元素的文本内容
                last_token_txt = page.query_selector('span:has-text("个字符")').inner_text()

                # 等待500毫秒
                page.wait_for_timeout(1000)

                token_txt = page.query_selector('span:has-text("个字符")').inner_text()

                # 如果两个文本内容相同，则跳出循环
                if last_token_txt == token_txt:
                    # 获取最新的code标签的文本内容，增加错误处理
                    code_loc = page.query_selector('div[node] code.language-json, div[node] code.language-jsonl')
                    code_loc_txt = code_loc.inner_text()

                    #把行号那列干掉（正则或简单，split）每行前面都是 “数字+空格” 直接删掉
                    no_line_txt = re.sub(r'^\s*\d+\s*', '', code_loc_txt, flags=re.M)

                    # 转化成json，添加错误处理
                    if not no_line_txt.strip():
                        print("JSON数据为空")
                        continue
                    try:
                        json_data = json.loads(no_line_txt)
                    except json.JSONDecodeError as e:
                        print(f"JSON解析失败: {e}")
                        print(f"原始数据: {no_line_txt}")
                        continue

                    # 添加标签数据到dataframe，标签数据在metadata中

                    df.loc[i, "area"] = json_data["metadata"]["area"]
                    df.loc[i, "emotional_state"] = json_data["metadata"]["emotional_state"]
                    df.loc[i, "communication_pattern"] = json_data["metadata"]["communication_pattern"]
                    df.loc[i, "core_interpersonal_belief"] = json_data["metadata"]["core_interpersonal_belief"]
                    df.loc[i, "stage"] = json_data["metadata"]["stage"]
                    df.loc[i, "therapist_intervention"] = json_data["metadata"]["therapist_intervention"]
                    df.loc[i, "therapist_question"] = json_data["metadata"]["therapist_question"]
                    df.loc[i, "focus"] = json_data["metadata"]["focus"]
                    df.loc[i, "session_structure"] = json_data["metadata"]["session_structure"]
                    break

    return df

"""通用方法：根据labels列表生成n合一标签
Keyword arguments:
df -- dataframe 标注数据
labels -- 标签列表
"""


def combine_labels(df, labels):
    # 筛选存在于DataFrame中的标签
    valid_labels = [label for label in labels if label in df.columns]

    # 如果没有有效标签，返回空字符串Series
    if not valid_labels:
        return pd.Series([''] * len(df), index=df.index, name='label')

    # 同时处理所有列：转换为字符串、去空格、替换空值和'nan'
    processed = df[valid_labels].astype(str).apply(lambda x: x.str.strip())
    processed = processed.replace(['', 'nan'], '')

    # 合并非空值，使用下划线连接
    # 先过滤每行的空字符串，再用下划线连接
    return processed.apply(lambda row: '_'.join(filter(None, row)), axis=1).rename('label')

"""将标注数据按照标签相同，导出txt文件
Keyword arguments:
df -- dataframe 标注数据
save_to -- 输出文件路径
"""
def marked_to_txt(df: pd.DataFrame, save_to: str):
    labels = [
        "area",
        "emotional_state",
        "communication_pattern",
        "core_interpersonal_belief",
        "stage",
        "therapist_intervention",
        "therapist_question",
        "focus",
        "session_structure",
    ]
    # 构造“n合一”标签，用下划线"_"连接
    df["label"] = combine_labels(df, labels)

    # 按照label标签进行分组
    grouped = df.groupby("label")

    # 遍历每个标签
    for label, group in grouped:
        # 导出文件地址
        out_txt_file = save_to + f"/{label}.txt"
        # 将每个标签的对话内容保存到txt文件中
        df_to_txt(group, out_txt_file)


"""将dataframe导出为txt文件
df -- dataframe 标注数据
output_txt_file -- 输出文件路径
segment -- 分隔符
"""
def df_to_txt(df: pd.DataFrame, output_txt_file: str | Path, segment: str = '------'):
    with open(output_txt_file, "w", encoding="utf-8") as f:
        for i in range(len(df)):
            row = df.iloc[i]
            if i == 0:
                f.write(f"{segment}\n")
            f.write(f"来访者：{row['user'].replace('\\n', '').replace('\n', '')}\n")
            f.write(f"参考回复：{row['counselor'].replace('\\n', '').replace('\n', '')}\n")
            f.write(f"{segment}\n")
    print(f"数据已保存到 {output_txt_file}")

"""合并文件夹中的数据
Keyword arguments:
root_dir -- 输入文件夹根目录
pathname -- 匹配的 “路径模式”，必填
Return -- dataframe
"""
def merge_data(root_dir: str | Path, pathname: str = "/*.xlsx") -> pd.DataFrame:
    # 读取文件夹中的所有excel文件
    files = glob.glob(root_dir + pathname)
    # 读取文件夹中的所有xlsx文件中的数据
    dfs = [pd.read_excel(file) for file in files]
    # 合并所有数据
    merged_df = pd.concat(dfs)
    return merged_df

if __name__ == '__main__':
    # Dialogue transcript codes to process; uncomment to include more.
    dialogue_codes = [
        # 'H006',
        # 'H013',
        # 'H015',
        # 'Z016',
        # 'Z038',
        # 'Z039',
        # 'zhou',
        'XYY01'
    ]
    # input_file = './input/ipt/marked/' + dialogue_code + '_marked.xlsx'  # 已完成标注的文件，用于生成RAG导入的txt文件或再次标注


    # out_xlsx_file = './output/ipt/' + dialogue_code + "_" + arrow.now().format('YYMMDDHHmmss') + '.xlsx'
    # output_txt_file = './output/ipt/' + dialogue_code + "_" + arrow.now().format('YYMMDDHHmmss') + '.txt'
    # output_csv_file = './output/ipt/' + dialogue_code + "_" + arrow.now().format('YYMMDDHHmmss') + '.csv'
    output_path = './output/ipt'  # output directory


    """
    步骤1：使用llm改写RAG查询
    """
    # Step 1 (disabled): rewrite RAG queries with the LLM via run1.
    # with sync_playwright() as pw:
    #     for dialogue_code in dialogue_codes:
    #         input_file = './input/ipt/source/' + dialogue_code + '.xlsx'  # 原始数据，用户查询改写
    #         output_xlsx_file = './output/ipt/' + dialogue_code + "_rewrite_" + arrow.now().format('YYMMDDHHmmss') + '.xlsx'
    #         dt =  formate_ipt_data(input_file)   # 读取Excel文件
    #         # query改写后的excel文件
    #         dt = run1(pw,dt)
    #         #  将DataFrame保存为excel文件
    #         dt.to_excel(output_xlsx_file, index=False)
    #         print(f"数据已保存到 {output_xlsx_file}")

    """
    步骤2-1：使用llm进行重写数据标注
    """
    # Step 2-1 (disabled): annotate the rewritten data with the LLM via run2.
    # with sync_playwright() as pw:
    #     for dialogue_code in dialogue_codes:
    #         input_file = './input/ipt/rewrite/' + dialogue_code + '_rewrite.xlsx'  # 改写完成的数据，用于标注
    #         output_xlsx_file = './output/ipt/' + dialogue_code + "_marked_" + arrow.now().format('YYMMDDHHmmss') + '.xlsx'
    #         #  读取Excel文件
    #         dt = pd.read_excel(input_file)
    #         # 进行改写数据标注
    #         dt = run2(pw,dt)
    #         # 将DataFrame保存为excel文件
    #         dt.to_excel(output_xlsx_file, index=False)
    #         print(f"数据已保存到 {output_xlsx_file}")

    """
    步骤2-2：使用llm进行筛选已标注的数据中为空的再次进行标注
    """
    # Step 2-2 (disabled): re-annotate rows with empty labels via run3.
    # NOTE(review): this disabled block references `dialogue_code` and
    # `input_file`, which are not defined at this point — fix before enabling.
    # with sync_playwright() as pw:
    #     output_xlsx_file = './output/ipt/' + dialogue_code + "_marked_" + arrow.now().format('YYMMDDHHmmss') + '.xlsx'
    #     #  读取Excel文件
    #     dt = pd.read_excel(input_file)
    #     # 筛选已经标注的数据，并筛选出为空的进行重新标注
    #     dt = run3(pw,dt)
    #     # 将DataFrame保存为excel文件
    #     dt.to_excel(output_xlsx_file, index=False)
    #     print(f"数据已保存到 {output_xlsx_file}")

    """
    步骤2-2：将重写的数据合并成一个文件，并转化成txt文件
    """
    # Active step: merge rewritten files and export them as one txt file.
    input_path = './input/ipt/rewrite/'  # folder of finished annotation files for RAG-import txt generation
    output_txt_file = './output/ipt/' + '无标注对话' + arrow.now().format('YYMMDDHHmmss') + '.txt'
    # Only xlsx files whose name contains XYY01 are merged.
    pathname = "/*XYY01*.xlsx"  # glob "path pattern" (required); converts only the XYY01 files
    dt = merge_data(input_path, pathname)
    # Export the merged DataFrame as a txt file.
    df_to_txt(dt, output_txt_file)

    """
    步骤3：将标注的文件按照标注的标签的内容导出对应的标签文件
    """
    # Step 3 (disabled): export per-label txt files from the annotated data.
    # input_path = './input/ipt/marked/'  # 已完成标注的文件夹，用于生成RAG导入的txt文件
    # dt = merge_data(input_path)
    # # 将标注的文件按照标注的标签的内容导出对应的标签文件
    # marked_to_txt(dt,output_path)
    # print(dt)
    # print(f"数据已保存到 {output_path}")