import argparse

import pandas as pd
from elasticsearch import Elasticsearch
from llama_index.core.schema import TextNode

from iflytech_assistant.assistant.dataclasses import RagData
from iflytech_assistant.es import index

# --- CLI arguments ---------------------------------------------------------
parser = argparse.ArgumentParser()

# Positional CSV path plus the Elasticsearch index to (re)populate.
parser.add_argument("file", help="file path")
parser.add_argument("-i", "--index", help="index name")

args = parser.parse_args()

# Elasticsearch client for the local cluster.
# NOTE(review): credentials are hard-coded here — consider moving them to
# environment variables or a secrets store. `http_auth` is deprecated in the
# 8.x elasticsearch-py client in favour of `basic_auth`; confirm which client
# version this project pins before changing it.
es = Elasticsearch("http://localhost:9200", http_auth=("elastic", "telecom12345"))

# Target index; all delete_by_query and index operations below use this name.
INDEX_NAME = args.index

# Load the click-through data from the CSV given on the command line.
df = pd.read_csv(args.file)

# Keep only well-performing rows (clickrate >= 0.08) and project down to the
# two columns we actually index: the exposed content and the user input.
df = df.loc[df["clickrate"] >= 0.08, ["exposure_content", "userinput"]]

# Collapse all exposure contents that share the same user input into a single
# newline-joined string, yielding one row per distinct user input.
df = df.groupby("userinput").agg(lambda vals: "\n".join(vals)).reset_index()

# Persona/style tags: one RagData node is created per (user input, tag) pair
# in the indexing loop below. These strings are stored verbatim in the index,
# so they must not be translated or altered. Rough English glosses in comments.
tags = [
    "八卦话唠",  # gossipy chatterbox
    "爽文大女主",  # triumphant heroine
    "鬼马精灵",  # quirky sprite
    "搞笑达人",  # comedy expert
    "治愈小太阳",  # healing little sunshine
    "社牛E人",  # socially confident extrovert
    "高情商大师",  # high-EQ master
    "委婉优雅",  # tactful and elegant
    "毒舌嘴替",  # sharp-tongued mouthpiece
    "撒娇卖萌",  # coquettish and cute
    "温柔体贴",  # gentle and considerate
    "彩虹夸夸",  # rainbow complimenter
    "高冷傲娇",  # aloof tsundere
    "直接了当",  # straightforward
    "阴阳怪气",  # passive-aggressive
    "加油打气",  # cheering on
    "亲切友好",  # warm and friendly
    "二次元",  # anime/2D culture
]

# Re-index every (user input, tag) combination: for each grouped row, first
# purge any existing documents whose content exactly matches the user input,
# then build one TextNode per persona tag and bulk-index them at the end.
nodes = []
for _, row in df.iterrows():
    content = row["userinput"]

    # Exact-match delete of previously indexed documents for this input.
    delete_query = {"query": {"term": {"content.keyword": content}}}
    response = es.delete_by_query(index=INDEX_NAME, body=delete_query)
    print(f"Deleted documents containing '{content}': {response['deleted']} documents")

    # The grouped exposure contents are newline-joined; split back into
    # individual examples once per row (invariant across tags).
    examples = row["exposure_content"].split("\n")
    for tag in tags:
        rag_data = RagData(
            input=content,
            target="朋友",
            tag=tag,
            mode="polish",
            examples=examples,
        )
        nodes.append(rag_data.to_text_node())

# Bulk-index all freshly built nodes into the target index.
index(nodes, INDEX_NAME)
