from elasticsearch import Elasticsearch, helpers
import json
import hashlib


def create_es_connection(es_host, es_auth):
    """Open a connection to an Elasticsearch cluster.

    :param es_host: Elasticsearch host URL
    :param es_auth: basic-auth credentials as a (username, password) tuple
    :return: connected Elasticsearch client object
    """
    print("正在创建 Elasticsearch 连接...")
    client = Elasticsearch([es_host], basic_auth=es_auth)
    print("Elasticsearch 连接创建成功")
    return client


def load_mapping(mapping_file):
    """Read an index mapping definition from a JSON file.

    :param mapping_file: path to the mapping.json file
    :return: parsed mapping as a dict
    """
    with open(mapping_file, "r", encoding="utf-8") as fh:
        return json.load(fh)


def create_index(es, index_name, mapping):
    """Create an Elasticsearch index with the given mapping, if absent.

    Idempotent: an existing index is left untouched.

    :param es: Elasticsearch client object
    :param index_name: name of the index to create
    :param mapping: dict with the index mapping/settings
    :return: True once the index exists (pre-existing or newly created)
    """
    # Fix: the original returned True when the index existed but None after
    # creating it (and silently skipped creation); return consistently and
    # report what happened.
    if es.indices.exists(index=index_name):
        print(f"Index {index_name} already exists; skipping creation.")
        return True
    es.indices.create(index=index_name, body=mapping)
    print(f"Index {index_name} created successfully with mapping.")
    return True


def delete_es_index(es, index_name):
    """Delete an Elasticsearch index when it is present.

    :param es: Elasticsearch client object
    :param index_name: name of the index to delete
    :return: None
    """
    # Guard clause: nothing to do for a missing index.
    if not es.indices.exists(index=index_name):
        print(f"Index {index_name} does not exist.")
        return
    es.indices.delete(index=index_name)
    print(f"Index {index_name} deleted successfully.")


def store_chunks(sources, chunks, es, index_name):
    """Bulk-index document chunks into Elasticsearch, deduplicating by content.

    The MD5 hex digest of each chunk is used as the document ``_id``, so
    identical chunks map to the same id and cannot accumulate as duplicates.

    :param sources: list of source file names, aligned pairwise with ``chunks``
    :param chunks: list of text chunks to index
    :param es: Elasticsearch client object
    :param index_name: name of the target index
    :raises Exception: re-raises any bulk-indexing failure after logging it
    """
    try:
        actions = [
            {
                "_index": index_name,
                # Content hash as the document id: identical chunks collapse
                # into a single document.
                "_id": hashlib.md5(chunk.encode("utf-8")).hexdigest(),
                "_source": {"content": chunk, "Slang": [], "file_name": file_source},
            }
            for file_source, chunk in zip(sources, chunks)
        ]

        if actions:
            # NOTE: bulk's default op type is "index", which OVERWRITES an
            # existing document with the same _id rather than skipping it;
            # the net effect is still one document per distinct chunk.
            success, failed = helpers.bulk(es, actions, stats_only=True)
            print(f"Processed documents: {success} succeeded, {failed} failed")

    except Exception as e:
        # Top-level boundary: log for the operator, then propagate.
        print(f"Error storing chunks: {str(e)}")
        raise


# from create_base_v3 import read_qa_json

# Usage example — connection settings for the target cluster.
# SECURITY NOTE(review): credentials are hard-coded; move them to environment
# variables or a secrets store before sharing or deploying this script.
es_host = "http://es-svc-0op7z.zhaojin.svc.cluster.local:9200"
es_auth = ("elastic", "infini_truth_ai")
index_name = "mindqa"
mapping_file = r"/root/data/mindqa/mapping.json"


def main():
    """Rebuild the mindqa index: wipe existing docs, apply mapping, load chunks."""
    es = create_es_connection(es_host, es_auth)
    # Clear any existing documents so the index reflects only this load.
    delete_all = {"query": {"match_all": {}}}
    if es.indices.exists(index=index_name):
        es.delete_by_query(index=index_name, body=delete_all)
    mapping = load_mapping(mapping_file)
    create_index(es, index_name, mapping)

    file_path1 = r"/root/data/mindqa/qac_data/all.json"
    sources_path = r"/root/data/mindqa/qac_data/all_sources.json"
    with open(sources_path, "r", encoding="utf-8") as f:
        sources = json.load(f)
    with open(file_path1, "r", encoding="utf-8") as f:
        chunks = json.load(f)
    # _, _, sources, chunks = read_qa_json(file_path1)
    store_chunks(sources, chunks, es, index_name)


# Fix: the original ran network/file I/O at import time; guard the entry
# point so importing this module has no side effects.
if __name__ == "__main__":
    main()

# """
# curl -u "elastic:infini_truth_ai" http://36.103.203.10:31092
# """
