import uuid
from typing import List

from elasticsearch8 import Elasticsearch,helpers

"""
ES工具类

向量文档相关操作 关联 es库的索引结果api/rag_knowledge_base_api.py/create_index()

"""
class T2es:
    """Elasticsearch utility wrapper for vector-document (RAG) operations.

    The vector index mapping is expected to match the index created by
    api/rag_knowledge_base_api.py/create_index().
    """

    #: Number of bulk actions sent to Elasticsearch per request.
    BULK_CHUNK_SIZE = 500

    def __init__(self, hosts=None):
        """Create an Elasticsearch client.

        Args:
            hosts: List of node URLs. Defaults to a single local node.
        """
        # None sentinel instead of a mutable default argument (shared list).
        if hosts is None:
            hosts = ['http://127.0.0.1:9200']
        self.es = Elasticsearch(hosts)

    def create_index(self, index_name, mapping=None):
        """Create *index_name* (optionally with *mapping*) if it does not exist."""
        if not self.es.indices.exists(index=index_name):
            if mapping:
                self.es.indices.create(index=index_name, body=mapping)
            else:
                self.es.indices.create(index=index_name)
            print(f"Index {index_name} created successfully.")
        else:
            print(f"Index {index_name} already exists.")

    def delete_index(self, index_name):
        """Delete *index_name* if it exists."""
        if self.es.indices.exists(index=index_name):
            self.es.indices.delete(index=index_name)
            print(f"Index {index_name} deleted successfully.")
        else:
            print(f"Index {index_name} does not exist.")

    def insert_document(self, index_name, document):
        """Index a single raw document (server assigns the document id)."""
        self.es.index(index=index_name, body=document)
        print(f"Document inserted into {index_name} successfully.")

    def search_documents(self, index_name, query):
        """Run *query* against *index_name* and return the raw hit list (may be empty)."""
        result = self.es.search(index=index_name, body=query)
        return result.get('hits', {}).get('hits', [])

    def add_vector_data(self, index_name, kb_id, file_name, content, content_vector):
        """Store a single vector document.

        NOTE(review): this method writes the generated id under "u_id",
        while add_vector_docs / get_vector_docs use "file_id" — confirm
        which key the index mapping actually expects.
        """
        # uuid1 (time/MAC based) kept for backward compatibility with
        # existing data; uuid4 would avoid leaking host information.
        uid = str(uuid.uuid1())
        data = {
            "kb_id": kb_id,
            "file_name": file_name,
            "u_id": uid,
            "content": content,
            "content_vector": content_vector,
        }
        self.es.index(index=index_name, document=data)

    def add_vector_docs(self, index_name, kb_id, file_name, texts, vectors):
        """Bulk-index text chunks with their embedding vectors.

        Args:
            index_name: Target index.
            kb_id: Knowledge-base id, usable to group/classify documents.
            file_name: Source file name stored on every chunk.
            texts: Iterable of text chunks.
            vectors: Iterable of embedding vectors, parallel to *texts*.
        """
        # One file_id shared by all chunks of this file.
        uid = str(uuid.uuid1())
        actions = [
            {
                "_index": index_name,
                "_source": {
                    "kb_id": kb_id,
                    "file_name": file_name,
                    "file_id": uid,
                    "content": chunk,
                    "content_vector": vector,
                },
            }
            for chunk, vector in zip(texts, vectors)
        ]
        # Bug fix: the previous loop `range(0, len(actions) - 1, 500)` sent
        # the ENTIRE actions list once per stride step (duplicating every
        # document) and sent nothing at all when there was exactly one
        # action. Send each slice of BULK_CHUNK_SIZE actions exactly once.
        for start in range(0, len(actions), self.BULK_CHUNK_SIZE):
            helpers.bulk(self.es, actions[start:start + self.BULK_CHUNK_SIZE])

    def get_vector_docs(self, es_index: str, vector: List[float], kb_ids: List[str], score_min_threshold: float,
                 size_max_threshold: int, file_id: str = None):
        """Cosine-similarity search over the ``content_vector`` field.

        Args:
            es_index: Index to search.
            vector: Query embedding.
            kb_ids: Restrict hits to these knowledge bases (skipped if falsy).
            score_min_threshold: Minimum score; scores are cosineSimilarity + 1.0,
                i.e. in [0, 2].
            size_max_threshold: Maximum number of hits to return.
            file_id: Optional exact filter on a single file.

        Returns:
            List of dicts with keys kb_id, file_id, file_name, content, score,
            sorted by descending score.
        """
        body = {
            "size": size_max_threshold,
            "min_score": score_min_threshold,
            "query": {
                "script_score": {
                    "query": {
                        "bool": {
                            "must": []
                        }
                    },
                    # +1.0 shifts cosine similarity into [0, 2] because
                    # Elasticsearch rejects negative script scores.
                    "script": {
                        "source": "cosineSimilarity(params.query_vector, 'content_vector') + 1.0",
                        "params": {
                            "query_vector": vector}
                    }
                }
            },
            "sort": [
                {
                    "_score": {
                        "order": "desc"
                    }
                }
            ]
        }

        # Apply the knowledge-base filter only when kb_ids is non-empty.
        if kb_ids:
            body["query"]["script_score"]["query"]["bool"]["must"].append({
                "terms": {
                    "kb_id": kb_ids
                }
            })

        if file_id:
            body["query"]["script_score"]["query"]["bool"]["must"].append({
                "term": {
                    "file_id": file_id
                }
            })

        response = self.es.search(index=es_index, body=body)
        hits = response["hits"]["hits"]
        results = []
        for hit in hits:
            source = hit["_source"]
            results.append(
                {
                    "kb_id": source["kb_id"],
                    # .get: documents written by add_vector_data store the id
                    # under "u_id" instead of "file_id"; avoid a KeyError.
                    "file_id": source.get("file_id", source.get("u_id")),
                    "file_name": source["file_name"],
                    "content": source["content"],
                    "score": hit["_score"]
                }
            )
        return results

    def close(self):
        """Release the underlying client connection."""
        self.es.close()

# Usage example: create an index, insert, search, then clean up.
if __name__ == "__main__":
    es_util = T2es()
    index_name = "test_index"
    mapping = {
        "mappings": {
            "properties": {
                "title": {"type": "text"},
                "content": {"type": "text"}
            }
        }
    }
    try:
        es_util.create_index(index_name, mapping)

        document = {
            "title": "Sample Title",
            "content": "Sample Content"
        }
        es_util.insert_document(index_name, document)

        # NOTE(review): a freshly indexed document may not be visible to
        # search until the index refreshes — this query can return no hits.
        query = {
            "query": {
                "match": {
                    "title": "Sample"
                }
            }
        }
        search_results = es_util.search_documents(index_name, query)
        for hit in search_results:
            print(hit['_source'])

        es_util.delete_index(index_name)
    finally:
        # Fix: the original script never released the client connection.
        es_util.close()
