from typing import List

from llama_index.core import (Settings, SimpleDirectoryReader, StorageContext,
                              VectorStoreIndex)
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.elasticsearch import (AsyncDenseVectorStrategy,
                                                     ElasticsearchStore)
from xinference.client import Client

###################### Initialization ######################
# Default Elasticsearch index used when callers do not specify one.
DEFAULT_INDEX_NAME = "iflytech_assistant_default"
# Elasticsearch connection settings.
# NOTE(review): credentials are hardcoded here — move to environment
# variables / a secrets store before deploying.
ES_HOST = "localhost"
ES_PORT = 9200
ES_USER = "elastic"
ES_PASSWORD = "telecom12345"
# Embeddings are served by a local Xinference instance exposing an
# OpenAI-compatible API; all llama-index components pick this up via Settings.
Settings.embed_model = OpenAIEmbedding(
    model_name="bge-large-zh-v1.5", api_base="http://localhost:9997/v1"
)


# Reranker served by the same Xinference deployment (native client API).
xinference_client = Client("http://localhost:9997")
ranking_model = xinference_client.get_model("bge-reranker-large")

# Process-wide cache of ElasticsearchStore instances, keyed by index name,
# so repeated calls reuse the same connection/strategy objects.
VECTOR_STORE_CACHE = {}


def get_vector_store(index_name: str):
    """Return the ElasticsearchStore for *index_name*, creating and caching it on first use.

    Stores are kept in the module-level ``VECTOR_STORE_CACHE`` so every
    caller for the same index shares one store instance.
    """
    store = VECTOR_STORE_CACHE.get(index_name)
    if store is None:
        # First request for this index: build the store and remember it.
        store = ElasticsearchStore(
            index_name=index_name,
            es_url=f"http://{ES_HOST}:{ES_PORT}",
            es_user=ES_USER,
            es_password=ES_PASSWORD,
            retrieval_strategy=AsyncDenseVectorStrategy(),
        )
        VECTOR_STORE_CACHE[index_name] = store
    return store


############################################################


def index_unstructed_data(folder, index_name: str = DEFAULT_INDEX_NAME):
    """Load every file under *folder* (recursively, including hidden-adjacent
    paths via ``exclude_hidden=False``), split it into token chunks, and index
    the chunks into the Elasticsearch index *index_name*.
    """
    store = get_vector_store(index_name)
    documents = SimpleDirectoryReader(
        input_dir=folder,
        exclude_hidden=False,
        recursive=True,
    ).load_data()

    # Chunk the raw documents into token-bounded nodes.
    chunks = IngestionPipeline(transformations=[TokenTextSplitter()]).run(
        documents=documents
    )
    # Constructing the index writes the nodes (with embeddings) into ES;
    # the index object itself is not needed afterwards.
    VectorStoreIndex(
        chunks,
        storage_context=StorageContext.from_defaults(vector_store=store),
        show_progress=True,
    )


def index(nodes: List[TextNode], index_name: str = DEFAULT_INDEX_NAME):
    """Index pre-built *nodes* into the Elasticsearch index *index_name*.

    Building the VectorStoreIndex embeds and persists the nodes as a side
    effect; the returned index object is intentionally discarded.
    """
    ctx = StorageContext.from_defaults(vector_store=get_vector_store(index_name))
    VectorStoreIndex(nodes, storage_context=ctx, show_progress=True)


def search(
    query: str,
    threshold: float = 0.5,
    index_name: str = DEFAULT_INDEX_NAME,
    filters=None,
    rerank=True,
) -> List[TextNode]:
    """Retrieve nodes matching *query* from the index *index_name*.

    Args:
        query: Natural-language search string.
        threshold: Minimum score a result must exceed (strictly) to be kept.
        index_name: Elasticsearch index to search.
        filters: Optional mapping of metadata key -> value. A list value
            matches any of its elements (OR); multiple keys are ANDed.
            Defaults to no filtering.
        rerank: When True, re-score the retrieved nodes with the
            bge-reranker model and use those scores for thresholding/sorting.

    Returns:
        Retrieved TextNodes with ``score > threshold``, sorted by score
        descending.
    """
    vector_store = get_vector_store(index_name)
    # Renamed from `index` to avoid shadowing the module-level index() function.
    vs_index = VectorStoreIndex.from_vector_store(vector_store)

    # `filters=None` replaces the original mutable default argument `{}`.
    metadata_filters = []
    for key, value in (filters or {}).items():
        if isinstance(value, list):
            # Any-of semantics: OR together one exact-match filter per value.
            cur = MetadataFilters(
                filters=[ExactMatchFilter(key=key, value=v) for v in value],
                condition="or",
            )
        else:
            cur = ExactMatchFilter(key=key, value=value)
        metadata_filters.append(cur)
    metadata_filters = MetadataFilters(filters=metadata_filters, condition="and")

    retriever = vs_index.as_retriever(filters=metadata_filters)
    results = retriever.retrieve(query)

    if rerank and results:
        scores = ranking_model.rerank([item.text for item in results], query)
        # BUG FIX: the xinference rerank response lists results sorted by
        # relevance, each entry carrying the original document position in
        # its "index" field. The old code assigned scores positionally
        # (results[i] <- scores["results"][i]), attaching scores to the
        # wrong nodes. Map each score back through its "index" instead.
        for entry in scores["results"]:
            results[entry["index"]].score = entry["relevance_score"]

    results = [item for item in results if item.score > threshold]
    results.sort(key=lambda x: x.score, reverse=True)
    return results
