from langchain_milvus.retrievers import MilvusCollectionHybridSearchRetriever
from langchain_milvus import Milvus
from langchain_milvus.utils.sparse import BM25SparseEmbedding
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
from patagent.embedding.patsnap_embeddings import PatsnapEmbeddings
from langchain_openai import OpenAIEmbeddings
from patagent.tools import get_confluence_list_by_pageid
from patagent.constant import (
    CONFLUENCE_PAGEIDS,
    TOPK_CONFLUENCE, 
    MILVUS_URI, 
    MILVUS_TOKEN,
    MILVUS_DB,
    IS_REBUILD,
    PATSNAP_API_URL,
    PATSNAP_API_KEY,
    OPENAI_MODEL_HEADERS
)
from pymilvus import (
    Collection,
    CollectionSchema,
    DataType,
    FieldSchema,
    WeightedRanker,
    connections,
    utility
)


# Load Confluence pages and flatten them into parallel (text, metadata) lists.
# Milvus caps VARCHAR fields at 65535 chars (see the schema below), so any
# oversized document is split into slices of at most MAX_CHUNK_CHARS; every
# slice carries a copy of the parent document's metadata.
MAX_CHUNK_CHARS = 65000

pageIds = CONFLUENCE_PAGEIDS
docs_list = []
for pageId in pageIds:
    docs_list.extend(get_confluence_list_by_pageid(pageId, [("h1", "Header 1"), ("h2", "Header 2"), ("tr", "Table Row")]))

docs = []
metadatas = []
for doc in docs_list:
    text = doc["content"]
    if len(text) > MAX_CHUNK_CHARS:
        # Slice into pieces of at most MAX_CHUNK_CHARS characters.
        # (The original divided the other way round — step = len//65000 —
        # which shredded long documents into thousands of tiny fragments.)
        chunks = [text[i:i + MAX_CHUNK_CHARS] for i in range(0, len(text), MAX_CHUNK_CHARS)]
        docs.extend(chunks)
        # extend, not append: keeps metadatas index-aligned with docs
        # (the insert loop below pairs them positionally).
        metadatas.extend([doc["metadata"]] * len(chunks))
    else:
        docs.append(text)
        metadatas.append(doc["metadata"])

# Dense embeddings go through the Patsnap OpenAI-compatible gateway;
# sparse embeddings are BM25 statistics fitted on the freshly built corpus.
dense_embedding_func = OpenAIEmbeddings(openai_api_base=PATSNAP_API_URL, openai_api_key=PATSNAP_API_KEY, default_headers=OPENAI_MODEL_HEADERS)
sparse_embedding_func = BM25SparseEmbedding(corpus=docs)

# Global pymilvus connection used by the Collection/utility calls below.
connections.connect(uri=MILVUS_URI, token=MILVUS_TOKEN, db_name=MILVUS_DB)

# Collection and field names shared by the schema, inserts, and retrievers.
col_name = "rag_patent"
pk_field = "pk"
dense_field = "dense_vector"
sparse_field = "sparse_vector"
text_field = "text"
metadata_field = "metadata"

# Drop and rebuild on demand. Guarded with has_collection because
# Collection(col_name).drop() raises when the collection does not exist
# yet (e.g. the very first run with IS_REBUILD enabled).
if IS_REBUILD and utility.has_collection(col_name):
    utility.drop_collection(col_name)

# Init or load collection: build schema + indexes and insert all chunks on
# first run; otherwise just load the existing collection into memory.
if not utility.has_collection(col_name):
    print('---INIT CONFLUENCE COLLECTION---')

    # Probe the dense embedding dimension with the first chunk.
    # (The original used docs[1], which raised IndexError whenever the
    # corpus contained a single chunk.)
    dense_dim = len(dense_embedding_func.embed_query(docs[0]))
    fields = [
        FieldSchema(
            name=pk_field,
            dtype=DataType.VARCHAR,
            is_primary=True,
            auto_id=True,
            max_length=100,
        ),
        FieldSchema(name=dense_field, dtype=DataType.FLOAT_VECTOR, dim=dense_dim),
        FieldSchema(name=sparse_field, dtype=DataType.SPARSE_FLOAT_VECTOR),
        # 65535 is the Milvus VARCHAR limit; chunking above stays under it.
        FieldSchema(name=text_field, dtype=DataType.VARCHAR, max_length=65535),
        FieldSchema(name=metadata_field, dtype=DataType.JSON),
    ]

    schema = CollectionSchema(fields=fields, enable_dynamic_field=False)
    collection = Collection(
        name=col_name, schema=schema, consistency_level="Strong"
    )
    dense_index = {"index_type": "FLAT", "metric_type": "IP"}
    collection.create_index(dense_field, dense_index)
    sparse_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP"}
    collection.create_index(sparse_field, sparse_index)

    # Embed every chunk; zip pairs each text with its metadata positionally
    # (same pairing as the original manual index counter).
    entities = [
        {
            text_field: doc,
            sparse_field: sparse_embedding_func.embed_query(doc),
            dense_field: dense_embedding_func.embed_query(doc),
            metadata_field: metadata,
        }
        for doc, metadata in zip(docs, metadatas)
    ]
    collection.insert(entities)
    # Flush AFTER inserting so the new segments are actually persisted
    # (the original flushed before the insert, sealing nothing).
    collection.flush()
    collection.load()
else:
    collection = Collection(col_name)
    collection.load()
print('---LOADED CONFLUENCE COLLECTION---')

# Per-field ANN search parameters (inner-product metric for both vectors).
sparse_search_params = {"metric_type": "IP"}
dense_search_params = {"metric_type": "IP", "params": {}}

# Hybrid retriever: searches the dense and sparse fields in one request and
# merges the two result lists with a weighted ranker (dense 1.0, sparse 0.7).
_hybrid_config = {
    "collection": collection,
    "rerank": WeightedRanker(1, 0.7),
    "anns_fields": [dense_field, sparse_field],
    "field_embeddings": [dense_embedding_func, sparse_embedding_func],
    "field_search_params": [dense_search_params, sparse_search_params],
    "top_k": 5,
    "text_field": text_field,
    "output_fields": [text_field, metadata_field],
}
hybrid_retriever = MilvusCollectionHybridSearchRetriever(**_hybrid_config)

# Plain dense-vector retriever over the same collection (the hybrid
# retriever above additionally uses the sparse BM25 field).
vector_store = Milvus(
    collection_name=col_name,
    embedding_function=dense_embedding_func,
    vector_field=dense_field,
    # db_name added: the pymilvus connection above targets MILVUS_DB, but
    # this separate langchain-managed connection previously defaulted to
    # the "default" database and could miss the collection entirely.
    connection_args={"uri": MILVUS_URI, "token": MILVUS_TOKEN, "db_name": MILVUS_DB},
)
confluence_retriever = vector_store.as_retriever(search_kwargs={'k': TOPK_CONFLUENCE})

## Reranker for recalled documents (disabled). NOTE(review): if re-enabled,
## the names below must match this file: top_n is TOPK_CONFLUENCE (not
## topk_confluence) and base_retriever is confluence_retriever (no bare
## `retriever` variable exists here).
# model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
# compressor = CrossEncoderReranker(model=model, top_n=TOPK_CONFLUENCE)
# confluence_retriever = ContextualCompressionRetriever(
#     base_compressor=compressor, base_retriever=confluence_retriever
# )