# -*- coding: utf-8 -*-
# pip install sentence-transformers
from sentence_transformers import SentenceTransformer, CrossEncoder


def test_embedding_model():
    """Demo: encode sentences with a dense bi-encoder and print pairwise similarities.

    Downloads ``all-MiniLM-L6-v2`` on first run (network I/O); output is
    printed rather than returned.
    """
    # 1. Load a pretrained Sentence Transformer model
    model = SentenceTransformer("all-MiniLM-L6-v2")

    # The sentences to encode
    sentences = [
        "The weather is lovely today.",
        "It's so sunny outside!",
        "He drove to the stadium.",
    ]

    # 2. Calculate embeddings by calling model.encode()
    embeddings = model.encode(sentences)
    print(embeddings.shape)
    # [3, 384]

    # 3. Calculate the embedding similarities (uses the model's default
    #    similarity function; symmetric matrix with 1.0 on the diagonal)
    similarities = model.similarity(embeddings, embeddings)
    print(similarities)
    # tensor([[1.0000, 0.6660, 0.1046],
    #         [0.6660, 1.0000, 0.1411],
    #         [0.1046, 0.1411, 1.0000]])


def test_reranker_model():
    """Demo: score and rank passages for a query with a CrossEncoder reranker.

    Downloads ``cross-encoder/ms-marco-MiniLM-L6-v2`` on first run
    (network I/O); output is printed rather than returned.
    """
    # 1. Load a pretrained CrossEncoder model
    model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L6-v2")

    # The texts for which to predict similarity scores
    query = "How many people live in Berlin?"
    passages = [
        "Berlin had a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.",
        "Berlin has a yearly total of about 135 million day visitors, making it one of the most-visited cities in the European Union.",
        "In 2013 around 600,000 Berliners were registered in one of the more than 2,300 sport and fitness clubs.",
    ]

    # 2a. Either predict scores for (query, passage) pairs directly
    scores = model.predict([(query, passage) for passage in passages])
    print(scores)
    # => [8.607139 5.506266 6.352977]

    # 2b. Or rank a list of passages for a query; return_documents=True
    #     includes the passage text in each result dict
    ranks = model.rank(query, passages, return_documents=True)

    print("Query:", query)
    for rank in ranks:
        print(f"- #{rank['corpus_id']} ({rank['score']:.2f}): {rank['text']}")
    """
    Query: How many people live in Berlin?
    - #0 (8.61): Berlin had a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.
    - #2 (6.35): In 2013 around 600,000 Berliners were registered in one of the more than 2,300 sport and fitness clubs.
    - #1 (5.51): Berlin has a yearly total of about 135 million day visitors, making it one of the most-visited cities in the European Union.
    """

def test_sparse_encoder_model():
    """Demo: encode sentences with a sparse (SPLADE) encoder and check sparsity.

    Downloads ``naver/splade-cocondenser-ensembledistil`` on first run
    (network I/O); output is printed rather than returned.
    """
    # Local import: SparseEncoder is only needed by this demo
    from sentence_transformers import SparseEncoder

    # 1. Load a pretrained SparseEncoder model
    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")

    # The sentences to encode
    sentences = [
        "The weather is lovely today.",
        "It's so sunny outside!",
        "He drove to the stadium.",
    ]

    # 2. Calculate sparse embeddings by calling model.encode()
    embeddings = model.encode(sentences)
    print(embeddings.shape)
    # [3, 30522] - sparse representation with vocabulary size dimensions

    # 3. Calculate the embedding similarities (dot product over sparse vectors,
    #    hence unnormalized scores rather than values in [-1, 1])
    similarities = model.similarity(embeddings, embeddings)
    print(similarities)
    # tensor([[   35.629,     9.154,     0.098],
    #         [    9.154,    27.478,     0.019],
    #         [    0.098,     0.019,    29.553]])

    # 4. Check sparsity stats
    # NOTE(review): current docs call this as model.sparsity(embeddings);
    # confirm the class-level call works on the installed library version.
    stats = SparseEncoder.sparsity(embeddings)
    print(f"Sparsity: {stats['sparsity_ratio']:.2%}")
    # Sparsity: 99.84%

if __name__ == '__main__':
    # Uncomment to run the other demos (each downloads its model on first use).
    # Fixed: the commented call previously named a nonexistent
    # `test_embeddings_model`; the function is `test_embedding_model`.
    # test_embedding_model()
    # test_reranker_model()
    test_sparse_encoder_model()
