# Demo: embed two sentences with m3e-base and print their cosine similarity.

from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# Load the m3e-base sentence-embedding model once; both encode() calls reuse it.
model = SentenceTransformer('moka-ai/m3e-base')

# Two near-paraphrase Chinese sentences (both ask whether the product has
# price protection) used to sanity-check embedding similarity.
sentence = '商品有保价吗'
_sentence = '商品能支持保价吗'

# Encode each sentence into a dense vector by calling model.encode().
embedding = model.encode(sentence)
_embedding = model.encode(_sentence)  # fixed typo: was `_emebdding`

# cosine_similarity expects 2-D inputs, so wrap each 1-D vector in a list;
# the result is a 1x1 matrix, hence the [0][0] to print the scalar score.
print(cosine_similarity([embedding], [_embedding])[0][0])
