#-*- coding:utf-8 -*-
import io
import pickle
import re
import sys

import nltk
import numpy as np
import scipy.spatial
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import sent_tokenize
from sentence_transformers import SentenceTransformer

# nltk.download('punkt')  # uncomment on first run to fetch the punkt tokenizer models

# Force UTF-8 stdout so Chinese queries/filenames print correctly (e.g. on Windows consoles).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
def sentence_token_nltk(str):
    """Split text into sentences terminated by Chinese end punctuation.

    Non-greedily matches up to each 。 ！ or ？, so each returned sentence
    includes its terminator and any trailing text without a terminator is
    dropped (same behavior as the previous nltk RegexpTokenizer version,
    whose default flags include DOTALL).

    Args:
        str: input text. NOTE(review): the parameter name shadows the
            builtin ``str``; kept for backward compatibility with keyword
            callers.

    Returns:
        list[str]: sentences in order of appearance, possibly empty.
    """
    # re.findall is exactly what RegexpTokenizer(pattern).tokenize does;
    # DOTALL lets sentences span newlines, matching nltk's default flags.
    return re.findall(r".*?[。！？]", str, flags=re.DOTALL)

def find(queries, num):
    """Return up to `num` query sentences that best match the corpus.

    Each query is embedded and compared (cosine similarity) against the
    precomputed corpus embeddings; a query's score is the similarity of its
    single closest corpus entry. The `num` highest-scoring query strings are
    returned, best first.

    Args:
        queries: list of sentence strings to embed and rank.
        num: maximum number of query strings to return.

    Returns:
        list of at most `num` query strings, ordered by descending score.
    """
    embedder = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')

    # Corpus embeddings and their source filenames were precomputed offline
    # into parallel pickle files (index i in one matches index i in the other).
    with open("./data/train/embeddings.pkl", 'rb') as f:
        corpus_embeddings = np.array(pickle.load(f))
    with open("./data/train/filenames.pkl", 'rb') as f:
        corpus_filenames = pickle.load(f)

    query_embeddings = embedder.encode(queries)
    print(len(queries))

    # Score each query by its best (lowest cosine distance) corpus match.
    # argmin replaces the previous full sort: only the top-1 entry was used.
    scores = []
    for query, query_embedding in zip(queries, query_embeddings):
        distances = scipy.spatial.distance.cdist(
            [query_embedding], corpus_embeddings, "cosine")[0]
        best_idx = int(np.argmin(distances))
        similarity = 1 - distances[best_idx]
        print("======================")
        print("Query:", query)
        print("Result:Top 1 most similar sentences in corpus:")
        print(corpus_filenames[best_idx], "(Score: %.4f)" % similarity)
        scores.append((query, similarity))

    # Best first; clamp so num > len(queries) no longer raises IndexError.
    scores.sort(key=lambda s: s[1], reverse=True)
    return [query for query, _ in scores[:min(num, len(scores))]]

def findPosition(text, bestQueries):
    """Return the end offset in `text` of each query's first occurrence.

    For each query string, the reported value is ``text.find(q) + len(q)``,
    i.e. the index just past the match. If a query is absent, ``find``
    yields -1 and the result is ``len(q) - 1`` (preserved original behavior).

    Args:
        text: the full text the queries were extracted from.
        bestQueries: list of substrings to locate.

    Returns:
        list of int end offsets, one per query, in input order.
    """
    print("find position")
    print(bestQueries)
    return [text.find(q) + len(q) for q in bestQueries]

# if __name__ == '__main__':
#     text = 'A man is eating pasta. Someone in a gorilla costume is playing a set of drums. A cheetah chases prey on across a field.'
#     queries = sentence_token_nltk(text)
#     best = find(queries, 2)
#     position = findPosition(text, best)
#     print(position)
#     print(best)


def findBestImages(queries, num):
    """Return up to `num` corpus filenames best matching the queries.

    Each query is embedded and compared (cosine similarity) against the
    precomputed corpus embeddings; a query contributes the filename of its
    single closest corpus entry, scored by that similarity. The filenames of
    the `num` highest-scoring matches are returned, best first.

    Args:
        queries: list of sentence strings to embed and match.
        num: maximum number of filenames to return.

    Returns:
        list of at most `num` corpus filenames, ordered by descending score
        (may contain duplicates if several queries hit the same entry).
    """
    embedder = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')

    # Corpus embeddings and their source filenames were precomputed offline
    # into parallel pickle files (index i in one matches index i in the other).
    with open("./data/train/embeddings.pkl", 'rb') as f:
        corpus_embeddings = np.array(pickle.load(f))
    with open("./data/train/filenames.pkl", 'rb') as f:
        corpus_filenames = pickle.load(f)

    query_embeddings = embedder.encode(queries)

    # Score each query by its best (lowest cosine distance) corpus match.
    # argmin replaces the previous full sort: only the top-1 entry was used.
    scores = []
    for query, query_embedding in zip(queries, query_embeddings):
        distances = scipy.spatial.distance.cdist(
            [query_embedding], corpus_embeddings, "cosine")[0]
        best_idx = int(np.argmin(distances))
        similarity = 1 - distances[best_idx]
        print("======================")
        print("Query:", query)
        print("Result:Top 1 most similar sentences in corpus:")
        print(corpus_filenames[best_idx], "(Score: %.4f)" % similarity)
        scores.append((corpus_filenames[best_idx], similarity))

    # Best first; clamp so num > len(queries) no longer raises IndexError.
    scores.sort(key=lambda s: s[1], reverse=True)
    return [name for name, _ in scores[:min(num, len(scores))]]