import logging
import torch
import math
import numpy as np
from numpy import ndarray
from transformers import BertTokenizerFast, BertModel, AutoModel, AutoTokenizer,RoFormerTokenizer,RoFormerModel,BertTokenizer,RoFormerForCausalLM
from sklearn.metrics.pairwise import cosine_similarity
from typing import Union, List

# Model path -- a pretrained model; it can be fine-tuned for business tasks
# (classification, similar-sentence retrieval, etc.)  bertEvaluate.py
model_path = "roformer_chinese_sim_char_base"
# Alternative loaders kept for reference (BERT / Auto classes):
#tokenizer = BertTokenizerFast.from_pretrained(model_path)
#model = BertModel.from_pretrained(model_path)

#tokenizer = AutoTokenizer.from_pretrained(model_path)
#model = AutoModel.from_pretrained(model_path)

# Load the RoFormer tokenizer
tokenizer = RoFormerTokenizer.from_pretrained(model_path)
# Load the RoFormer model
model = RoFormerModel.from_pretrained(model_path)
#model = RoFormerForCausalLM.from_pretrained(model_path)

# Use GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Global in-memory index populated by build_index():
# index["sentences"] -> list of sentences, index["index"] -> their embeddings.
index = {}

def mean_pooling(token_embeddings, attention_mask):
    """Mean-pool token embeddings over the sequence axis, ignoring padding.

    Args:
        token_embeddings: (batch, seq_len, hidden) tensor of per-token vectors.
        attention_mask: (batch, seq_len) tensor with 1 for real tokens, 0 for padding.

    Returns:
        (batch, hidden) tensor of masked mean embeddings.
    """
    # Expand the mask to (batch, seq_len, 1) and match the embedding dtype so
    # the elementwise product stays in the embeddings' precision.
    mask = torch.unsqueeze(attention_mask, dim=-1).to(token_embeddings.dtype)
    token_embeddings = token_embeddings * mask
    # Clamp the token count so an all-padding row divides by ~0 -> 0, not NaN.
    seqlen = torch.clamp(torch.sum(mask, dim=1), min=1e-9)
    embeddings = torch.sum(token_embeddings, dim=1) / seqlen
    return embeddings

def encode(sentences: Union[str, List[str]],
           batch_size: int = 64,
           convert_to_numpy: bool = False,
           normalize_to_unit: bool = False):
    """Encode sentences into embeddings with the module-level RoFormer model.

    Sentences are sorted by descending length before batching so each batch
    needs as little padding as possible, then results are restored to the
    caller's original order.

    Args:
        sentences: a single sentence or a list of sentences.
        batch_size: number of sentences encoded per forward pass.
        convert_to_numpy: return an ndarray instead of a torch tensor.
        normalize_to_unit: L2-normalize each embedding to unit length.

    Returns:
        A (num_sentences, hidden) tensor/ndarray; a single vector when a
        single string was passed; an empty list for empty input.
    """
    input_was_string = False
    if isinstance(sentences, str):
        sentences = [sentences]
        input_was_string = True

    all_embeddings = []
    if not sentences:
        return all_embeddings

    # Sort by descending length so similar-sized sentences share a batch.
    length_sorted_idx = np.argsort([-len(sen) for sen in sentences])
    sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
    num_batches = math.ceil(len(sentences_sorted) / batch_size)
    with torch.no_grad():
        for i in range(num_batches):
            start = i * batch_size
            end = min(start + batch_size, len(sentences_sorted))
            inputs = tokenizer(
                sentences_sorted[start:end],
                padding=True,
                truncation=True,
                max_length=512,
                return_tensors="pt"
            ).to(device)
            outputs = model(**inputs)

            # Masked mean over the last hidden state (outputs[0]).
            embeddings = mean_pooling(outputs[0], inputs["attention_mask"])

            if normalize_to_unit:
                # Normalize each embedding to a unit vector.
                embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
            if convert_to_numpy:
                embeddings = embeddings.cpu()

            all_embeddings.extend(embeddings)

    # Undo the length sort: argsort of a permutation is its inverse.
    all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]

    if convert_to_numpy:
        all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
    else:
        all_embeddings = torch.stack(all_embeddings)
    if input_was_string:
        all_embeddings = all_embeddings[0]
    return all_embeddings

def build_index(sentences_or_file_path: Union[str, List[str]], batch_size: int = 64):
    """Encode a corpus and store it in the module-level `index` dict.

    Args:
        sentences_or_file_path: either a list of sentences, or a path to a
            UTF-8 text file with one sentence per line.
        batch_size: batch size forwarded to encode().

    Side effects:
        Sets index["sentences"] to the sentence list and index["index"] to
        the unit-normalized embedding matrix (ndarray).
    """
    if isinstance(sentences_or_file_path, str):
        sentences = []
        with open(sentences_or_file_path, "r", encoding="utf-8") as f:
            # Lazy %-style args: the message is only formatted if emitted.
            logging.info("Loading sentences from %s", sentences_or_file_path)
            for line in f:
                sentences.append(line.rstrip())
        sentences_or_file_path = sentences

    logging.info("Encoding embeddings for sentences")
    embeddings = encode(sentences_or_file_path, batch_size=batch_size, normalize_to_unit=True, convert_to_numpy=True)

    logging.info("Building index")

    index["sentences"] = sentences_or_file_path
    index["index"] = embeddings

def similarity(queries: Union[str, List[str]],
               keys: Union[str, List[str], ndarray],
               batch_size: int = 64,
               return_matrix: bool = False):
    """Compute cosine similarity between queries and keys.

    Both sides are encoded with normalize_to_unit=True, so a plain dot
    product equals cosine similarity.

    Args:
        queries: a query sentence or list of query sentences.
        keys: key sentences, or a precomputed (already unit-normalized)
            embedding matrix such as index["index"].
        batch_size: batch size forwarded to encode().
        return_matrix: if True, return the full queries-x-keys similarity
            matrix; if False, compare queries[i] with keys[i] pairwise.

    Returns:
        An ndarray of similarities (matrix or vector), collapsed to a plain
        float when both inputs were single items.
    """
    query_vecs = encode(queries, batch_size=batch_size, normalize_to_unit=True)

    # Keys may already be an embedding matrix (e.g. from build_index).
    if not isinstance(keys, ndarray):
        key_vecs = encode(keys, batch_size=batch_size, normalize_to_unit=True)
    else:
        key_vecs = keys

    # A 1-D result from encode() means a single string was passed in.
    single_query, single_key = len(query_vecs.shape) == 1, len(key_vecs.shape) == 1
    if single_query:
        query_vecs = query_vecs.unsqueeze(0)
    if single_key:
        # key_vecs can be either ndarray or tensor here; promote to 2-D.
        if isinstance(key_vecs, ndarray):
            key_vecs = key_vecs.reshape(1, -1)
        else:
            key_vecs = key_vecs.unsqueeze(0)

    if return_matrix:
        # Full (num_queries, num_keys) similarity matrix.
        if isinstance(key_vecs, ndarray):
            query_vecs = query_vecs.cpu().numpy()
            similarity = np.matmul(query_vecs, np.transpose(key_vecs))
        else:
            similarity = torch.mm(query_vecs, key_vecs.transpose(0, 1)).cpu().numpy()
        if single_query:
            similarity = similarity[0]
            if single_key:
                similarity = float(similarity[0])
    else:
        # Pairwise mode: queries and keys must line up one-to-one.
        assert query_vecs.shape[0] == key_vecs.shape[0]
        if isinstance(key_vecs, ndarray):
            query_vecs = query_vecs.cpu().numpy()
            similarity = np.sum(query_vecs * key_vecs, axis=-1)
        else:
            similarity = torch.sum(query_vecs * key_vecs, dim=-1).cpu().numpy()
        if single_query:
            similarity = float(similarity[0])
    return similarity


if __name__ == '__main__':
    sentences_path = "data/sentences.txt"
    build_index(sentences_path)
    queries = ['一个男人在弹吉他。']
    # Use a distinct name: assigning the result to `similarity` would shadow
    # the similarity() function and break any later call to it.
    sim_matrix = similarity(queries, index["index"], return_matrix=True).tolist()
    # threshold is the minimum similarity score; top_k is the number of
    # nearest neighbours to keep.
    threshold = 0.8
    top_k = 15
    id_scores = [(i, s) for i, s in enumerate(sim_matrix[0]) if s >= threshold]

    # Highest-scoring neighbours first, truncated to top_k.
    id_scores = sorted(id_scores, key=lambda x: x[1], reverse=True)[:top_k]
    results = [(index["sentences"][idx], score) for idx, score in id_scores]
    print(results)



