import logging
import torch
import math
import numpy as np
from numpy import ndarray
from sympy import false
from transformers import BertTokenizerFast, BertModel, AutoModel, AutoTokenizer,RoFormerTokenizer,RoFormerModel,BertTokenizer,RoFormerForCausalLM
from sklearn.metrics.pairwise import cosine_similarity
from typing import Union, List

# Model path: a pretrained model that can be fine-tuned for downstream tasks
# (classification, similar-sentence retrieval, etc.) bertEvaluate.py
model_path = "roformer_chinese_sim_char_base"
#tokenizer = BertTokenizerFast.from_pretrained(model_path)
#model = BertModel.from_pretrained(model_path)

#tokenizer = AutoTokenizer.from_pretrained(model_path)
#model = AutoModel.from_pretrained(model_path)

# Load the RoFormer tokenizer
tokenizer = RoFormerTokenizer.from_pretrained(model_path)
# Load the RoFormer model (encoder only; the CausalLM variant is unused)
model = RoFormerModel.from_pretrained(model_path)
#model = RoFormerForCausalLM.from_pretrained(model_path)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Module-level store populated by build_index():
#   index["sentences"] -> list of indexed sentences
#   index["index"]     -> the faiss index object
index = {}

def mean_pooling(token_embeddings, attention_mask):
    """Mean-pool token embeddings over the valid (unmasked) positions.

    Args:
        token_embeddings: float tensor of shape (batch, seq_len, hidden).
        attention_mask: tensor of shape (batch, seq_len); 1 for real tokens,
            0 for padding.

    Returns:
        Tensor of shape (batch, hidden): per-sentence mean of the
        non-padding token embeddings.
    """
    # Cast the mask to the embedding dtype explicitly instead of relying on
    # implicit int->float promotion during the multiply/divide.
    mask = torch.unsqueeze(attention_mask, dim=-1).to(token_embeddings.dtype)
    summed = torch.sum(token_embeddings * mask, dim=1)
    # Clamp so a fully-padded row divides by ~0 instead of exactly 0,
    # avoiding NaN/inf in the output.
    counts = torch.sum(mask, dim=1).clamp(min=1e-9)
    return summed / counts

def encode(
           sentences: Union[str, List[str]],
           batch_size: int = 64,
           convert_to_numpy: bool = False,
           normalize_to_unit: bool = False):
    """Embed one sentence or a list of sentences with the module-level model.

    Sentences are processed longest-first (to reduce padding waste), then the
    results are restored to the caller's original order.

    Args:
        sentences: a single string or a list of strings.
        batch_size: number of sentences per forward pass.
        convert_to_numpy: return a numpy array instead of a torch tensor.
        normalize_to_unit: L2-normalize each embedding.

    Returns:
        A tensor/array of shape (n, hidden); a single vector when a single
        string was passed; an empty list when the input list is empty.
    """
    single_input = isinstance(sentences, str)
    if single_input:
        sentences = [sentences]

    collected = []
    if not sentences:
        return collected

    # Sort longest-first; `order` also lets us undo the permutation later.
    order = np.argsort([-len(s) for s in sentences])
    by_length = [sentences[i] for i in order]

    with torch.no_grad():
        for start in range(0, len(by_length), batch_size):
            batch = by_length[start:start + batch_size]
            features = tokenizer(
                batch,
                padding=True,
                truncation=True,
                max_length=512,
                return_tensors="pt"
            ).to(device)
            model_out = model(**features)

            #embeddings = outputs.logits
            vecs = mean_pooling(model_out[0], features["attention_mask"])

            if normalize_to_unit:
                # Scale each embedding to unit L2 norm.
                vecs = torch.nn.functional.normalize(vecs, p=2, dim=1)
            if convert_to_numpy:
                vecs = vecs.cpu()

            collected.extend(vecs)

    # argsort of the permutation is its inverse: restore input order.
    restored = [collected[i] for i in np.argsort(order)]

    if convert_to_numpy:
        result = np.asarray([v.numpy() for v in restored])
    else:
        result = torch.stack(restored)
    if single_input:
        result = result[0]
    return result

def build_index(sentences_or_file_path: Union[str, List[str]],
                ann_search: bool = False,
                gpu_index: bool = False,
                gpu_memory: int = 16,
                n_search: int = 64,
                batch_size: int = 64):
    """Encode sentences and build a faiss inner-product index in the module
    global ``index`` (keys: "sentences", "index").

    Args:
        sentences_or_file_path: list of sentences, or path to a UTF-8 file
            with one sentence per line.
        ann_search: use an approximate IVF index instead of exact flat search.
        gpu_index: move the index to GPU when GPU faiss is available.
        gpu_memory: GPU scratch memory budget in GiB.
        n_search: nprobe (number of IVF cells probed) for ANN search.
        batch_size: encoding batch size.

    Raises:
        ImportError: when faiss is not installed (the original code only
            warned here and then crashed later with a NameError).
        ValueError: when embeddings contain NaN/Inf and ann_search is set.
    """
    try:
        import faiss
        assert hasattr(faiss, "IndexFlatIP")
    except (ImportError, AssertionError) as e:
        # There is no brute-force fallback implemented, so fail fast with a
        # clear message instead of a NameError further down.
        logging.error(
            "Fail to import faiss. Install faiss through PyPI to build the index.")
        raise ImportError("faiss is required by build_index") from e

    if isinstance(sentences_or_file_path, str):
        sentences = []
        with open(sentences_or_file_path, "r", encoding="utf-8") as f:
            logging.info("Loading sentences from %s" % (sentences_or_file_path))
            for line in f:
                sentences.append(line.rstrip())
        sentences_or_file_path = sentences

    logging.info("Encoding embeddings for sentences")
    embeddings = encode(sentences_or_file_path, batch_size=batch_size,
                        normalize_to_unit=True, convert_to_numpy=True)
    # faiss requires float32; cast once for both train() and add().
    embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)

    logging.info("Building index")

    d = embeddings.shape[1]
    # IVF cell count heuristic: sqrt(N), at least 1 so tiny corpora still work.
    nlist = max(1, int(math.sqrt(embeddings.shape[0])))
    quantizer = faiss.IndexFlatIP(d)
    if ann_search:
        new_index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_INNER_PRODUCT)
    else:
        new_index = quantizer

    if gpu_index:
        if hasattr(faiss, "StandardGpuResources"):
            logging.info("Use GPU-version faiss")
            res = faiss.StandardGpuResources()
            res.setTempMemory(gpu_memory * 1024 * 1024 * 1024)
            new_index = faiss.index_cpu_to_gpu(res, 0, new_index)
        else:
            logging.info("Use CPU-version faiss")

    if ann_search:
        # IVF training diverges on invalid values; check first.
        if np.isnan(embeddings).any() or np.isinf(embeddings).any():
            raise ValueError("Embeddings contain invalid values (NaN or Inf)")
        new_index.train(embeddings)
        # nprobe only exists on IVF indexes; setting it on IndexFlatIP is
        # meaningless (and raises on recent faiss builds).
        new_index.nprobe = min(nlist, n_search)

    new_index.add(embeddings)

    index["sentences"] = sentences_or_file_path
    index["index"] = new_index

def similarity(queries: Union[str, List[str]]):
    """Encode queries into unit-normalized numpy vectors.

    Always returns a 2-D array: a single-string query (which encode()
    returns as a 1-D vector) gets a leading batch axis added.
    """
    vectors = encode(queries, normalize_to_unit=True, convert_to_numpy=True)
    if isinstance(queries, str):
        vectors = vectors[np.newaxis, :]
    return vectors



if __name__ == '__main__':
    sentences_path = "data/sentences.txt"
    build_index(sentences_path, ann_search=False, gpu_index=False, n_search=32)
    queries='足球运动员试图铲球。'
    # Bug fix: the original assigned the result back to the name `similarity`,
    # shadowing (and destroying) the similarity() function binding.
    query_vecs = similarity(queries)

    # threshold: minimum similarity score to keep; top_k: number of nearest
    # neighbors to retrieve.
    top_k = 15
    threshold = 0.8
    distance, idx = index["index"].search(query_vecs.astype(np.float32), top_k)
    #print(distance, idx)
    def single_result(dist, idx):
        # Pair each hit's sentence with its score, dropping hits below threshold.
        results = [(index["sentences"][i], s) for i, s in zip(idx, dist) if s >= threshold]
        return results

    id_scores = single_result(distance[0], idx[0])
    # Sort best-first and keep at most top_k results.
    id_scores = sorted(id_scores, key=lambda x: x[1], reverse=True)[:top_k]
    print(id_scores)



