import numpy as np
from dotenv import load_dotenv
from typing import List
from langchain_ollama import OllamaEmbeddings
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from util.file_util import doc_loader, doc_spliter

"""
发模型工具类
ollama 
  embedding_model   
"""
class T2llm:
    """LLM helper: Ollama embeddings plus a local HuggingFace cross-encoder reranker.

    Embedding dimensions for reference:
      nomic-embed-text: 768
      bge-m3: 1024
    """
    def __init__(self,
                 ollama_base_url='http://localhost:11434',
                 embedding_model='bge-m3',
                 rerank_model='D:/modelscope/bge-reranker-large'
                 ):
        self.ollama_base_url = ollama_base_url
        self.embedding_model = embedding_model
        # Local filesystem path of the reranker model (loaded lazily on first use).
        self.rerank_model = rerank_model
        # Cached clients/models: the original code rebuilt the embeddings client
        # and reloaded the reranker from disk on every call, which is expensive.
        # NOTE: the cache binds to the model names set at construction time;
        # mutate `embedding_model`/`rerank_model` only before the first call.
        self._embed_client = None
        self._rerank_tokenizer = None
        self._rerank_scorer = None

    def _embedder(self):
        """Return the (lazily created, cached) OllamaEmbeddings client."""
        if self._embed_client is None:
            self._embed_client = OllamaEmbeddings(base_url=self.ollama_base_url,
                                                  model=self.embedding_model)
        return self._embed_client

    def embed_documents(self, texts: list[str]):
        """Embed a batch of texts; returns one vector per input text."""
        return self._embedder().embed_documents(texts)

    def embed_text(self, text: str):
        """Embed a single query string; returns one vector."""
        return self._embedder().embed_query(text)

    def embed_file(self, file_path):
        """Load a document, split it into chunks, and embed each chunk.

        :param file_path: path accepted by `doc_loader`
        :return: dict with the chunk texts and their embedding vectors
        """
        texts = doc_spliter(doc_loader(file_path))
        vectors = self.embed_documents(texts)
        return {'texts': texts, 'vectors': vectors}

    def rerank(self, query: str, documents: list[str]) -> list[float]:
        """Score each document against `query` with the cross-encoder reranker.

        :return: one relevance score per document, e.g. [0.55, 0.15]
        """
        # Load tokenizer + model once and reuse them on subsequent calls
        # (the original reloaded both from disk on every invocation).
        if self._rerank_scorer is None:
            self._rerank_tokenizer = AutoTokenizer.from_pretrained(self.rerank_model)
            self._rerank_scorer = AutoModelForSequenceClassification.from_pretrained(self.rerank_model)
            self._rerank_scorer.eval()
        # Cross-encoders score (query, document) pairs jointly.
        pairs = [[query, doc] for doc in documents]
        with torch.no_grad():
            inputs = self._rerank_tokenizer(pairs, padding=True, truncation=True,
                                            return_tensors='pt', max_length=512)
            scores = self._rerank_scorer(**inputs, return_dict=True).logits.view(-1, ).float()
            for i, score in enumerate(scores):
                print(f"查询: {query}, 文档: {documents[i]}, 得分: {score}")
        return scores.tolist()

    def calculate_cosine(self, query: str, documents: list[str]
                         ) -> list[float]:
        """Cosine similarity between the query embedding and each document embedding.

        :return: one similarity score per document (same order as `documents`)
        """
        query_vec = np.array(self.embed_text(query))
        # Hoisted out of the loop: the query norm is invariant across documents.
        query_norm = np.linalg.norm(query_vec)
        scores = []
        for document in documents:
            doc_vec = np.array(self.embed_text(document))
            cosine_sim = np.dot(query_vec, doc_vec) / (query_norm * np.linalg.norm(doc_vec))
            scores.append(cosine_sim)
        return scores


if __name__ == '__main__':
    # Smoke test: embed a whole file.
    # res = T2llm().embed_file("G:\\changyangkj\\GBT 42453-2023 信息安全技术 网络安全态势感知通用技术要求(OCR).pdf")
    # print(res)

    # Smoke test: embed a single query.
    # Fixed: the example previously called the nonexistent `embed_query`;
    # the method on T2llm is `embed_text`.
    # res = T2llm().embed_text("信息安全技术")
    # print(res)

    # Smoke test: rerank — the panda definition should outscore "hi".
    res = T2llm().rerank("what is panda?", [
        "hi",
        "The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China."
    ])
    # Fixed: the result was computed but never printed.
    print(res)
