import os
from typing import List, Tuple

import numpy as np
from onnxruntime import InferenceSession
from transformers import AutoTokenizer

from log_config import logger

class ONNXExecutionProvider:
    """Sentence-embedding backend running a local ONNX model on CPU.

    Loads a HuggingFace tokenizer and an ``embed.onnx`` model from the same
    directory, and exposes batch embedding with L2-normalized output vectors.
    """

    def __init__(self, embed_path: str):
        """Load tokenizer and ONNX session from *embed_path*.

        Args:
            embed_path: Directory containing the tokenizer files and an
                ``embed.onnx`` model file.

        Raises:
            ValueError: If ``embed.onnx`` does not exist under *embed_path*.
        """
        embed_model_path = os.path.join(embed_path, "embed.onnx")
        # Fail fast if the model file is missing.
        if not os.path.isfile(embed_model_path):
            raise ValueError(f"{embed_model_path} 文件不存在")
        self._tokenizer = AutoTokenizer.from_pretrained(embed_path)
        providers = ['CPUExecutionProvider']
        self._session = InferenceSession(embed_model_path, providers=providers)

    def embed_documents(self, sentences: List[str]) -> Tuple[List, int]:
        """Embed *sentences* as a single batch.

        Returns:
            A pair of (normalized embedding vectors, content-token count).
        """
        inputs, tokens_num = self.tokenizer(sentences)
        embeddings = self.embedding(inputs)
        return embeddings, tokens_num

    def tokenizer(self, sentences: List[str]) -> Tuple[dict, int]:
        """Tokenize *sentences* into numpy model inputs.

        Returns:
            A pair of (model input dict, content-token count). The count
            excludes the two special tokens added per sentence.
        """
        inputs = self._tokenizer(sentences, padding=True, truncation=True, max_length=512, return_tensors="np")
        # attention_mask sums to the total token count; subtract the two
        # special tokens (e.g. [CLS]/[SEP]) contributed by each sentence.
        token_num = (inputs['attention_mask'].sum().item() - 2 * inputs['attention_mask'].shape[0])
        # Convert the BatchEncoding to a plain dict for InferenceSession.run.
        return dict(inputs), token_num

    def embedding(self, inputs: dict) -> List:
        """Run the ONNX session and return L2-normalized first-token embeddings.

        Args:
            inputs: Tokenizer output dict of numpy arrays (input_feed).

        Returns:
            A list of normalized embedding vectors (one per input sentence).
        """
        output_name = self._session.get_outputs()[0].name
        outputs_onnx = self._session.run(output_names=[output_name], input_feed=inputs)
        # Take the first-token ([CLS]) vector from the model output.
        embedding = outputs_onnx[0][:, 0]
        # L2-normalize each row so downstream similarity is a dot product.
        norm_arr = np.linalg.norm(embedding, axis=1, keepdims=True)
        embeddings_normalized = embedding / norm_arr
        return embeddings_normalized.tolist()

    def encode(self, sentences: List[str], batch_size: int = 100) -> Tuple[List, int]:
        """Embed *sentences* in batches of at most *batch_size*.

        Args:
            sentences: Texts to embed.
            batch_size: Maximum number of sentences per model invocation.

        Returns:
            A pair of (all normalized embeddings in input order,
            total content-token count across batches).
        """
        embeddings_list: List = []
        tokens_num = 0
        # Ceiling division: number of batches needed to cover all sentences.
        total_batch = (len(sentences) + batch_size - 1) // batch_size
        for batch_id in range(total_batch):
            batch = sentences[batch_id * batch_size:(batch_id + 1) * batch_size]
            inputs, token_num = self.tokenizer(batch)
            tokens_num += token_num
            embeddings = self.embedding(inputs)
            embeddings_list.extend(embeddings)
        return embeddings_list, tokens_num