import os.path

from langchain_core.embeddings import Embeddings
from typing import List, Any, Dict, Optional
import torch
from torch import Tensor
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM
from langchain_community.document_loaders.pdf import BasePDFLoader

# Run all model inference on GPU when available, otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def _last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    else:
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]

# (Removed a leftover debug print of the model directory that ran as an
# import-time side effect; the path is resolved inside QwenEmbeddings.)
class QwenEmbeddings(Embeddings):
    """LangChain ``Embeddings`` wrapper around a local Qwen3 embedding model.

    Loads the tokenizer and model once at construction time, then serves
    L2-normalized, last-token-pooled embeddings for queries and documents.
    Queries are prefixed with a retrieval instruction; documents are not.
    """
    model: Any = None
    tokenizer: Any = None

    def __init__(self, model_path: Optional[str] = None, **kwargs):
        """Load the tokenizer and model from a local checkpoint directory.

        Args:
            model_path: Directory holding the Qwen3 embedding checkpoint.
                Defaults to ``../Qwen3-Embedding-0.6B`` relative to the
                current working directory (the previously hard-coded path),
                so existing callers are unaffected.
            **kwargs: Forwarded to the ``Embeddings`` base class.
        """
        super().__init__(**kwargs)
        if model_path is None:
            model_path = os.path.join(os.getcwd(), "../Qwen3-Embedding-0.6B")
        print("正在加载Embedding模型...")
        # Left padding keeps each sequence's final real token at the last
        # position, matching the pooling strategy in _last_token_pool.
        self.tokenizer = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path=model_path,
            padding_side="left",
        )
        self.model = AutoModel.from_pretrained(
            pretrained_model_name_or_path=model_path,
            torch_dtype=torch.float16,
        ).to(DEVICE).eval()
        # Maximum token length per text before truncation.
        self.max_length = 8192
        print("Embedding模型加载成功...")

    def _get_instruct(self, task_description: str, query: str) -> str:
        """Format a query with its task instruction in the Qwen prompt style."""
        return f'Instruct: {task_description}\nQuery:{query}'

    def _embed(self, texts: List[str]) -> List[List[float]]:
        """Tokenize, encode, last-token-pool, and L2-normalize a batch of texts.

        Args:
            texts: Raw strings to embed (already instruction-prefixed for
                queries).

        Returns:
            One unit-norm embedding vector (as a list of floats) per input.
        """
        with torch.no_grad():
            batch_dict = self.tokenizer(
                texts, padding=True, truncation=True,
                max_length=self.max_length, return_tensors="pt"
            )
            batch_dict = {k: v.to(DEVICE) for k, v in batch_dict.items()}
            outputs = self.model(**batch_dict)
            embeddings = _last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
            normalized_embeddings = F.normalize(embeddings, p=2, dim=1)
            return normalized_embeddings.cpu().tolist()

    def embed_query(self, text: str) -> list[float]:
        """Embed a single search query (instruction-prefixed per Qwen usage)."""
        task = "Given a web search query, retrieve relevant passages that answer the query"
        instructed_text = self._get_instruct(task, text)
        embedding = self._embed([instructed_text])
        return embedding[0]

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed a batch of documents (no instruction prefix)."""
        return self._embed(texts)

class PaddlePDF(BasePDFLoader):
    """PDF loader stub intended for Paddle-based PDF parsing.

    NOTE(review): no ``load``/``lazy_load`` override is visible here, so this
    class currently only performs the base class's path bookkeeping —
    presumably the Paddle parsing logic lives elsewhere or is still TODO;
    confirm before use.
    """
    def __init__(self, file_p: str):
        # Delegate file-path handling (local path vs. URL) to BasePDFLoader.
        super().__init__(file_path=file_p)




