# Requires transformers>=4.51.0

import torch
import torch.nn.functional as F

from torch import Tensor
from transformers import AutoTokenizer, AutoModel


class Qwen3TextEmbedding:
    """Text-embedding wrapper around Qwen3-Embedding models.

    Queries are prefixed with an instruction (``get_detailed_instruct``);
    documents are embedded as-is. Embeddings are the L2-normalized hidden
    state of each sequence's last real token, so relevance scores reduce to
    a plain dot product.
    """

    def __init__(self, model_name='Qwen/Qwen3-Embedding-0.6B', max_length=8192, enable_flash_attention=False):
        """Load tokenizer and model.

        Args:
            model_name: Hugging Face hub id of the embedding model.
            max_length: truncation length used during tokenization.
            enable_flash_attention: if True, load with flash-attention-2 in
                fp16 and move the model to CUDA; otherwise load on CPU/default.
        """
        self.model_name = model_name
        self.max_length = max_length
        # Left padding guarantees the last position of every row is a real
        # token, which last_token_pool relies on for its fast path.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
        # Retrieval instruction (Chinese): "For a given classical-Chinese
        # text, find the most content-relevant passage."
        self.task = '对于给定的古文文本，找到内容最相关的段落'
        if enable_flash_attention:
            self.model = AutoModel.from_pretrained(
                model_name,
                attn_implementation="flash_attention_2",
                torch_dtype=torch.float16
            ).cuda()
        else:
            self.model = AutoModel.from_pretrained(model_name)
        # Inference-only usage: disable dropout etc.
        self.model.eval()

    @staticmethod
    def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
        """Pool each sequence to the hidden state of its last real token.

        With left padding the final position of every row is real, so column
        -1 suffices; with right padding each row is indexed at its own last
        non-pad position (mask sum - 1).
        """
        left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
        if left_padding:
            return last_hidden_states[:, -1]
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]

    @staticmethod
    def get_detailed_instruct(task_description: str, query: str) -> str:
        """Format a query with its one-sentence task instruction."""
        return f'Instruct: {task_description}\nQuery:{query}'

    def tokenize_inputs(self, input_texts):
        """Tokenize, pad, and truncate texts; return tensors on the model's device."""
        batch_dict = self.tokenizer(
            input_texts,
            padding=True,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt"
        )
        # BatchEncoding.to returns the (moved) encoding; assign rather than
        # depending on in-place mutation.
        batch_dict = batch_dict.to(self.model.device)
        return batch_dict

    def get_embeddings(self, input_texts):
        """Return L2-normalized embeddings, one row per input text."""
        batch_dict = self.tokenize_inputs(input_texts)
        # Pure inference: skip autograd graph construction to save memory.
        with torch.no_grad():
            outputs = self.model(**batch_dict)
        embeddings = self.last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
        return F.normalize(embeddings, p=2, dim=1)

    def calculate_scores(self, queries, documents):
        """Score every query against every document.

        Queries are wrapped with the instance's task instruction; documents
        are embedded as-is. Returns a (len(queries), len(documents)) tensor
        of cosine similarities (dot products of unit vectors).
        """
        if not isinstance(documents, list):
            documents = [documents]
        input_texts = [self.get_detailed_instruct(self.task, query) for query in queries] + documents
        embeddings = self.get_embeddings(input_texts)
        return (embeddings[:len(queries)] @ embeddings[len(queries):].T)


if __name__ == "__main__":
    # Pass RAW query strings: calculate_scores() applies the instruction
    # prefix itself (using embedder.task), so pre-wrapping queries with
    # get_detailed_instruct here would double-wrap them
    # ("Instruct: ...\nQuery:Instruct: ...\nQuery:...").
    queries = [
        'What is the capital of China?',
        'Explain gravity'
    ]
    # No need to add instruction for retrieval documents
    documents = [
        "The capital of China is Beijing.",
        "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun."
    ]

    embedder = Qwen3TextEmbedding()
    scores = embedder.calculate_scores(queries, documents)
    print(scores.tolist())
    # Expect the diagonal (matching query/document pairs) to score highest,
    # e.g. roughly [[0.76, 0.14], [0.14, 0.60]].