'''
author:        Wang Chenyang <cy-wang21@mails.tsinghua.edu.cn>
date:          2025-07-30 12:34:52
Copyright © Department of Physics, Tsinghua University. All rights reserved
'''
from transformers import AutoTokenizer, AutoModel
import torch
import numpy as np

class BERTEmbedding:
    """Thin wrapper around a HuggingFace BERT-style encoder for text embeddings.

    Loads a tokenizer/model pair (default: PhysBERT) and exposes helpers that
    return hidden states for tokens, single sentences, or sentence batches.
    All forward passes run under ``torch.no_grad()`` since this class is
    inference-only — the original code built autograd graphs for nothing.
    """

    def __init__(self, model_name: str = "thellert/physbert_cased"):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)
        # Maximum sequence length (in tokens) accepted by the tokenizer/model.
        self.max_token_length = self.tokenizer.model_max_length
        print("模型:", model_name)
        print("最大token长度:", self.max_token_length)

    def embed_token(self, token: str) -> torch.Tensor:
        """Return per-token embeddings of shape (1, n_tokens, hidden).

        The 1:-1 slice drops the [CLS]/[SEP] special tokens the tokenizer
        adds at the start and end of the sequence.
        """
        inputs = self.tokenizer(token, return_tensors="pt")
        # Inference only: skip autograd graph construction.
        with torch.no_grad():
            outputs = self.model(**inputs)
        return outputs.last_hidden_state[:, 1:-1, :]

    def embed_sentence(self, sentence: str) -> torch.Tensor:
        """Return the [CLS]-token embedding of shape (1, hidden) for one sentence."""
        inputs = self.tokenizer(sentence, return_tensors="pt", truncation=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        return outputs.last_hidden_state[:, 0, :]   # CLS token

    def embed_sentence_list(self, sentence_list: list[str]) -> torch.Tensor:
        """Return [CLS]-token embeddings of shape (batch, hidden) for a batch."""
        inputs = self.tokenizer(sentence_list, return_tensors="pt", padding=True, truncation=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        return outputs.last_hidden_state[:, 0, :]   # CLS token


def get_embedding_item_list(
    bert: "BERTEmbedding",
    all_items: list[dict],
    batch_size: int = 10) -> np.ndarray:
    """Embed a list of items (dicts with "title" and optionally "abstract").

    Each item is turned into one sentence ("title abstract" when both are
    present, otherwise just the title) and embedded in batches of
    ``batch_size`` via ``bert.embed_sentence_list``.

    Returns:
        Array of shape (hidden_dim, n_items): one L2-normalized column per
        item. For an empty ``all_items`` an empty (0, 0) array is returned
        (the original code crashed in ``np.concatenate`` on empty input).

    Raises:
        ValueError: if an item has no "title" key.
    """
    all_sentences = []
    for item in all_items:
        if ("title" in item) and ("abstract" in item):
            all_sentences.append(item["title"] + " " + item["abstract"])
        elif "title" in item:
            all_sentences.append(item["title"])
        else:
            raise ValueError("Item must have title")

    # np.concatenate raises on an empty sequence; return an empty matrix.
    if not all_sentences:
        return np.empty((0, 0))

    all_embed = []
    for i in range(0, len(all_sentences), batch_size):
        batch_sentences = all_sentences[i:i + batch_size]
        # Transpose so each column is one item's embedding.
        embed_items = bert.embed_sentence_list(batch_sentences).detach().numpy().T
        # L2-normalize each column; guard zero vectors against NaN from 0/0.
        norms = np.linalg.norm(embed_items, axis=0)
        norms[norms == 0] = 1.0
        all_embed.append(embed_items / norms)
    return np.concatenate(all_embed, axis=1)
