import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
# Mean-pool the token embeddings, ignoring padded positions via the attention mask.
def average_pool(last_hidden_states: Tensor,
                 attention_mask: Tensor) -> Tensor:
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
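# Quick sanity check of average_pool with toy values (illustrative only, not
# part of the pipeline): with one padded position, the pooled vector is the
# mean over the two real tokens alone.
_hidden = torch.tensor([[[1.0], [3.0], [100.0]]])  # (batch=1, seq=3, dim=1)
_mask = torch.tensor([[1, 1, 0]])                  # last position is padding
assert torch.allclose(average_pool(_hidden, _mask), torch.tensor([[2.0]]))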
# Load paper IDs and titles (tab-separated); the file should contain 599 rows.
paper_df = pd.read_csv('anlp2024.tsv', names=["pid", "title"], sep="\t")
assert len(paper_df) == 599
# Convert the title column of paper_df to a list and prepend "passage: " to
# each string to build input_texts (E5 expects this prefix on the document side).
input_texts = [f"passage: {title}" for title in paper_df["title"].tolist()]
assert input_texts[0] == "passage: 市況コメント生成のための少数事例選択"
assert input_texts[-1] == "passage: Event-Centered Prompting for Text Style Transfer"
# multilingual-e5-large produces 1024-dimensional embeddings (see the shape assert below).
tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-large')
model = AutoModel.from_pretrained('intfloat/multilingual-e5-large')
# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')
with torch.no_grad():
    outputs = model(**batch_dict)
embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
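# Note: a single forward pass over all 599 inputs can be memory-hungry with a
# large model. A batched variant would look like this (sketch; the batch size
# of 32 is an assumption, not taken from the original):
# all_emb = []
# for i in range(0, len(input_texts), 32):
#     bd = tokenizer(input_texts[i:i + 32], max_length=512, padding=True,
#                    truncation=True, return_tensors='pt')
#     with torch.no_grad():
#         out = model(**bd)
#     all_emb.append(average_pool(out.last_hidden_state, bd['attention_mask']))
# embeddings = torch.cat(all_emb)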
embeddings = F.normalize(embeddings, p=2, dim=1)
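# After L2 normalization, the dot product of two embeddings equals their
# cosine similarity, so similarity search reduces to a matrix product.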
assert embeddings.shape == (599, 1024)
# np.savez appends the .npz extension and stores the array under the default
# key "arr_0".
np.savez("anlp2024", embeddings.detach().numpy().copy())
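# Query-time usage sketch: load the saved embeddings and rank titles against a
# query. E5 uses the "query: " prefix at search time (mirroring the "passage: "
# prefix above); the query string here is an arbitrary example.
loaded = np.load("anlp2024.npz")["arr_0"]  # (599, 1024), already L2-normalized
query_batch = tokenizer(["query: 言語モデル"], max_length=512, padding=True,
                        truncation=True, return_tensors='pt')
with torch.no_grad():
    query_out = model(**query_batch)
query_emb = F.normalize(average_pool(query_out.last_hidden_state,
                                     query_batch['attention_mask']), p=2, dim=1)
scores = loaded @ query_emb.numpy().T        # cosine similarities, shape (599, 1)
top5 = scores.squeeze().argsort()[::-1][:5]  # indices of the five closest titles
print(paper_df["title"].iloc[top5])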