---
license: mit
datasets:
- KomeijiForce/Inbedder-Pretrain-Data
language:
- en
---

```python
import torch
from torch.nn.functional import gelu, cosine_similarity
from transformers import AutoTokenizer, AutoModelForMaskedLM


class InBedder():

    def __init__(self, path='KomeijiForce/inbedder-roberta-large', device='cuda:0'):
        # Load the fine-tuned masked LM and reuse its encoder together with the
        # dense + layer-norm part of the LM head to produce embeddings.
        model = AutoModelForMaskedLM.from_pretrained(path)
        self.tokenizer = AutoTokenizer.from_pretrained(path)

        self.model = model.roberta
        self.dense = model.lm_head.dense
        self.layer_norm = model.lm_head.layer_norm

        self.device = torch.device(device)
        self.model = self.model.to(self.device)
        self.dense = self.dense.to(self.device)
        self.layer_norm = self.layer_norm.to(self.device)

        self.vocab = self.tokenizer.get_vocab()
        self.vocab = {self.vocab[key]: key for key in self.vocab}

    def encode(self, input_texts, instruction, n_mask):
        # Build one prompt per input: the instruction followed by n_mask <mask> tokens.
        # `instruction` can be a single string shared by all inputs or a list with
        # one instruction per input.
        if isinstance(instruction, str):
            prompts = [instruction + self.tokenizer.mask_token * n_mask for _ in input_texts]
        elif isinstance(instruction, list):
            prompts = [inst + self.tokenizer.mask_token * n_mask for inst in instruction]

        # Encode each (text, prompt) pair and locate the mask positions.
        inputs = self.tokenizer(input_texts, prompts, padding=True, truncation=True, return_tensors='pt').to(self.device)
        mask = inputs.input_ids.eq(self.tokenizer.mask_token_id)

        outputs = self.model(**inputs)

        # Take the hidden states at the mask positions and pass them through the
        # dense + layer-norm head.
        logits = outputs.last_hidden_state[mask]
        logits = self.layer_norm(gelu(self.dense(logits)))

        # Average the n_mask vectors for each input, then standardize each embedding.
        logits = logits.reshape(len(input_texts), n_mask, -1)
        logits = logits.mean(1)
        logits = (logits - logits.mean(1, keepdim=True)) / logits.std(1, keepdim=True)

        return logits


inbedder = InBedder(path='KomeijiForce/inbedder-roberta-large', device='cpu')

texts = ["I love cat!", "I love dog!", "I dislike cat!"]
instruction = "What is the animal mentioned here?"
embeddings = inbedder.encode(texts, instruction, 3)

cosine_similarity(embeddings[:1], embeddings[1:], dim=1)
# tensor([0.9374, 0.9917], grad_fn=)

texts = ["I love cat!", "I love dog!", "I dislike cat!"]
instruction = "What is emotion expressed here?"
embeddings = inbedder.encode(texts, instruction, 3)

cosine_similarity(embeddings[:1], embeddings[1:], dim=1)
# tensor([0.9859, 0.8537], grad_fn=)
```
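
The same three sentences produce a different similarity structure depending on the instruction: asked about the animal, "I love cat!" is closest to "I dislike cat!" (0.99 vs. 0.94); asked about the emotion, it is closest to "I love dog!" (0.99 vs. 0.85). Since `encode` also accepts a list with one instruction per input, here is a minimal sketch of that usage; it assumes the `InBedder` class and `inbedder` instance defined above, and the example texts are purely illustrative.

```python
# Per-input instructions (the list branch of `encode`); texts are illustrative.
texts = ["I love cat!", "The service was terribly slow."]
instructions = [
    "What is the animal mentioned here?",
    "What is the emotion expressed here?",
]
embeddings = inbedder.encode(texts, instructions, 3)
print(embeddings.shape)  # one instruction-conditioned embedding per text, e.g. torch.Size([2, 1024]) for roberta-large
```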