from transformers import RobertaTokenizer, RobertaForMaskedLM, pipeline


def pt_evaluate():
    # Load the tokenizer for the Swedish RoBERTa model.
    tokenizer = RobertaTokenizer.from_pretrained('birgermoell/roberta-swedish-scandi')
    # from_flax=True loads the Flax checkpoint and converts it to PyTorch weights at load
    # time; the plain call below (kept for reference) expects native PyTorch weights.
    # model = RobertaForMaskedLM.from_pretrained('birgermoell/roberta-swedish-scandi')
    model = RobertaForMaskedLM.from_pretrained("birgermoell/roberta-swedish-scandi", from_flax=True)
    # Run a fill-mask pipeline on a Swedish prompt ("The capital of Sweden is <mask>.").
    my_unmasker_pipeline = pipeline('fill-mask', model=model, tokenizer=tokenizer)
    output = my_unmasker_pipeline("Huvudstaden i Sverige är <mask>.")
    print(output)


pt_evaluate()
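

# Optional follow-up, a minimal sketch that is not part of the original script: once the
# Flax checkpoint has been converted via from_flax=True, the PyTorch model and tokenizer
# can be written to disk with save_pretrained so later runs load them directly, without
# the conversion step. The directory name "roberta-swedish-scandi-pt" is an arbitrary
# choice for this example.
def save_pytorch_checkpoint(output_dir="roberta-swedish-scandi-pt"):
    tokenizer = RobertaTokenizer.from_pretrained('birgermoell/roberta-swedish-scandi')
    model = RobertaForMaskedLM.from_pretrained('birgermoell/roberta-swedish-scandi', from_flax=True)
    # save_pretrained writes the tokenizer files, model config, and PyTorch weights to output_dir.
    tokenizer.save_pretrained(output_dir)
    model.save_pretrained(output_dir)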