```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Colloquial Spanish input to be normalized/translated by the model
text = "no se tia estoy arta d todo. jamas hubiera dicho eso"

# High-level alternative (see the sketch below):
# pipe = pipeline("text2text-generation", model="VioletaViCan/BART_CA_ES_curriculumLearning", device=0)
# pipe(text)

tokenizer = AutoTokenizer.from_pretrained("VioletaViCan/BART_CA_ES_curriculumLearning")
model = AutoModelForSeq2SeqLM.from_pretrained("VioletaViCan/BART_CA_ES_curriculumLearning")

inputs = tokenizer(text, max_length=1024, return_tensors="pt", truncation=True, padding=True)
# BART does not use token_type_ids; drop them if the tokenizer returns them
if "token_type_ids" in inputs:
    inputs.pop("token_type_ids")

# max_new_tokens belongs to generate(), not to decode()
output = model.generate(**inputs, max_new_tokens=20)
print(output)

generated_text = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(generated_text)
```
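The commented-out `pipeline` call above can also be used directly instead of the lower-level Auto classes. A minimal sketch, assuming the standard `text2text-generation` pipeline behavior (generation kwargs such as `max_new_tokens` are forwarded to `generate()`):

```python
from transformers import pipeline

# Build a text2text-generation pipeline around the same checkpoint
pipe = pipeline("text2text-generation", model="VioletaViCan/BART_CA_ES_curriculumLearning")

# The pipeline returns a list of dicts with a "generated_text" field
result = pipe("no se tia estoy arta d todo. jamas hubiera dicho eso", max_new_tokens=20)
print(result[0]["generated_text"])
```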