from transformers import pipeline

def model_loading(modelo):
    # Build a text-generation pipeline for the given model id or local path.
    return pipeline("text-generation", model=modelo, trust_remote_code=True)

def data_processing(text, pipe, RAG=False, Adv_prompts=False, max_len=100):
    # RAG and Adv_prompts are accepted for interface compatibility but unused here.
    prompt = f"Question: {text}.\nAnswer:"
    # Cap total length with the caller-supplied max_len (the pipeline keyword is max_length).
    generated_text = pipe(prompt, max_length=max_len, num_return_sequences=1, truncation=True)
    # The pipeline echoes the prompt, so the answer sits on the second line; drop the "Answer:" label.
    return generated_text[0]["generated_text"].split("\n")[1].removeprefix("Answer:").strip()
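

# Minimal usage sketch; "gpt2" is an assumed example checkpoint, not one named
# by this module. Any Hugging Face text-generation model id works the same way.
if __name__ == "__main__":
    pipe = model_loading("gpt2")  # hypothetical model choice for illustration
    answer = data_processing("What is the capital of France?", pipe, max_len=50)
    print(answer)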