from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the AraGPT2-medium model and its tokenizer from the Hugging Face Hub.
model_name = "aubmindlab/aragpt2-medium"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_response(context, question):
    # Build an Arabic prompt: "سؤال" means "Question" and "إجابة" means "Answer".
    input_text = context + "\nسؤال: " + question + "\nإجابة: "
    inputs = tokenizer(input_text, return_tensors="pt")
    # Pass the attention mask and set pad_token_id explicitly to avoid generation
    # warnings; max_new_tokens bounds the answer length independently of the
    # prompt length (max_length would count the prompt tokens as well).
    output = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=150,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
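
# A minimal usage sketch; the Arabic context and question below are
# hypothetical examples, not taken from the original text:
context = "القاهرة هي عاصمة جمهورية مصر العربية وأكبر مدنها."  # "Cairo is the capital of Egypt and its largest city."
question = "ما هي عاصمة مصر؟"  # "What is the capital of Egypt?"
print(generate_response(context, question))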