# Hugging Face Spaces status banner ("Spaces: Running") pasted in from the
# web UI; kept here as a comment so the script remains valid Python.
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Llama model and tokenizer once at import time.
# NOTE(review): meta-llama/Llama-3.2-1B is a gated *base* (non-chat) model on
# the Hub — access must be granted and an auth token configured, or
# from_pretrained will fail; confirm the deployment environment has both.
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Function to generate a response from the loaded model.
def generate_response(prompt, max_length=100):
    """Generate a text continuation for *prompt* with the loaded Llama model.

    Args:
        prompt: Input text fed to the model.
        max_length: Total token budget (prompt tokens + generated tokens),
            forwarded to ``model.generate``.

    Returns:
        The decoded output sequence as a string, special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask and an explicit pad token so generate() does not
    # emit "attention mask not set" / "pad_token_id not set" warnings and pads
    # deterministically; greedy-decoded output is unchanged.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# --- Smoke tests ------------------------------------------------------------
# Test English response
english_prompt = "Hi, Welcome to Saudi Travel and Tourism, How can I help you?"
english_response = generate_response(english_prompt)
print("English Response:", english_response)

# Test Arabic response.
# NOTE(review): the original literal was mojibake (UTF-8 Arabic bytes decoded
# with the wrong codec and pasted back). Reconstructed below from the parallel
# English prompt — confirm the exact intended wording with the author.
arabic_prompt = "مرحباً بكم في موقع السفر والسياحة السعودي، كيف يمكنني مساعدتك اليوم؟"
arabic_response = generate_response(arabic_prompt)
print("Arabic Response:", arabic_response)