from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
model_name = "meta-llama/Llama-3.2-1B"  # Adjust the path if needed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Input prompt
prompt = "Welcome to Saudi Travel and Tourism! How can I help you today?"
inputs = tokenizer(prompt, return_tensors="pt")

# Generate output
outputs = model.generate(**inputs, max_new_tokens=50)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print response
print("LLaMA Response:", response)
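
By default, generate uses greedy decoding, which can produce repetitive continuations for an open-ended prompt like this one. A minimal sketch of a sampling-based variant follows; the temperature and top_p values are illustrative, not tuned, and setting pad_token_id is only needed because Llama tokenizers ship without a pad token.

# Optional: sampling instead of greedy decoding (illustrative values)
outputs = model.generate(
    **inputs,
    max_new_tokens=50,
    do_sample=True,                        # sample from the distribution
    temperature=0.7,                       # lower = more deterministic
    top_p=0.9,                             # nucleus sampling cutoff
    pad_token_id=tokenizer.eos_token_id,   # Llama has no pad token; silences a warning
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("LLaMA Response:", response)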