import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Path to the fine-tuned model directory
model_path = "./fine_tuned_model"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Create chatbot pipeline
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1  # Use GPU if available
)

# Example usage (note: max_length counts prompt + generated tokens)
prompt = "Hello, can you tell me some fun facts about European legislation?"
response = chatbot(prompt, max_length=100, do_sample=True, temperature=0.7)
print(response[0]['generated_text'])
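
# Optional: a minimal interactive loop built on the pipeline above.
# This is a sketch, not part of the original script; it assumes the model
# was fine-tuned on single-turn prompts rather than a chat template.
while True:
    user_input = input("You: ")
    if user_input.lower() in {"quit", "exit"}:
        break
    # max_new_tokens bounds only the generated continuation,
    # independent of the prompt length
    result = chatbot(user_input, max_new_tokens=80, do_sample=True, temperature=0.7)
    print("Bot:", result[0]['generated_text'])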