File size: 1,183 Bytes
10a94ee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
from transformers import AutoModelForCausalLM, AutoTokenizer

def main():
    """Interactive loop: load a fine-tuned causal LM and generate text from user prompts.

    Reads prompts from stdin until the user types 'exit'. Each prompt is
    tokenized, passed through ``model.generate``, and the decoded output is
    printed to stdout.
    """
    # Directory containing the fine-tuned model; adjust for your environment.
    model_output_dir = "/Users/migueldeguzman/Desktop/papercliptodd/falcon-1b/v1/"  # Replace with your fine-tuned model directory
    tokenizer = AutoTokenizer.from_pretrained(model_output_dir)
    model = AutoModelForCausalLM.from_pretrained(model_output_dir)

    # Falcon-style tokenizers ship without a pad token; fall back to EOS so
    # generate() has a valid pad_token_id and does not warn on every call.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token

    while True:
        # User input for text generation prompt
        prompt = input("Enter a prompt for text generation (or type 'exit' to quit): ")

        if prompt.lower() == 'exit':
            break

        # Tokenize with an explicit attention mask — omitting it triggers an
        # HF warning and can mis-attend when pad and eos tokens coincide.
        encoded = tokenizer(prompt, return_tensors="pt")
        output = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_length=1024,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            # BUG FIX: top_k / top_p / temperature are silently ignored by
            # transformers unless sampling is enabled; without do_sample=True
            # the original decoded greedily and these three had no effect.
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.001,  # near-zero temperature ~ deterministic (greedy-like) output
            pad_token_id=tokenizer.pad_token_id,
        )

        # Decode and print the generated text
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print("Generated Text:")
        print(generated_text)

if __name__ == "__main__":
    main()