Missing files

#8
by BadatFreaks - opened

My code is below, and I am getting the attached error. Where can I find the missing files? I am new to LLMs — please guide me!

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Define the model name and the local cache directory.
# NOTE(review): this repo ships GGUF-quantized files only — it has no
# config.json / PyTorch weights, so transformers' AutoModelForCausalLM
# cannot load it (the likely cause of the "missing files" error).
# Use an HF-format repo instead, or load the GGUF file with a GGUF-aware
# runtime such as ctransformers or llama-cpp-python — TODO confirm.
model_name = "TheBloke/Llama-2-70B-Chat-GGUF"
local_model_dir = "./Llama-2-70B-Model"

# Check if a GPU is available: transformers' pipeline() takes a CUDA
# device index, or -1 to run on the CPU.
device = 0 if torch.cuda.is_available() else -1

# Function to load or download the model.
def load_model():
    """Load the model and tokenizer, downloading them on first use.

    On the first run the model is fetched from the Hugging Face Hub and
    saved into ``local_model_dir``; later runs read the local copy.

    Returns:
        tuple: ``(model, tokenizer)`` ready to hand to a pipeline.
    """
    if not os.path.exists(local_model_dir):
        os.makedirs(local_model_dir, exist_ok=True)
        print(f"Downloading model {model_name} for the first time...")
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Cache both pieces so subsequent runs skip the download.
        model.save_pretrained(local_model_dir)
        tokenizer.save_pretrained(local_model_dir)
    else:
        # NOTE(review): an empty directory left by a failed earlier
        # download would still take this branch and fail to load —
        # consider checking for an expected file instead of the dir.
        print(f"Loading model {model_name} from local storage...")
        model = AutoModelForCausalLM.from_pretrained(local_model_dir)
        tokenizer = AutoTokenizer.from_pretrained(local_model_dir)
    return model, tokenizer

# Load the model (downloads on the first run, then uses the local cache).
model, tokenizer = load_model()

# Create a text-generation pipeline bound to the local model; `device`
# selects GPU 0 when CUDA is available, otherwise CPU (-1).
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Interactive prompt loop: generate text until the user types 'exit'.
while True:
    prompt = input("Enter your prompt (type 'exit' to stop): ")
    if prompt.lower() == "exit":
        break
    # max_length counts prompt + generated tokens; adjust as needed.
    response = pipe(prompt, max_length=50)
    print(response[0]['generated_text'])

Screenshot from 2023-12-26 12-28-50.png

Sign up or log in to comment