##########################
#Must include these lines#
##########################
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3"
import torch
from transformers import (
  AutoModelForCausalLM,
  AutoTokenizer,
  pipeline,
)
from peft import PeftModel

########################################################
#This is how you can set up the Llama model on your end#
########################################################
model = AutoModelForCausalLM.from_pretrained(
  'codellama/CodeLlama-7b-Instruct-hf',
  device_map="balanced_low_0",
)
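
# Optional sanity check (an addition, not part of the original script): when a
# device_map is given, transformers records where each module was placed in
# model.hf_device_map, so printing it shows how the layers were split across
# the visible GPUs.
print(model.hf_device_map)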

# The tokenizer stays on the CPU, so it does not need a device_map.
tokenizer = AutoTokenizer.from_pretrained(
  'llama_prompt_model/tokenizer',
  padding=True,
  truncation=True,
)

# The fine-tuned tokenizer may carry extra special tokens, so resize the
# embedding matrix to match its vocabulary before the adapter is loaded.
model.resize_token_embeddings(len(tokenizer))

# Attach the fine-tuned PEFT adapter to the base model...
model = PeftModel.from_pretrained(model, "llama_prompt_model/model")

# ...and merge the adapter weights into the base weights, leaving a plain
# transformers model that can be used directly in a pipeline.
model = model.merge_and_unload()
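
################################################################
#Optional: save the merged model so the merge is only done once#
################################################################
# This step is a suggestion, not part of the original script, and the output
# folder name is a placeholder. Uncomment it to write the merged weights to
# disk so later runs can load them with from_pretrained and skip the PEFT merge.
# model.save_pretrained("llama_prompt_model/merged")
# tokenizer.save_pretrained("llama_prompt_model/merged")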

################################################
#This is how you can communicate with the model#
################################################
# max_length counts the prompt tokens plus the generated tokens; pad_token_id=2
# is Llama's eos (</s>) id, since the tokenizer has no dedicated pad token.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer,
                max_length=2000, pad_token_id=2)

##################################################
#Feel free to change this line and see the result#
##################################################
prompt = "What's your name?"

# Wrap the prompt in the [INST] ... [/INST] instruction format before generation.
result = pipe(f"<s>[INST]{prompt}[/INST]")[0]["generated_text"]

print(result)
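
# Optional post-processing (an addition, not part of the original script): the
# text-generation pipeline returns the prompt followed by the completion, so
# the reply alone can be recovered by cutting everything up to the closing
# [/INST] tag.
reply = result.split("[/INST]", 1)[-1].strip()
print(reply)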