Update README.md
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
The snippet below loads the fine-tuned LoRA adapter on top of its base model and runs a quick generation test:

```python
# Load the fine-tuned adapter on top of the base model and run a short generation test.

import gc

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Free any leftover GPU memory from previous runs.
gc.collect()
torch.cuda.empty_cache()

PEFT_MODEL = "kr-manish/Llama-2-7b-chat-finetune-for-textGeneration"

# The adapter config records which base model it was trained from.
config = PeftConfig.from_pretrained(PEFT_MODEL)

# Load the base model in half precision, spread across the available devices.
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    low_cpu_mem_usage=True,
    return_dict=True,
    # quantization_config=bnb_config,  # optionally load the base model quantized (see below)
    device_map="auto",
    torch_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
tokenizer.pad_token = tokenizer.eos_token

# Attach the fine-tuned LoRA weights to the base model.
load_model = PeftModel.from_pretrained(model, PEFT_MODEL)

prompt_test = "How to own a plane in the United States?"

pipe_test = pipeline(
    task="text-generation",
    model=load_model,
    tokenizer=tokenizer,
    max_new_tokens=25,
    do_sample=False,  # greedy decoding for a deterministic smoke test
)

result_test = pipe_test(prompt_test)
answer_test = result_test[0]["generated_text"]
print(answer_test)

# Example output (generation stops at max_new_tokens=25):
# How to own a plane in the United States?\n\nIn the United States, owning a plane is a
# significant investment and requires careful planning and research. Here are
```
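
The loader above leaves `quantization_config=bnb_config` commented out and never defines `bnb_config`. A minimal sketch of what such a config could look like, assuming 4-bit NF4 quantization (the card does not state which quantization, if any, was used) and reusing `config` from the snippet above:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Hypothetical 4-bit quantization settings; adjust to what your hardware supports.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,  # resolved from the PeftConfig above
    quantization_config=bnb_config,
    device_map="auto",
)
```

This requires `bitsandbytes` and a CUDA GPU; the rest of the example is unchanged.
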
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
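
A minimal direct-use sketch, reusing `model`, `load_model`, and `tokenizer` from the snippet above; the prompt is a made-up example, and the card does not document a required prompt template:

```python
# Generate directly with the adapter-wrapped model, without the pipeline helper.
prompt = "What documents do I need to register an aircraft?"  # hypothetical prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = load_model.generate(**inputs, max_new_tokens=100, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```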