Update README.md
Browse files
README.md
CHANGED
@@ -54,6 +54,12 @@ The code below performs the following steps:
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
+# Load the base pre-trained model
+base_model_id = "EleutherAI/gpt-neox-20b"
+tokenizer = AutoTokenizer.from_pretrained(base_model_id)
+model = AutoModelForCausalLM.from_pretrained(base_model_id)
+
+# Fine-tuning model
 model_id = "hipnologo/GPT-Neox-20b-QLoRA-FineTune-english_quotes_dataset"
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
@@ -62,7 +68,7 @@ bnb_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.bfloat16
 )
 
-
+# Load the fine-tuned model
 model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"":0})
 
 text = "Twenty years from now"