TESTtm7873 committed
Commit 34f10b2 · 1 Parent(s): 3504e35
Update README.md
README.md CHANGED
@@ -20,43 +20,37 @@ This model is part of the VCC project and has been fine-tuned on the TESTtm7873/
 
 To use this model, you'll need to set up your environment first:
 
-
-python
-import
-from
-
-
-
-
-    load_in_4bit=True,
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16
-)
-
-# Loading the base model with quantization config
-base_model = AutoModelForCausalLM.from_pretrained(
-    base_model_id,
-    quantization_config=bnb_config,
+## Model initialization
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+from peft import PeftModel
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+model = AutoModelForCausalLM.from_pretrained(
+    "mistralai/Mistral-7B-Instruct-v0.2",
+    load_in_8bit=True,
     device_map="auto",
-    trust_remote_code=True,
 )
+model = PeftModel.from_pretrained(model, "TESTtm7873/MistralCat-1v")
+model.eval()
+```
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+## Inference
+```python
+def evaluate(question: str) -> str:
+    prompt = f"The conversation between human and Virtual Cat Companion.\n[|Human|] {question}.\n[|AI|] "
+    inputs = tokenizer(prompt, return_tensors="pt")
+    input_ids = inputs["input_ids"].cuda()
+    generation_output = model.generate(
+        input_ids=input_ids,
+        generation_config=generation_config,
+        return_dict_in_generate=True,
+        output_scores=True,
+        max_new_tokens=256
+    )
+    output = tokenizer.decode(generation_output.sequences[0]).split("[|AI|]")[1]
+    return output
+your_question: str = "You have the softest fur."
+print(evaluate(your_question))
 ```
 
 
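Note: the inference snippet above passes `generation_config=generation_config` to `model.generate`, but the committed README never defines that object; it only imports `GenerationConfig`. A minimal sketch that would make the snippet runnable, with illustrative sampling values that are assumptions and not part of the commit:

```python
from transformers import GenerationConfig

# Hypothetical settings; the committed README never defines generation_config.
generation_config = GenerationConfig(
    do_sample=True,   # sample instead of greedy decoding
    temperature=0.7,  # assumed: moderate randomness
    top_p=0.9,        # assumed: nucleus-sampling cutoff
)
```

With this in scope, `evaluate` runs as written; otherwise the `model.generate` call raises a `NameError`.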
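Note: the new code passes `load_in_8bit=True` directly to `from_pretrained`, and `input_ids.cuda()` assumes a CUDA GPU is available. Recent `transformers` releases express quantization through `BitsAndBytesConfig` (the same helper the removed 4-bit code used), so an equivalent, more explicit load could look like the following sketch, assuming `bitsandbytes` and `accelerate` are installed:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Same 8-bit load expressed via an explicit quantization config.
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    quantization_config=bnb_config,
    device_map="auto",  # requires accelerate; places layers on available devices
)
```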