akumar23 committed
Commit 1ab3ee8
1 Parent(s): 97f0f03

Update README.md

Files changed (1):
  1. README.md +78 -0

README.md CHANGED
@@ -12,6 +12,84 @@ Builds on the falcon 7b instruct shared model, the specific model is linked in t
  Trained using this [mental health dataset](https://huggingface.co/datasets/Amod/mental_health_counseling_conversations)

+ How to test the model:
+
+ ```python
+ import os
+
+ import torch
+ from peft import PeftConfig, PeftModel
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     BitsAndBytesConfig
+ )
+
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+
+ PEFT_MODEL = "akumar23/mental-falcon-7b"
+
+ # Load the base model in 4-bit NF4 (requires the bitsandbytes package)
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ config = PeftConfig.from_pretrained(PEFT_MODEL)
+ model = AutoModelForCausalLM.from_pretrained(
+     config.base_model_name_or_path,
+     return_dict=True,
+     quantization_config=bnb_config,
+     device_map="auto",
+     trust_remote_code=True
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Attach the fine-tuned LoRA adapter to the quantized base model
+ model = PeftModel.from_pretrained(model, PEFT_MODEL)
+
+ generation_config = model.generation_config
+ generation_config.max_new_tokens = 200
+ generation_config.do_sample = True  # enable sampling so temperature/top_p take effect
+ generation_config.temperature = 0.7
+ generation_config.top_p = 0.7
+ generation_config.num_return_sequences = 1
+ generation_config.pad_token_id = tokenizer.eos_token_id
+ generation_config.eos_token_id = tokenizer.eos_token_id
+
+ device = "cuda:0"
+
+ prompt = """
+ <human>: how do i know if i am depressed
+ <assistant>:
+ """.strip()
+
+ encoding = tokenizer(prompt, return_tensors="pt").to(device)
+ with torch.inference_mode():
+     outputs = model.generate(
+         input_ids=encoding.input_ids,
+         attention_mask=encoding.attention_mask,
+         generation_config=generation_config
+     )
+
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
+
  ## Model Details

  ### Model Description
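
The dataset the adapter was trained on can be pulled down directly with the `datasets` library for inspection. A minimal sketch, assuming the dataset exposes a standard `train` split (not verified here):

```python
from datasets import load_dataset

# Dataset linked in the README; the "train" split name is an assumption
data = load_dataset("Amod/mental_health_counseling_conversations", split="train")

# Print one raw example to see the question/response fields
print(data[0])
```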
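
For repeated queries, the prompt template and generation call from the test snippet can be wrapped in a small helper. A sketch under the assumption that `model`, `tokenizer`, `generation_config`, and `device` already exist as defined above; `ask` is a hypothetical name, not part of the model's API:

```python
def ask(question: str) -> str:
    # Same <human>/<assistant> template the test snippet uses
    prompt = f"<human>: {question}\n<assistant>:"
    encoding = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.inference_mode():
        outputs = model.generate(
            input_ids=encoding.input_ids,
            attention_mask=encoding.attention_mask,
            generation_config=generation_config,
        )
    # decode() returns prompt + completion; drop the prompt prefix
    # (approximate, since tokenization can normalize whitespace)
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text[len(prompt):].strip()

print(ask("how do i know if i am depressed"))
```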