DipeshChaudhary committed
Commit 88b16e1
1 Parent(s): b623db3

Update README.md

Files changed (1)
1. README.md +6 -4
README.md CHANGED
````diff
@@ -24,19 +24,21 @@ base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
 ```
 
 ```
-<!-- import torch
+import torch
 max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 from transformers import AutoTokenizer
-```
+```
+```
 model, tokenizer = FastLanguageModel.from_pretrained(
 model_name="DipeshChaudhary/ShareGPTChatBot-Counselchat1", # Your fine-tuned model
 max_seq_length=max_seq_length,
 dtype=dtype,
 load_in_4bit=load_in_4bit,
 )
-``` -->
+```
+```
 #We now use the Llama-3 format for conversation style finetunes. We use Open Assistant conversations in ShareGPT style.
 **We use our get_chat_template function to get the correct chat template. They support zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old and their own optimized unsloth template**
 from unsloth.chat_templates import get_chat_template
@@ -45,7 +47,7 @@ base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
 chat_template = "llama-3", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
 mapping = {"role" : "from", "content" : "value", "user" : "human", "assistant" : "gpt"}, # ShareGPT style
 )
-```
+```
 
 # Uploaded model
 
````
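As committed, the README's first block still won't run on its own: `FastLanguageModel` is never imported (it comes from the `unsloth` package), and the `AutoTokenizer` import goes unused. A minimal runnable sketch of the same loading step, assuming `unsloth` is installed and a CUDA GPU is available:

```python
from unsloth import FastLanguageModel  # the import missing from the README snippet

max_seq_length = 2048  # any length works; unsloth applies RoPE scaling internally
dtype = None           # None = auto-detect (float16 on T4/V100, bfloat16 on Ampere+)
load_in_4bit = True    # load weights in 4-bit to cut GPU memory use

# Download the fine-tuned model and its tokenizer from the Hub
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="DipeshChaudhary/ShareGPTChatBot-Counselchat1",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
```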
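The second block shows only the keyword arguments of the `get_chat_template` call; the assignment itself (`tokenizer = get_chat_template(...)`) falls outside the changed hunks. Pieced together, with unsloth's `FastLanguageModel.for_inference` helper added for a generation round (the prompt text and generation settings here are illustrative, not from the README):

```python
from unsloth.chat_templates import get_chat_template

# Attach the llama-3 chat template; the mapping adapts ShareGPT-style
# records ({"from": ..., "value": ...}) to the role/content keys the
# template expects.
tokenizer = get_chat_template(
    tokenizer,
    chat_template="llama-3",
    mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},
)

FastLanguageModel.for_inference(model)  # switch unsloth into inference mode

# Hypothetical prompt, in the ShareGPT keys set up by the mapping above
messages = [{"from": "human", "value": "I feel anxious all the time. What can I do?"}]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # append the assistant header so the model replies
    return_tensors="pt",
).to(model.device)

outputs = model.generate(input_ids=input_ids, max_new_tokens=128, use_cache=True)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```

The `mapping` argument is what lets ShareGPT-format conversations be fed to `apply_chat_template` unchanged, which is presumably why the README pairs it with the llama-3 template.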