manjunathshiva commited on
Commit
86b178b
1 Parent(s): 9d2fb6e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +36 -14
README.md CHANGED
@@ -3,8 +3,12 @@ tags:
3
  - autotrain
4
  - text-generation
5
  widget:
6
- - text: "I love AutoTrain because "
7
- license: other
 
 
 
 
8
  ---
9
 
10
  # Model Trained Using AutoTrain
@@ -16,25 +20,43 @@ This model was trained using AutoTrain. For more information, please visit [Auto
16
  ```python
17
 
18
  from transformers import AutoModelForCausalLM, AutoTokenizer
19
-
20
- model_path = "PATH_TO_THIS_REPO"
21
-
22
- tokenizer = AutoTokenizer.from_pretrained(model_path)
23
- model = AutoModelForCausalLM.from_pretrained(
24
- model_path,
25
- device_map="auto",
26
- torch_dtype='auto'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  ).eval()
28
 
29
- # Prompt content: "hi"
30
  messages = [
31
- {"role": "user", "content": "hi"}
32
  ]
33
 
34
  input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
35
- output_ids = model.generate(input_ids.to('cuda'))
 
36
  response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
37
 
38
- # Model response: "Hello! How can I assist you today?"
39
  print(response)
 
40
  ```
 
3
  - autotrain
4
  - text-generation
5
  widget:
6
+ - text: 'I love AutoTrain because '
7
+ license: apache-2.0
8
+ datasets:
9
+ - manjunathshiva/autotrain-data-GRADE3B-7B-02
10
+ language:
11
+ - en
12
  ---
13
 
14
  # Model Trained Using AutoTrain
 
20
  ```python
21
 
22
  from transformers import AutoModelForCausalLM, AutoTokenizer
23
+ from peft import PeftModel
24
+ import torch
25
+ access_token = "<HF_TOKEN>"
26
+
27
+
28
+ tokenizer = AutoTokenizer.from_pretrained(
29
+ "meta-llama/Llama-2-7b-chat-hf"
30
+ )
31
+
32
+
33
+ base_model = AutoModelForCausalLM.from_pretrained(
34
+ 'meta-llama/Llama-2-7b-chat-hf',
35
+ token=access_token,
36
+ trust_remote_code=True,
37
+ #device_map="auto", #Uncomment if you have a good amount of GPU memory
38
+ torch_dtype=torch.float16,
39
+ offload_folder="offload/"
40
+ )
41
+ model = PeftModel.from_pretrained(
42
+ base_model,
43
+ 'manjunathshiva/GRADE3B-7B-02-0',
44
+ token=access_token,
45
+ offload_folder="offload/"
46
+
47
  ).eval()
48
 
49
+ # Prompt content: "When is Maths Unit Test 2?"
50
  messages = [
51
+ {"role": "user", "content": "When is Maths Unit Test 2?"}
52
  ]
53
 
54
  input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
55
+ #output_ids = model.generate(input_ids.to('cuda')) #Uncomment if you have CUDA, and comment out the line below
56
+ output_ids = model.generate(input_ids=input_ids, temperature=0.01 )
57
  response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
58
 
59
+ # Model response: "<Outputs Date>"
60
  print(response)
61
+
62
  ```