duyntnet committed
Commit
14d35c9
1 Parent(s): 1c66838

Upload README.md with huggingface_hub

Files changed (1)
  1. README.md +63 -0
README.md ADDED
---
license: other
language:
- en
pipeline_tag: text-generation
inference: false
tags:
- transformers
- gguf
- imatrix
- Orca-2-13b
---
Quantizations of https://huggingface.co/microsoft/Orca-2-13b
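
Below is a minimal sketch of loading one of the GGUF quantizations with llama-cpp-python. The filename, quantization level, and context size are assumed examples for illustration, not a reference to a specific file in this repo; substitute whichever quant you downloaded.

```python
from llama_cpp import Llama

# Assumed example filename: replace with the GGUF file you downloaded from this repo
llm = Llama(model_path="Orca-2-13b.Q4_K_M.gguf", n_ctx=4096)

# Orca 2 uses ChatML-style markup (see the Hugging Face example below)
prompt = (
    "<|im_start|>system\nYou are Orca, an AI language model created by Microsoft.<|im_end|>\n"
    "<|im_start|>user\nWhy is the sky blue?<|im_end|>\n"
    "<|im_start|>assistant"
)

# Stop at the end-of-turn marker so generation does not run into a new turn
output = llm(prompt, max_tokens=256, stop=["<|im_end|>"])
print(output["choices"][0]["text"])
```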

# From original readme

## Getting started with Orca 2

**Inference with Hugging Face library**

```python
import torch
import transformers

if torch.cuda.is_available():
    torch.set_default_device("cuda")
else:
    torch.set_default_device("cpu")

model = transformers.AutoModelForCausalLM.from_pretrained("microsoft/Orca-2-13b", device_map='auto')

# https://github.com/huggingface/transformers/issues/27132
# please use the slow tokenizer, since the fast and slow tokenizers produce different tokens
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "microsoft/Orca-2-13b",
    use_fast=False,
)

system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
user_message = "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?"

# Orca 2 expects ChatML-style markup around each turn
prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"

inputs = tokenizer(prompt, return_tensors='pt')
output_ids = model.generate(inputs["input_ids"])
answer = tokenizer.batch_decode(output_ids)[0]

print(answer)

# This example continues by adding a second user turn to the conversation
second_turn_user_message = "Give me a list of the key points of your first answer."

# we set add_special_tokens=False because we don't want to automatically add a bos_token between messages
second_turn_message_in_markup = f"\n<|im_start|>user\n{second_turn_user_message}<|im_end|>\n<|im_start|>assistant"
second_turn_tokens = tokenizer(second_turn_message_in_markup, return_tensors='pt', add_special_tokens=False)
second_turn_input = torch.cat([output_ids, second_turn_tokens['input_ids']], dim=1)

output_ids_2 = model.generate(second_turn_input)
second_turn_answer = tokenizer.batch_decode(output_ids_2)[0]

print(second_turn_answer)
```
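
Note that the second turn concatenates the new user message onto `output_ids` from the first generation, so the model receives the full conversation so far, including its own first answer, as context.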