Commit cc8500b (1 parent: 680bddd), committed by i4never

Create README.md

Files changed (1): README.md (+63, -0)

---
license: apache-2.0
---
<div style="width: 100%;">
<img src="http://x-pai.algolet.com/bot/img/logo_core.png" alt="TigerBot" style="width: 20%; display: block; margin: auto;">
</div>
<p align="center">
<font face="黑体" size="5"> A cutting-edge foundation for your very own LLM. </font>
</p>
<p align="center">
🌐 <a href="https://tigerbot.com/" target="_blank">TigerBot</a> • 🤗 <a href="https://huggingface.co/TigerResearch" target="_blank">Hugging Face</a>
</p>

## Github

https://github.com/TigerResearch/TigerBot

## Usage

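The snippet below loads the tokenizer and model from the Hub, shards the model across the available GPUs with `accelerate` (the `BloomBlock` no-split class reflects the model's BLOOM-based architecture), wraps the question in the instruction template the model was fine-tuned on, and decodes only the newly generated tokens.
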
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from accelerate import infer_auto_device_map, dispatch_model
from accelerate.utils import get_balanced_memory

tokenizer = AutoTokenizer.from_pretrained("TigerResearch/tigerbot-7b-sft-v2")

model = AutoModelForCausalLM.from_pretrained("TigerResearch/tigerbot-7b-sft-v2")

# Shard the model across the available GPUs, keeping each BloomBlock intact.
max_memory = get_balanced_memory(model)
device_map = infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["BloomBlock"])
model = dispatch_model(model, device_map=device_map, offload_buffers=True)

device = torch.cuda.current_device()

# Instruction template the SFT model was trained on.
tok_ins = "\n\n### Instruction:\n"
tok_res = "\n\n### Response:\n"
prompt_input = tok_ins + "{instruction}" + tok_res

input_text = "What is the next number after this list: [1, 2, 3, 5, 8, 13, 21]"
input_text = prompt_input.format_map({'instruction': input_text})

max_input_length = 512
max_generate_length = 1024
generation_kwargs = {
    "do_sample": True,  # enable sampling so top_p and temperature take effect
    "top_p": 0.95,
    "temperature": 0.8,
    "max_length": max_generate_length,
    "eos_token_id": tokenizer.eos_token_id,
    "pad_token_id": tokenizer.pad_token_id,
    "early_stopping": True,
    "no_repeat_ngram_size": 4,
}

inputs = tokenizer(input_text, return_tensors='pt', truncation=True, max_length=max_input_length)
inputs = {k: v.to(device) for k, v in inputs.items()}
output = model.generate(**inputs, **generation_kwargs)

# Decode only the newly generated tokens, dropping the prompt and EOS.
answer = ''
for tok_id in output[0][inputs['input_ids'].shape[1]:]:
    if tok_id != tokenizer.eos_token_id:
        answer += tokenizer.decode(tok_id)
print(answer)
```
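
For reference, the example list is the Fibonacci sequence, so the expected answer is 34.

If the model fits on a single GPU, the manual `accelerate` dispatch above can be skipped by letting `from_pretrained` place the weights directly. A minimal sketch, assuming a CUDA device with enough free memory; `torch_dtype=torch.float16` is an assumption to halve the memory footprint, not something the original snippet sets:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("TigerResearch/tigerbot-7b-sft-v2")

# Assumption: half-precision weights on a single CUDA device; the
# accelerate-based dispatch above is the multi-GPU/offload alternative.
model = AutoModelForCausalLM.from_pretrained(
    "TigerResearch/tigerbot-7b-sft-v2",
    torch_dtype=torch.float16,
    device_map="auto",
)
```

Generation then proceeds exactly as in the snippet above, with `device = model.device` in place of `torch.cuda.current_device()`.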