shunxing1234 committed on
Commit f359e84 (1 parent: 06213d5)

Update README_zh.md

Files changed (1)
  1. README_zh.md +7 -9
README_zh.md CHANGED
@@ -49,20 +49,18 @@ Aquila-7B v0.8 在 FlagEval 大模型评测中( “客观”)相比0.7的版
  ```python
  from transformers import AutoTokenizer, AutoModelForCausalLM
  import torch
- from cyg_conversation import covert_prompt_to_input_ids_with_history

- tokenizer = AutoTokenizer.from_pretrained("BAAI/AquilaChat-7B")
- model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B")
+ model_info = "BAAI/AquilaCode-py"
+ tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
  model.eval()
- model.to("cuda:0")
- vocab = tokenizer.vocab
- print(len(vocab))
+ model.to("cuda:4")

- text = "请给出10个要到北京旅游的理由。"
+ text = "#补全代码\ndef quick_sort(x):"

- tokens = covert_prompt_to_input_ids_with_history(text, history=[], tokenizer=tokenizer, max_token=512)
+ tokens = tokenizer.encode_plus(text)['input_ids'][:-1]

- tokens = torch.tensor(tokens)[None,].to("cuda:0")
+ tokens = torch.tensor(tokens)[None,].to("cuda:4")


  with torch.no_grad():
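
The hunk ends at `with torch.no_grad():`, so the generation and decoding half of the updated AquilaCode-py example is not visible in this diff. The sketch below repeats the added ("+") lines and completes them with standard `transformers` usage; everything after `with torch.no_grad():`, including the `model.generate` call, `max_new_tokens=256`, and the decoding step, is an assumption rather than content taken from the README.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Lines up to `with torch.no_grad():` mirror the "+" side of the diff above.
model_info = "BAAI/AquilaCode-py"
tokenizer = AutoTokenizer.from_pretrained(model_info, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True)
model.eval()

# The diff pins the model to "cuda:4"; fall back to CPU if that GPU index is absent.
device = "cuda:4" if torch.cuda.device_count() > 4 else "cpu"
model.to(device)

text = "#补全代码\ndef quick_sort(x):"  # "complete the code" prompt used in the README

# Encode the prompt, drop the trailing token (presumably a special token appended
# by encode_plus, as done in the diff), and add a batch dimension.
tokens = tokenizer.encode_plus(text)["input_ids"][:-1]
tokens = torch.tensor(tokens)[None,].to(device)

with torch.no_grad():
    # Assumed continuation (not shown in the hunk): greedy generation and decoding.
    out = model.generate(tokens, max_new_tokens=256)[0]
    print(tokenizer.decode(out.tolist(), skip_special_tokens=True))
```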