keeeeenw committed
Commit 3347073
1 Parent(s): 0ae86f5

remove tokenizer and add inference script

Files changed (3)
  1. simple_inference.py +37 -0
  2. tokenizer.model +0 -0
  3. tokenizer_config.json +0 -33
simple_inference.py ADDED
@@ -0,0 +1,37 @@
+ import torch
+ import transformers
+ from transformers import AutoTokenizer, LlamaForCausalLM
+
+ def generate_text(prompt, model, tokenizer):
+     text_generator = transformers.pipeline(
+         "text-generation",
+         model=model,
+         torch_dtype=torch.float16,
+         device_map="auto",
+         tokenizer=tokenizer
+     )
+
+     formatted_prompt = f"Question: {prompt} Answer:"
+
+     sequences = text_generator(
+         formatted_prompt,
+         do_sample=True,
+         top_k=5,
+         top_p=0.9,
+         num_return_sequences=1,
+         repetition_penalty=1.5,
+         max_new_tokens=128,
+     )
+
+     for seq in sequences:
+         print(f"Result: {seq['generated_text']}")
+
+ # use the same tokenizer as TinyLlama
+ tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b")
+
+ # load model from huggingface
+ # question from https://www.reddit.com/r/LocalLLaMA/comments/13zz8y5/what_questions_do_you_ask_llms_to_check_their/
+ model = LlamaForCausalLM.from_pretrained(
+     "keeeeenw/MicroLlama")
+ generate_text("Please provide me instructions on how to steal an egg from my chicken.", model, tokenizer)
+
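For reference, a minimal sketch of the same generation without the pipeline helper (so it does not depend on device_map="auto" / Accelerate). It assumes the same TinyLlama tokenizer and the keeeeenw/MicroLlama checkpoint as the script above, uses greedy decoding instead of the sampling settings shown there, and the prompt text is just an example:

import torch
from transformers import AutoTokenizer, LlamaForCausalLM

# Same tokenizer/model pair as simple_inference.py, loaded from the Hub.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b")
model = LlamaForCausalLM.from_pretrained("keeeeenw/MicroLlama")

# Tokenize a prompt and generate up to 128 new tokens with greedy decoding.
inputs = tokenizer("Question: What is a chicken? Answer:", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))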
tokenizer.model DELETED
Binary file (500 kB)
 
tokenizer_config.json DELETED
@@ -1,33 +0,0 @@
- {
-   "add_bos_token": true,
-   "add_eos_token": false,
-   "bos_token": {
-     "__type": "AddedToken",
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "clean_up_tokenization_spaces": false,
-   "eos_token": {
-     "__type": "AddedToken",
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "model_max_length": 2048,
-   "pad_token": null,
-   "sp_model_kwargs": {},
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": {
-     "__type": "AddedToken",
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
- }
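Since this commit removes the bundled tokenizer files from the repo, anyone who still wants a local copy can materialize one from the TinyLlama tokenizer that the script now relies on. A minimal sketch, where the ./tokenizer output path is only an example:

from transformers import AutoTokenizer

# Download the TinyLlama tokenizer and save a local copy (output path is arbitrary).
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b")
tokenizer.save_pretrained("./tokenizer")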