ericzzz committed
Commit c0e7423
1 Parent(s): c50d548

Upload model

README.md CHANGED
@@ -1,3 +1,41 @@
  ---
  license: apache-2.0
+ datasets:
+ - Open-Orca/SlimOrca
+ language:
+ - en
+ pipeline_tag: text-generation
+ inference: false
+ tags:
+ - text-generation-inference
  ---
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import transformers
+ import torch
+
+ model = 'ericzzz/falcon-rw-1b-instruct-openorca'
+
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+ )
+
+ system_message = "You are a helpful assistant. Give short answers."
+ instruction = "What is AI? Give some examples."
+ prompt = f"<SYS> {system_message} <INST> {instruction} <RESP> "
+
+ response = pipeline(
+     prompt,
+     max_length=200,
+     repetition_penalty=1.05
+ )
+
+ print(response[0]['generated_text'])
+ # AI, or Artificial Intelligence, refers to the ability of machines and software to perform tasks that require human intelligence, such as learning, reasoning, and problem-solving. It can be used in various fields like computer science, engineering, medicine, and more. Some common applications include image recognition, speech translation, and natural language processing.
+ ```
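
The README example drives the model through `transformers.pipeline`. For reference, a minimal sketch of the equivalent direct `generate` call (same repo id and `<SYS>`/`<INST>`/`<RESP>` prompt format; generation parameters assumed to match the pipeline example) could look like:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = 'ericzzz/falcon-rw-1b-instruct-openorca'

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map='auto',
)

# Same prompt format as the README example.
prompt = '<SYS> You are a helpful assistant. Give short answers. <INST> What is AI? Give some examples. <RESP> '
inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
outputs = model.generate(**inputs, max_length=200, repetition_penalty=1.05)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```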
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alibi": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "FalconForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bias": true,
+   "hidden_dropout": 0.0,
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "falcon",
+   "multi_query": false,
+   "new_decoder_architecture": false,
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "num_kv_heads": 32,
+   "parallel_attn": false,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.35.2",
+   "use_cache": false,
+   "vocab_size": 50304
+ }
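
This config pins the Falcon-RW-1B architecture: ALiBi positional bias, no multi-query attention, 24 layers, hidden size 2048. A small sketch for inspecting these values with `AutoConfig` (repo id assumed from the README):

```python
from transformers import AutoConfig

# Loads the config.json shown above from the Hub.
config = AutoConfig.from_pretrained('ericzzz/falcon-rw-1b-instruct-openorca')

print(config.model_type)                 # 'falcon'
print(config.hidden_size)                # 2048
print(config.num_hidden_layers)          # 24
print(config.alibi, config.multi_query)  # True False
```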
generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.35.2"
+ }
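
`generation_config.json` only pins the BOS/EOS ids, both the GPT-2 `<|endoftext|>` token (id 50256). A sketch for reading these defaults via `GenerationConfig` (repo id assumed from the README):

```python
from transformers import GenerationConfig

# Reads generation_config.json from the Hub; values can still be
# overridden per generate() call.
gen_config = GenerationConfig.from_pretrained('ericzzz/falcon-rw-1b-instruct-openorca')
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 50256 50256
```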
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5dfb67a4f95380dce654e9edf5a229963e353a5a539868770c852bc42a65884
+ size 135
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
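
The tokenizer is the GPT-2 BPE tokenizer, with `<|endoftext|>` serving as BOS, EOS, and UNK, and `model_max_length` set to 1024. A minimal sketch to confirm those settings after loading (repo id assumed from the README):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('ericzzz/falcon-rw-1b-instruct-openorca')
print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|endoftext|> 50256
print(tokenizer.model_max_length)                   # 1024
```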
vocab.json ADDED
The diff for this file is too large to render. See raw diff