m4r1 committed
Commit
f82788a
1 Parent(s): 4a1158e

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -34,4 +34,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  selfrag_llama2_7b.q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
- selfrag_llama2_7b.q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
 
README.md CHANGED
@@ -1 +1,71 @@
- gguf quantized models of https://huggingface.co/selfrag/selfrag_llama2_7b
+ ---
+ license: mit
+ ---
+ This model is a 7B [Self-RAG](https://selfrag.github.io/) model that generates outputs for diverse user queries, along with *reflection tokens* that adaptively call the retrieval system and critique the model's own output and the retrieved passages.
+
+ Self-RAG is trained on our instruction-following corpora with interleaved passages and reflection tokens using the standard next-token prediction objective, enabling efficient and stable learning with fine-grained feedback.
+ At inference, we leverage reflection tokens covering diverse aspects of generation to sample the output that best aligns with users' preferences.
+ See the full description in [our paper](https://arxiv.org/abs/2310.11511).
+
+ ## Usage
+ Here, we show an easy way to quickly download our model from HuggingFace and run it with `vllm` on pre-supplied passages. Make sure to install the dependencies listed in [self-rag/requirements.txt](https://github.com/AkariAsai/self-rag/blob/main/requirements.txt).
+ To run our full inference pipeline with a retrieval system and fine-grained tree decoding, please use [our code](https://github.com/AkariAsai/self-rag).
+
+ ```py
+ from vllm import LLM, SamplingParams
+
+ # Adjust download_dir to a cache directory on your machine.
+ model = LLM("selfrag/selfrag_llama2_7b", download_dir="/gscratch/h2lab/akari/model_cache", dtype="half")
+ # skip_special_tokens=False keeps the reflection tokens visible in the output.
+ sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=100, skip_special_tokens=False)
+
+ def format_prompt(input, paragraph=None):
+     prompt = "### Instruction:\n{0}\n\n### Response:\n".format(input)
+     if paragraph is not None:
+         prompt += "[Retrieval]<paragraph>{0}</paragraph>".format(paragraph)
+     return prompt
+
+ query_1 = "Leave odd one out: twitter, instagram, whatsapp."
+ query_2 = "Can you tell me the difference between llamas and alpacas?"
+ queries = [query_1, query_2]
+
+ preds = model.generate([format_prompt(query) for query in queries], sampling_params)
+ for pred in preds:
+     print("Model prediction: {0}".format(pred.outputs[0].text))
+ # Model prediction: Twitter, Instagram, and WhatsApp are all social media platforms.[No Retrieval]WhatsApp is the odd one out because it is a messaging app, while Twitter and
+ # Instagram are primarily used for sharing photos and videos.[Utility:5]</s>
+ # (This query doesn't require factual grounding, so the model skips retrieval and does normal instruction-following generation.)
+ # Model prediction: Sure![Retrieval]<paragraph> ...
+ # (This query requires factual grounding, so the model calls for a retriever.)
+
+ # Generate with a retrieved passage.
+ prompt = format_prompt("Can you tell me the difference between llamas and alpacas?", paragraph="The alpaca (Lama pacos) is a species of South American camelid mammal. It is similar to, and often confused with, the llama. Alpacas are considerably smaller than llamas, and unlike llamas, they were not bred to be working animals, but were bred specifically for their fiber.")
+ preds = model.generate([prompt], sampling_params)
+ print([pred.outputs[0].text for pred in preds])
+ # ['[Relevant]Alpacas are considerably smaller than llamas, and unlike llamas, they were not bred to be working animals, but were bred specifically for their fiber.[Fully supported][Utility:5]</s>']
+ ```
+
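+ The `[Retrieval]` token in the second prediction is the model's signal that it wants evidence. A minimal routing sketch on top of the snippet above (the `retrieve` function is a hypothetical stand-in for whatever retriever you use; it is not part of this repository):
+
+ ```py
+ def answer(model, sampling_params, query, retrieve):
+     # First pass: let the model decide whether it needs evidence.
+     pred = model.generate([format_prompt(query)], sampling_params)[0].outputs[0].text
+     if "[Retrieval]" not in pred:
+         return pred  # answered without requesting retrieval
+     # Second pass: re-prompt with a retrieved passage wrapped in paragraph tokens.
+     passage = retrieve(query)  # hypothetical: returns one passage string
+     return model.generate([format_prompt(query, paragraph=passage)], sampling_params)[0].outputs[0].text
+ ```
+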
+ ## Input Format
+ As described in the `format_prompt` function, your input should be formatted as
+ ```
+ ### Instruction:\n{instruction}\n\n### Response:\n
+ ```
+ or, if you have an additional input,
+ ```
+ ### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n
+ ```
+ You can insert paragraphs anywhere after `### Response:\n`, but make sure to mark each passage with paragraph tokens (i.e., `<paragraph>{passage}</paragraph>`), as in the sketch below.
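+
+ For instance, a variant of `format_prompt` covering the optional input field (a hypothetical helper, not part of the official codebase):
+
+ ```py
+ def format_prompt_with_input(instruction, input=None, paragraph=None):
+     prompt = "### Instruction:\n{0}\n\n".format(instruction)
+     if input is not None:
+         prompt += "### Input:\n{0}\n\n".format(input)
+     prompt += "### Response:\n"
+     if paragraph is not None:
+         # Passages must be wrapped in paragraph tokens.
+         prompt += "[Retrieval]<paragraph>{0}</paragraph>".format(paragraph)
+     return prompt
+ ```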
+
+ ## Training details
+ Our training data is available in the HuggingFace dataset [selfrag_train_data](https://huggingface.co/datasets/selfrag/selfrag_train_data).
+ See our official repository for training details.
+ We used eight A100 40GB GPUs for training on the Stability HPC server.
+
+ ## Citation and contact
+ If you use this model, please cite our work:
+ ```
+ @article{asai2023selfrag,
+   author  = {Asai, Akari and Wu, Zeqiu and Wang, Yizhong and Sil, Avirup and Hajishirzi, Hannaneh},
+   title   = {{Self-RAG}: Learning to Retrieve, Generate, and Critique through Self-Reflection},
+   year    = {2023},
+   journal = {arXiv preprint arXiv:2310.11511},
+   url     = {https://arxiv.org/abs/2310.11511}
+ }
+ ```
added_tokens.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "</paragraph>": 32006,
+   "<pad>": 32015,
+   "<paragraph>": 32005,
+   "[Continue to Use Evidence]": 32002,
+   "[Fully supported]": 32012,
+   "[Irrelevant]": 32003,
+   "[No Retrieval]": 32000,
+   "[No support / Contradictory]": 32014,
+   "[Partially supported]": 32013,
+   "[Relevant]": 32004,
+   "[Retrieval]": 32001,
+   "[Utility:1]": 32007,
+   "[Utility:2]": 32008,
+   "[Utility:3]": 32009,
+   "[Utility:4]": 32010,
+   "[Utility:5]": 32011
+ }
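
Because the README's example generates with `skip_special_tokens=False`, these reflection-token strings appear verbatim in the output text. A minimal sketch for stripping them before displaying an answer to end users (plain string handling; the token list is copied from the file above):

```py
import re

# Reflection-token strings from added_tokens.json, plus the EOS marker.
REFLECTION_TOKENS = [
    "[No Retrieval]", "[Retrieval]", "[Continue to Use Evidence]",
    "[Irrelevant]", "[Relevant]", "<paragraph>", "</paragraph>",
    "[Utility:1]", "[Utility:2]", "[Utility:3]", "[Utility:4]", "[Utility:5]",
    "[Fully supported]", "[Partially supported]", "[No support / Contradictory]",
    "</s>",
]

def strip_reflection_tokens(text):
    # Replace each marker with a space, then collapse leftover whitespace.
    for tok in REFLECTION_TOKENS:
        text = text.replace(tok, " ")
    return re.sub(r"\s+", " ", text).strip()
```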
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "meta-llama/Llama-2-7b-hf",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.34.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32016
+ }
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "bos_token_id": 1,
+   "do_sample": true,
+   "eos_token_id": 2,
+   "max_length": 4096,
+   "pad_token_id": 0,
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "transformers_version": "4.34.0.dev0"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "additional_special_tokens": [
+     "[No Retrieval]",
+     "[Retrieval]",
+     "[Continue to Use Evidence]",
+     "[Irrelevant]",
+     "[Relevant]",
+     "<paragraph>",
+     "</paragraph>",
+     "[Utility:1]",
+     "[Utility:2]",
+     "[Utility:3]",
+     "[Utility:4]",
+     "[Utility:5]",
+     "[Fully supported]",
+     "[Partially supported]",
+     "[No support / Contradictory]"
+   ],
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": true
+ }
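
Taken together, these files are everything `transformers` needs to load the checkpoint directly, as an alternative to the `vllm` path in the README. A minimal sketch assuming a standard `transformers` install (model ID and prompt format taken from the README above):

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# The tokenizer picks up the 16 reflection tokens from added_tokens.json and
# special_tokens_map.json; config.json's vocab_size (32016) already includes them.
tokenizer = AutoTokenizer.from_pretrained("selfrag/selfrag_llama2_7b")
model = AutoModelForCausalLM.from_pretrained("selfrag/selfrag_llama2_7b", torch_dtype=torch.bfloat16)

prompt = "### Instruction:\nCan you tell me the difference between llamas and alpacas?\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt")
# Greedy decoding mirrors the temperature=0.0 vllm example; decoding with
# skip_special_tokens=False keeps the reflection tokens visible.
output = model.generate(**inputs, max_new_tokens=100, do_sample=False)
print(tokenizer.decode(output[0], skip_special_tokens=False))
```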