cguna frreiss committed
Commit 80632af · verified · 1 parent: 9b08116

Updated certainty and answerability models (#9)


- Updated loras for answerability and citations (b3b1d010db86154c687b670c7ed94243af2f44eb)


Co-authored-by: Fred Reiss <frreiss@users.noreply.huggingface.co>

answerability/lora/gpt-oss-20b/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "openai/gpt-oss-20b",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj"
+   ],
+   "target_parameters": [
+     "7.mlp.experts.gate_up_proj",
+     "7.mlp.experts.down_proj",
+     "15.mlp.experts.gate_up_proj",
+     "15.mlp.experts.down_proj",
+     "23.mlp.experts.gate_up_proj",
+     "23.mlp.experts.down_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
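This config targets the attention projections through `target_modules` and, through the newer `target_parameters` field, the MoE expert weights of layers 7, 15, and 23 of gpt-oss-20b. A minimal loading sketch, assuming a PEFT release recent enough to understand `target_parameters`; the paths and dtype choices are illustrative, not taken from this repo:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the frozen base model; bf16 + device_map are illustrative choices.
base = AutoModelForCausalLM.from_pretrained(
    "openai/gpt-oss-20b", torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")

# Point at the directory holding adapter_config.json + adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "answerability/lora/gpt-oss-20b")
model.eval()
```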
answerability/lora/gpt-oss-20b/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ee17447d06e0327bed4cd6811da1fa2607520285bb35c712bccb8d6d7f9e772
+ size 219238968
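The weights themselves live in Git LFS; the pointer's `oid` is the SHA-256 of the real file. A quick integrity check after `git lfs pull`, as a sketch (hashing the ~219 MB file takes a moment):

```python
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("answerability/lora/gpt-oss-20b/adapter_model.safetensors") == \
    "5ee17447d06e0327bed4cd6811da1fa2607520285bb35c712bccb8d6d7f9e772"
```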
answerability/lora/gpt-oss-20b/io.yaml ADDED
@@ -0,0 +1,19 @@
+ # Model name string, or null to use whatever is provided in the chat completion request
+ model: ~
+ # JSON schema of the model's output
+ response_format: |
+   {
+     "type": "string",
+     "enum": ["answerable", "unanswerable"]
+   }
+ transformations:
+   - type: likelihood
+     categories_to_values:
+       "answerable": 1.0
+       "unanswerable": 0.0
+     input_path: []
+     output_name: ~ # Null == no change
+ instruction: ~
+ parameters:
+   max_completion_tokens: 6
+ sentence_boundaries: false
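The `likelihood` transformation collapses the constrained `answerable`/`unanswerable` output into a single certainty score. A semantics-only sketch, assuming the serving runtime exposes per-category probabilities; the function name is hypothetical and not part of this repo:

```python
import math

def likelihood_score(category_logprobs: dict[str, float],
                     categories_to_values: dict[str, float]) -> float:
    """Map a categorical output to its expected value under categories_to_values."""
    probs = {c: math.exp(lp) for c, lp in category_logprobs.items()}
    total = sum(probs.values())  # renormalize over the enum from response_format
    return sum(categories_to_values[c] * p / total for c, p in probs.items())

# Model puts 80% of its mass on "answerable" -> certainty 0.8
score = likelihood_score(
    {"answerable": math.log(0.8), "unanswerable": math.log(0.2)},
    {"answerable": 1.0, "unanswerable": 0.0},
)
print(score)  # 0.8
```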
citations/lora/granite-3.3-2b-instruct/adapter_config.json CHANGED
@@ -3,9 +3,6 @@
  "auto_mapping": null,
  "base_model_name_or_path": "ibm-granite/granite-3.3-2b-instruct",
  "bias": "none",
- "corda_config": null,
- "eva_config": null,
- "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
@@ -14,8 +11,7 @@
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
- "lora_bias": false,
- "lora_dropout": 0.05,
+ "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
@@ -24,12 +20,15 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "k_proj",
  "q_proj",
- "v_proj"
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "up_proj",
+ "down_proj",
+ "gate_proj"
  ],
  "task_type": "CAUSAL_LM",
- "trainable_token_indices": null,
  "use_dora": false,
  "use_rslora": false
  }
citations/lora/granite-3.3-2b-instruct/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6957e303240dc287b6d894898fde78736f15d5057fd8f7b8551cf3c92e8ff4e2
- size 23625040
+ oid sha256:be6dc64b4c7737c74a4c37efca2dc621cd8ac5f918be723aec23f59bf7873c2c
+ size 56435848
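The adapter grows from 23,625,040 to 56,435,848 bytes, which tracks the change above: `target_modules` widens from three attention projections to all seven attention and MLP projections. A back-of-envelope LoRA parameter count as a sketch; the dimensions below are assumed for illustration, not read from the granite-3.3-2b config (and grouped-query attention would shrink the k/v terms):

```python
def lora_params(shapes: list[tuple[int, int]], r: int, n_layers: int) -> int:
    """Each adapted weight W (d_out x d_in) gains A (r x d_in) and B (d_out x r)."""
    return n_layers * sum(r * (d_in + d_out) for d_out, d_in in shapes)

r, n_layers, h, ffn = 32, 40, 2048, 8192             # assumed, illustration only
qkv = [(h, h)] * 3                                   # q_proj, k_proj, v_proj
all7 = qkv + [(h, h), (ffn, h), (ffn, h), (h, ffn)]  # + o, up, gate, down

print(lora_params(qkv, r, n_layers))   # old target set
print(lora_params(all7, r, n_layers))  # new target set, roughly 2-3x larger
```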
citations/lora/granite-3.3-2b-instruct/io.yaml ADDED
@@ -0,0 +1,96 @@
+ # Model name string, or null to use whatever is provided in the chat completion request
+ model: ~
+ # JSON schema of the model's output
+ response_format: |
+   {
+     "$defs": {
+       "_MODEL_OUTPUT_ENTRY": {
+         "properties": {
+           "r": {
+             "minimum": 0,
+             "title": "R",
+             "type": "integer"
+           },
+           "c": {
+             "items": {
+               "minimum": 0,
+               "type": "integer"
+             },
+             "title": "C",
+             "type": "array"
+           }
+         },
+         "required": [
+           "r",
+           "c"
+         ],
+         "title": "_MODEL_OUTPUT_ENTRY",
+         "type": "object"
+       }
+     },
+     "items": {
+       "$ref": "#/$defs/_MODEL_OUTPUT_ENTRY"
+     },
+     "title": "_MODEL_OUTPUT",
+     "type": "array"
+   }
+ transformations:
+   # Explode the list of document sentences in each citation
+   - type: explode
+     input_path: [] # Zero-length path means match root element
+     target_field: "c"
+   # Model may repeat itself; drop the resulting duplicates.
+   - type: drop_duplicates
+     input_path: [] # Zero-length path means match root element
+     target_fields: ["r", "c"]
+   # Replace sentence number with sentence location and contents.
+   # Do this first for sentences from the last turn, then for sentences from documents.
+   - type: decode_sentences
+     source: "last_message"
+     input_path: [~, "r"] # Null in path means wildcard
+     # New fields to add for each sentence
+     output_names:
+       begin: "response_begin"
+       end: "response_end"
+       text: "response_text"
+   - type: decode_sentences
+     source: "documents"
+     input_path: [~, "c"] # Null in path means wildcard
+     # New fields to add for each sentence
+     output_names:
+       document_id: "citation_doc_id"
+       begin: "citation_begin"
+       end: "citation_end"
+       text: "citation_text"
+   # Remove fields that we no longer need
+   - type: project
+     input_path: []
+     retained_fields:
+       - "response_begin"
+       - "response_end"
+       - "response_text"
+       - "citation_doc_id"
+       - "citation_begin"
+       - "citation_end"
+       - "citation_text"
+   # Merge adjacent document spans
+   - type: merge_spans
+     input_path: []
+     group_fields: ["response_begin", "response_end", "response_text", "citation_doc_id"]
+     begin_field: "citation_begin"
+     end_field: "citation_end"
+     text_field: "citation_text"
+
+ instruction: >
+   Split the last assistant response into individual sentences.
+   For each sentence in the response, identify the statement IDs from the below
+   documents that it references. Ensure that your output includes all response
+   sentence IDs, and for each response sentence ID, provide the list of corresponding
+   referring document sentence IDs. The output must be a json structure.
+ parameters:
+   max_completion_tokens: 4096
+ sentence_boundaries:
+   # Mapping from string location to sentence delimiter prefix
+   last_message: "r" # <r0>, <r1>, etc.
+   documents: "c"
+
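For intuition, here is an illustrative Python rendering of the first two transformations, `explode` and `drop_duplicates`; the real implementations belong to whatever runtime consumes this io.yaml, so treat this purely as a sketch of the semantics:

```python
def explode(records: list[dict], target_field: str) -> list[dict]:
    """Emit one record per element of the list stored under target_field."""
    return [{**rec, target_field: value}
            for rec in records
            for value in rec[target_field]]

def drop_duplicates(records: list[dict], target_fields: list[str]) -> list[dict]:
    """Keep the first record for each combination of target_fields."""
    seen, out = set(), []
    for rec in records:
        key = tuple(rec[f] for f in target_fields)
        if key not in seen:
            seen.add(key)
            out.append(rec)
    return out

raw = [{"r": 0, "c": [1, 2, 2]}, {"r": 1, "c": [3]}]
print(drop_duplicates(explode(raw, "c"), ["r", "c"]))
# [{'r': 0, 'c': 1}, {'r': 0, 'c': 2}, {'r': 1, 'c': 3}]
```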
citations/lora/granite-3.3-2b-instruct/tokenizer_config.json CHANGED
@@ -221,12 +221,11 @@
  "<|end_of_plugin|>"
  ],
  "bos_token": "<|end_of_text|>",
- "chat_template": "{# Alias tools -> available_tools #}\n{%- if tools and not available_tools -%}\n {%- set available_tools = tools -%}\n{%- endif -%}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n {%- else %}\n {%- set system_message = \"Knowledge Cutoff Date: April 2024.\nToday's Date: \" + strftime_now('%B %d, %Y') + \".\nYou are Granite, developed by IBM.\" %}\n {%- if available_tools and documents %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\nWrite the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif available_tools %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}\n {%- elif documents %}\n {%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif thinking %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts between <think></think> and write your response between <response></response> for each user query.\" %}\n {%- else %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\" %}\n {%- endif %}\n {%- if 'citations' in controls and documents %}\n {%- set system_message = system_message + '\nUse the symbols <|start_of_cite|> and <|end_of_cite|> to indicate when a fact comes from a document in the search result, e.g <|start_of_cite|> {document_id: 1}my fact <|end_of_cite|> for a fact from document 1. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}\n {%- endif %}\n {%- if 'hallucinations' in controls and documents %}\n {%- set system_message = system_message + '\nFinally, after the response is written, include a numbered list of sentences from the response with a corresponding risk value that are hallucinated and not based in the documents.' %}\n {%- endif %}\n {%- set loop_messages = messages %}\n {%- endif %}\n {{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|>\n' }}\n {%- if available_tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>' }}\n {{- available_tools | tojson(indent=4) }}\n {{- '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if documents %}\n {%- for document in documents %}\n {{- '<|start_of_role|>document {\"document_id\": \"' + document['doc_id'] | string + '\"}<|end_of_role|>\n' }}\n {{- document['text'] }}\n {{- '<|end_of_text|>\n' }}\n {%- endfor %}\n {%- endif %}\n {%- for message in loop_messages %}\n {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant' }}\n {%- if controls %}\n {{- ' ' + controls | tojson()}}\n {%- endif %}\n {{- '<|end_of_role|>' }}\n {%- endif %}\n {%- endfor %}",
+ "chat_template": "\n{# Alias tools -> available_tools #}\n{%- if tools and not available_tools -%}\n {%- set available_tools = tools -%}\n{%- endif -%}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n {%- else %}\n {%- set system_message = \"Knowledge Cutoff Date: April 2024.\nToday's Date: \" + strftime_now('%B %d, %Y') + \".\nYou are Granite, developed by IBM.\" %}\n {%- if available_tools and documents %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\nWrite the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif available_tools %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}\n {%- elif documents %}\n {%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif thinking %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts between <think></think> and write your response between <response></response> for each user query.\" %}\n {%- else %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\" %}\n {%- endif %}\n {%- if 'citations' in controls and documents %}\n {%- set system_message = system_message + '\nUse the symbols <|start_of_cite|> and <|end_of_cite|> to indicate when a fact comes from a document in the search result, e.g <|start_of_cite|> {document_id: 1}my fact <|end_of_cite|> for a fact from document 1. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}\n {%- endif %}\n {%- if 'hallucinations' in controls and documents %}\n {%- set system_message = system_message + '\nFinally, after the response is written, include a numbered list of sentences from the response with a corresponding risk value that are hallucinated and not based in the documents.' %}\n {%- endif %}\n {%- set loop_messages = messages %}\n {%- endif %}\n {{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|>\n' }}\n {%- if available_tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>' }}\n {{- available_tools | tojson(indent=4) }}\n {{- '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if documents %}\n {%- for document in documents %}\n {{- '<|start_of_role|>document {\"document_id\": \"' + document['doc_id'] | string + '\"}<|end_of_role|>\n' }}\n {{- document['text'] }}\n {{- '<|end_of_text|>\n' }}\n {%- endfor %}\n {%- endif %}\n {%- for message in loop_messages %}\n {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant' }}\n {%- if controls %}\n {{- ' ' + controls | tojson()}}\n {%- endif %}\n {{- '<|end_of_role|>' }}\n {%- endif %}\n {%- endfor %}\n",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|end_of_text|>",
  "errors": "replace",
  "extra_special_tokens": {},
- "legacy": true,
  "model_max_length": 9223372036854775807,
  "pad_token": "<|end_of_text|>",
  "padding_side": "left",
citations/lora/granite-3.3-8b-instruct/io.yaml ADDED
@@ -0,0 +1,96 @@
+ # Model name string, or null to use whatever is provided in the chat completion request
+ model: ~
+ # JSON schema of the model's output
+ response_format: |
+   {
+     "$defs": {
+       "_MODEL_OUTPUT_ENTRY": {
+         "properties": {
+           "r": {
+             "minimum": 0,
+             "title": "R",
+             "type": "integer"
+           },
+           "c": {
+             "items": {
+               "minimum": 0,
+               "type": "integer"
+             },
+             "title": "C",
+             "type": "array"
+           }
+         },
+         "required": [
+           "r",
+           "c"
+         ],
+         "title": "_MODEL_OUTPUT_ENTRY",
+         "type": "object"
+       }
+     },
+     "items": {
+       "$ref": "#/$defs/_MODEL_OUTPUT_ENTRY"
+     },
+     "title": "_MODEL_OUTPUT",
+     "type": "array"
+   }
+ transformations:
+   # Explode the list of document sentences in each citation
+   - type: explode
+     input_path: [] # Zero-length path means match root element
+     target_field: "c"
+   # Model may repeat itself; drop the resulting duplicates.
+   - type: drop_duplicates
+     input_path: [] # Zero-length path means match root element
+     target_fields: ["r", "c"]
+   # Replace sentence number with sentence location and contents.
+   # Do this first for sentences from the last turn, then for sentences from documents.
+   - type: decode_sentences
+     source: "last_message"
+     input_path: [~, "r"] # Null in path means wildcard
+     # New fields to add for each sentence
+     output_names:
+       begin: "response_begin"
+       end: "response_end"
+       text: "response_text"
+   - type: decode_sentences
+     source: "documents"
+     input_path: [~, "c"] # Null in path means wildcard
+     # New fields to add for each sentence
+     output_names:
+       document_id: "citation_doc_id"
+       begin: "citation_begin"
+       end: "citation_end"
+       text: "citation_text"
+   # Remove fields that we no longer need
+   - type: project
+     input_path: []
+     retained_fields:
+       - "response_begin"
+       - "response_end"
+       - "response_text"
+       - "citation_doc_id"
+       - "citation_begin"
+       - "citation_end"
+       - "citation_text"
+   # Merge adjacent document spans
+   - type: merge_spans
+     input_path: []
+     group_fields: ["response_begin", "response_end", "response_text", "citation_doc_id"]
+     begin_field: "citation_begin"
+     end_field: "citation_end"
+     text_field: "citation_text"
+
+ instruction: >
+   Split the last assistant response into individual sentences.
+   For each sentence in the response, identify the statement IDs from the below
+   documents that it references. Ensure that your output includes all response
+   sentence IDs, and for each response sentence ID, provide the list of corresponding
+   referring document sentence IDs. The output must be a json structure.
+ parameters:
+   max_completion_tokens: 4096
+ sentence_boundaries:
+   # Mapping from string location to sentence delimiter prefix
+   last_message: "r" # <r0>, <r1>, etc.
+   documents: "c"
+