isaacOnline committed
Commit 6a752d9
1 parent: 7585aa0

Training in progress, step 20

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "tiiuae/falcon-7b-instruct",
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.1",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,10 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense_4h_to_h",
-    "dense_h_to_4h",
-    "query_key_value",
-    "dense"
+    "up_proj",
+    "k_proj",
+    "down_proj",
+    "v_proj",
+    "gate_proj",
+    "o_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8beee3b5b86188515676fff03ab6343d2c6cfc17cfe17a533e8767e8108eff42
-size 261131840
+oid sha256:b44c6fdabc095f8f8c47e3ecdde1d7892205596b642550286e63d4dc45b54440
+size 335604696
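
The adapter weights grow from roughly 261 MB to 336 MB because LoRA matrices now attach to seven Mistral projection modules. A hedged sketch of loading the updated adapter on top of its base model; the adapter repo id below is a placeholder, not a confirmed path.

```python
# Sketch only: attach the LoRA adapter in this repo to its new base model.
# "isaacOnline/<this-adapter-repo>" is a placeholder, not a confirmed repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "isaacOnline/<this-adapter-repo>")
tokenizer = AutoTokenizer.from_pretrained("isaacOnline/<this-adapter-repo>")
```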
qual_clasification.log CHANGED
@@ -53,3 +53,10 @@
 2024-02-11 19:14:07,990 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
 2024-02-11 19:14:08,914 - INFO - __main__ - Loaded LoRA Model
 2024-02-11 19:14:09,545 - INFO - __main__ - Instantiated Trainer
+2024-02-12 00:06:16,122 - INFO - __main__ - Completed fine-tuning
+2024-02-12 00:06:18,016 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
+2024-02-12 00:06:20,394 - INFO - __main__ - Saved model to hub
+2024-02-12 00:08:03,435 - INFO - __main__ - Completed EM Metrics evaluation
+2024-02-12 00:09:33,450 - INFO - __main__ - Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1
+2024-02-12 00:09:34,543 - INFO - __main__ - Loaded LoRA Model
+2024-02-12 00:09:35,118 - INFO - __main__ - Instantiated Trainer
qual_clasification_outputs.csv CHANGED
@@ -396,7 +396,7 @@ Qualitative Keywords:
 fieldwork",QUANTITATIVE,QUANTITATIVE,NON_QUALITATIVE,NON_QUALITATIVE,"QUANTITATIVE
 
 Assistant:
- MI",RESOLVABLE
+ QUAL",RESOLVABLE
 29,"Grant Abstract:
 University of Florida graduate student, Stacey A. Giroux, under the direction of Dr. H. Russell Bernard, will undertake sociocultural research on how changes in national dietary cultures are understood and acted upon at the level of the individual and household. There is abundant evidence that eating practices are changing worldwide, converging toward similar diets that are high in fat and sugar and low in fiber. Many nations also have escalating rates of overweight and obesity. This project will investigate the social and cultural foundations of this global phenomenon.
 
special_tokens_map.json CHANGED
@@ -1,23 +1,24 @@
 {
-  "additional_special_tokens": [
-    ">>TITLE<<",
-    ">>ABSTRACT<<",
-    ">>INTRODUCTION<<",
-    ">>SUMMARY<<",
-    ">>COMMENT<<",
-    ">>ANSWER<<",
-    ">>QUESTION<<",
-    ">>DOMAIN<<",
-    ">>PREFIX<<",
-    ">>SUFFIX<<",
-    ">>MIDDLE<<"
-  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>"
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
 }
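
The new map sets both eos_token and pad_token to "</s>": Mistral-7B-Instruct ships no dedicated padding token, so padding reuses EOS. A sketch of how such a map is commonly produced before training (an assumed pattern, not confirmed from this repo's code); the save path is taken from the log above.

```python
# Assumed pattern: reuse EOS as the pad token before fine-tuning, then save,
# which yields a special_tokens_map.json like the one in this diff.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
tokenizer.pad_token = tokenizer.eos_token  # "</s>" doubles as the pad token
tokenizer.save_pretrained("machine_learning/llm_finetune_models/0")  # path seen in qual_clasification.log
```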
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,8 +1,9 @@
 {
-  "add_prefix_space": false,
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
-      "content": ">>TITLE<<",
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -10,7 +11,7 @@
       "special": true
     },
     "1": {
-      "content": ">>ABSTRACT<<",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -18,79 +19,7 @@
       "special": true
     },
     "2": {
-      "content": ">>INTRODUCTION<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": ">>SUMMARY<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "4": {
-      "content": ">>COMMENT<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "5": {
-      "content": ">>ANSWER<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "6": {
-      "content": ">>QUESTION<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "7": {
-      "content": ">>DOMAIN<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "8": {
-      "content": ">>PREFIX<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "9": {
-      "content": ">>SUFFIX<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "10": {
-      "content": ">>MIDDLE<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "11": {
-      "content": "<|endoftext|>",
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -98,26 +27,17 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    ">>TITLE<<",
-    ">>ABSTRACT<<",
-    ">>INTRODUCTION<<",
-    ">>SUMMARY<<",
-    ">>COMMENT<<",
-    ">>ANSWER<<",
-    ">>QUESTION<<",
-    ">>DOMAIN<<",
-    ">>PREFIX<<",
-    ">>SUFFIX<<",
-    ">>MIDDLE<<"
-  ],
-  "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
-  "model_input_names": [
-    "input_ids",
-    "attention_mask"
-  ],
-  "model_max_length": 2048,
-  "pad_token": "<|endoftext|>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
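
The replacement tokenizer_config.json carries Mistral's [INST]-style chat_template. An illustrative rendering of a prompt with it via transformers' apply_chat_template; the example message is made up.

```python
# Illustrative only: render a prompt with the chat_template shown in the diff above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
messages = [{"role": "user", "content": "Is this grant abstract qualitative or quantitative?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)  # "<s>[INST] Is this grant abstract qualitative or quantitative? [/INST]"
```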
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e1644083d38e0bb2c00d93e760709cd825c88ee0f81f16b32e50db5d6ec5db57
+oid sha256:012998525ab2ef532484346fdf004a1d7e8a3becdab229bdfbecc366e11e46cb
 size 4728