NemoVonNirgend committed
Commit 83cf45b · verified · 1 Parent(s): 83e5c24

Training in progress, step 500
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ base_model: unsloth/GLM-4.6V-Flash
+ library_name: transformers
+ model_name: glm-4v-flash-reasoning
+ tags:
+ - generated_from_trainer
+ - hf_jobs
+ - trl
+ - sft
+ - unsloth
+ licence: license
+ ---
+
+ # Model Card for glm-4v-flash-reasoning
+
+ This model is a fine-tuned version of [unsloth/GLM-4.6V-Flash](https://huggingface.co/unsloth/GLM-4.6V-Flash).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="RoleModel/glm-4v-flash-reasoning", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.26.2
+ - Transformers: 5.0.0rc1
+ - Pytorch: 2.9.1
+ - Datasets: 4.3.0
+ - Tokenizers: 0.22.1
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
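
Note that the auto-generated quick start drives the repo through a plain `text-generation` pipeline. Since this commit uploads a LoRA adapter (`adapter_model.safetensors` below) for a vision-language base model, a more explicit loading path is to attach the adapter to the base checkpoint with PEFT. A minimal sketch, assuming the repo ids resolve as named and recent `transformers`/`peft` releases are installed:

```python
# Hedged sketch: attach the LoRA adapter from this repo to the base
# GLM-4.6V-Flash checkpoint. Repo ids are taken from the model card above.
from transformers import AutoProcessor, AutoModelForImageTextToText
from peft import PeftModel

base = AutoModelForImageTextToText.from_pretrained(
    "unsloth/GLM-4.6V-Flash", device_map="auto"
)
model = PeftModel.from_pretrained(base, "RoleModel/glm-4v-flash-reasoning")
processor = AutoProcessor.from_pretrained("RoleModel/glm-4v-flash-reasoning")
```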
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": {
+     "base_model_class": "Glm4vForConditionalGeneration",
+     "parent_library": "transformers.models.glm4v.modeling_glm4v",
+     "unsloth_fixed": true
+   },
+   "base_model_name_or_path": "unsloth/GLM-4.6V-Flash",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_bias": false,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": "(?:.*?(?:language|text).*?(?:self_attn|attention|attn|mlp|feed_forward|ffn|dense).*?(?:qkv|proj|gate_proj|up_proj|down_proj|q_proj|k_proj|v_proj|o_proj|gate_up_proj).*?)|(?:\\bmodel\\.layers\\.[\\d]{1,}\\.(?:self_attn|attention|attn|mlp|feed_forward|ffn|dense)\\.(?:(?:qkv|proj|gate_proj|up_proj|down_proj|q_proj|k_proj|v_proj|o_proj|gate_up_proj)))",
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": true
+ }
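
The adapter above uses `r: 64` with `lora_alpha: 128` and `use_rslora: true`, so the LoRA update is scaled by alpha/sqrt(r) rather than the classic alpha/r. A quick arithmetic check of what that means in practice:

```python
# Effective LoRA scaling implied by adapter_config.json:
# classic LoRA multiplies the update by alpha / r, while rank-stabilized
# LoRA (use_rslora: true) multiplies it by alpha / sqrt(r).
import math

r, lora_alpha = 64, 128
print(lora_alpha / r)             # 2.0  (scaling without rsLoRA)
print(lora_alpha / math.sqrt(r))  # 16.0 (actual scaling with use_rslora)
```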
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5df502257c5ca4a793458ba6bc8df121da8a3dd35f6b1a1a267ee588e759f626
+ size 380837008
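
The three `+` lines above are a Git LFS pointer, not the tensors themselves; the roughly 380 MB weight file is addressed by its SHA-256 oid. A small sketch for verifying a downloaded copy against that oid:

```python
# Verify a locally downloaded adapter_model.safetensors against the
# LFS pointer's sha256 oid shown above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("adapter_model.safetensors"))
# expected: 5df502257c5ca4a793458ba6bc8df121da8a3dd35f6b1a1a267ee588e759f626
```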
chat_template.jinja ADDED
@@ -0,0 +1,142 @@
+ {# Unsloth template fixes #}
+ [gMASK]<sop>
+ {%- if tools -%}
+ <|system|>
+ # Tools
+
+ You may call one or more functions to assist with the user query.
+
+ You are provided with function signatures within <tools></tools> XML tags:
+ <tools>
+ {% for tool in tools %}
+ {{ tool | tojson|string }}
+ {% endfor %}
+ </tools>
+
+ For each function call, output the function name and arguments within the following XML format:
+ <tool_call>{function-name}
+ <arg_key>{arg-key-1}</arg_key>
+ <arg_value>{arg-value-1}</arg_value>
+ <arg_key>{arg-key-2}</arg_key>
+ <arg_value>{arg-value-2}</arg_value>
+ ...
+ </tool_call>{%- endif -%}
+ {%- macro visible_text(content) -%}
+ {%- if content is string -%}
+ {{- content }}
+ {%- elif content is iterable and content is not mapping -%}
+ {%- for item in content -%}
+ {%- if item is mapping and item.type == 'text' -%}
+ {{- item.text }}
+ {%- elif item is mapping and (item.type == 'image' or 'image' in item) -%}
+ <|begin_of_image|><|image|><|end_of_image|>
+ {%- elif item is mapping and (item.type == 'video' or 'video' in item) -%}
+ <|begin_of_video|><|video|><|end_of_video|>
+ {%- elif item is string -%}
+ {{- item }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- content }}
+ {%- endif -%}
+ {%- endmacro -%}
+ {%- set ns = namespace(last_user_index=-1) %}
+ {%- for m in messages %}
+ {%- if m.role == 'user' %}
+ {% set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+ {%- endfor %}
+ {% for m in messages %}
+ {%- if m.role == 'user' -%}<|user|>
+ {% if m.content is string %}
+ {{ m.content }}
+ {%- else %}
+ {%- for item in m.content %}
+ {% if item.type == 'video' or 'video' in item %}
+ <|begin_of_video|><|video|><|end_of_video|>{% elif item.type == 'image' or 'image' in item %}
+ <|begin_of_image|><|image|><|end_of_image|>{% elif item.type == 'text' %}
+ {{ item.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
+ {%- elif m.role == 'assistant' -%}
+ <|assistant|>
+ {%- set reasoning_content = '' %}
+ {%- set content = visible_text(m.content) %}
+ {%- if m.reasoning_content is string %}
+ {%- set reasoning_content = m.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_user_index and reasoning_content -%}
+ {{ '\n<think>' + reasoning_content.strip() + '</think>'}}
+ {%- else -%}
+ {{ '\n<think></think>' }}
+ {%- endif -%}
+ {%- if content.strip() -%}
+ {{ '\n' + content.strip() }}
+ {%- endif -%}
+ {% if m.tool_calls %}
+ {% for tc in m.tool_calls %}
+ {%- if tc.function %}
+ {%- set tc = tc.function %}
+ {%- endif %}
+ {{ '\n<tool_call>' + tc.name }}
+ {% set _args = tc.arguments %}
+ {%- if _args is mapping %}{% for k, v in _args|items %}
+ <arg_key>{{ k }}</arg_key>
+ <arg_value>{{ v | tojson|string if v is not string else v }}</arg_value>
+ {% endfor %}{%- endif %}
+ </tool_call>{% endfor %}
+ {% endif %}
+ {%- elif m.role == 'tool' -%}
+ {%- if m.content is string -%}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|observation|>' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- m.content }}
+ {{- '\n</tool_response>' }}
+ {% elif m.content is iterable and m.content is not mapping %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|observation|>' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {%- for tr in m.content -%}
+ {%- if tr is mapping and tr.type is defined -%}
+ {%- set t = tr.type | lower -%}
+ {%- if t == 'text' and tr.text is defined -%}
+ {{ tr.text }}
+ {%- elif t in ['image', 'image_url'] -%}
+ <|begin_of_image|><|image|><|end_of_image|>
+ {%- elif t in ['video', 'video_url'] -%}
+ <|begin_of_video|><|video|><|end_of_video|>
+ {%- else -%}
+ {{ tr | tojson|string }}
+ {%- endif -%}
+ {%- else -%}
+ {{ tr.output if tr.output is defined else tr }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- '\n</tool_response>' }}
+ {%- else -%}
+ <|observation|>{% for tr in m.content %}
+
+ <tool_response>
+ {{ tr.output if tr.output is defined else tr }}
+ </tool_response>{% endfor -%}
+ {% endif -%}
+ {%- elif m.role == 'system' -%}
+ <|system|>
+ {{ visible_text(m.content) }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ <|assistant|>
+ {{'<think></think>\n' if (enable_thinking is defined and not enable_thinking) else ''}}
+ {%- endif -%}
+ {# Copyright 2025-present Unsloth. Apache 2.0 License. #}
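
The template handles the `/nothink` marker and the `<think>…</think>` reasoning block itself, so callers only need to pass `enable_thinking` through `apply_chat_template`. A hedged rendering sketch, assuming the repo id resolves as named:

```python
# Render the chat template above without loading the model. With
# enable_thinking=False the template appends /nothink to the last user
# turn and pre-fills an empty <think></think> for the assistant.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("RoleModel/glm-4v-flash-reasoning")
messages = [{"role": "user", "content": "Summarize the scene in one sentence."}]
prompt = tok.apply_chat_template(
    messages,
    add_generation_prompt=True,
    enable_thinking=False,  # consumed by the Jinja template, not the tokenizer
    tokenize=False,
)
print(prompt)
```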
processor_config.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "image_processor": {
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+       0.48145466,
+       0.4578275,
+       0.40821073
+     ],
+     "image_processor_type": "Glm46VImageProcessor",
+     "image_std": [
+       0.26862954,
+       0.26130258,
+       0.27577711
+     ],
+     "merge_size": 2,
+     "patch_size": 14,
+     "processor_class": "Glm46VProcessor",
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+       "longest_edge": 9633792,
+       "shortest_edge": 12544
+     },
+     "temporal_patch_size": 2
+   },
+   "processor_class": "Glm46VProcessor",
+   "video_processor": {
+     "crop_size": null,
+     "data_format": "channels_first",
+     "default_to_square": true,
+     "device": null,
+     "do_center_crop": null,
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_pad": null,
+     "do_rescale": true,
+     "do_resize": true,
+     "do_sample_frames": true,
+     "fps": 2,
+     "image_mean": [
+       0.48145466,
+       0.4578275,
+       0.40821073
+     ],
+     "image_std": [
+       0.26862954,
+       0.26130258,
+       0.27577711
+     ],
+     "input_data_format": null,
+     "max_duration": 300,
+     "max_image_size": {
+       "longest_edge": 47040000
+     },
+     "merge_size": 2,
+     "num_frames": 16,
+     "pad_size": null,
+     "patch_size": 14,
+     "processor_class": "Glm46VProcessor",
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "return_metadata": false,
+     "return_tensors": null,
+     "size": {
+       "longest_edge": 100352000,
+       "shortest_edge": 12544
+     },
+     "temporal_patch_size": 2,
+     "video_metadata": null,
+     "video_processor_type": "Glm46VVideoProcessor"
+   }
+ }
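
Assuming the pixel-budget semantics that Qwen2-VL-style processors use (an interpretation, not stated in the file), `size` bounds the total pixel count of the resized image, and each merged visual token covers `(patch_size * merge_size)^2` pixels, so the limits above translate into a per-image token budget:

```python
# Rough image-token budget implied by the image_processor block above,
# assuming size.{shortest,longest}_edge are total-pixel bounds as in
# Qwen2-VL-style processors (an assumption, not stated in the config).
patch_size, merge_size = 14, 2
pixels_per_token = (patch_size * merge_size) ** 2  # 28x28 px -> 784
print(12544 // pixels_per_token)    # min tokens per image: 16
print(9633792 // pixels_per_token)  # max tokens per image: 12288
```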
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0ccf2252fe9cd23ada23a829ec409aab397dbd2ac4f372d3a1a23d1f7c72d6b
+ size 19970686
tokenizer_config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "[MASK]",
+     "[gMASK]",
+     "[sMASK]",
+     "<sop>",
+     "<eop>",
+     "<|system|>",
+     "<|user|>",
+     "<|assistant|>",
+     "<|observation|>",
+     "<|begin_of_image|>",
+     "<|end_of_image|>",
+     "<|begin_of_video|>",
+     "<|end_of_video|>",
+     "<|begin_of_audio|>",
+     "<|end_of_audio|>",
+     "<|image|>",
+     "<|video|>",
+     "<|begin_of_transcription|>",
+     "<|end_of_transcription|>",
+     "<|code_prefix|>",
+     "<|code_middle|>",
+     "<|code_suffix|>",
+     "/nothink"
+   ],
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": [
+     "<|endoftext|>",
+     "[MASK]",
+     "[gMASK]",
+     "[sMASK]",
+     "<sop>",
+     "<eop>",
+     "<|system|>",
+     "<|user|>",
+     "<|assistant|>",
+     "<|observation|>",
+     "<|begin_of_image|>",
+     "<|end_of_image|>",
+     "<|begin_of_video|>",
+     "<|end_of_video|>",
+     "<|begin_of_audio|>",
+     "<|end_of_audio|>",
+     "<|image|>",
+     "<|video|>",
+     "<|begin_of_transcription|>",
+     "<|end_of_transcription|>",
+     "<|code_prefix|>",
+     "<|code_middle|>",
+     "<|code_suffix|>",
+     "/nothink"
+   ],
+   "is_local": false,
+   "model_max_length": 131072,
+   "model_specific_special_tokens": {},
+   "pad_token": "[MASK]",
+   "padding_side": "right",
+   "processor_class": "Glm46VProcessor",
+   "remove_space": false,
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": null
+ }
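
Two settings worth noticing for SFT: padding is right-sided with `[MASK]` repurposed as the pad token, and `model_max_length` is 131072. A quick sanity check, assuming the repo id resolves as named:

```python
# Confirm the pad/padding settings declared in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("RoleModel/glm-4v-flash-reasoning")
print(tok.pad_token)         # [MASK]
print(tok.padding_side)      # right
print(tok.model_max_length)  # 131072
```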
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:897c295a57ecbe9b68b72a10eec0b37bb8233731ab8ad59ca39449a1e44f9daf
+ size 5777
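
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside checkpoints. A sketch for inspecting it locally; `weights_only=False` is required because it is a pickled Python object, so only unpickle files you trust:

```python
# Inspect the pickled TrainingArguments (trust the source before unpickling).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)
print(args.per_device_train_batch_size)
print(args.max_steps)
```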