gchhablani committed on
Commit
3a2e60d
1 Parent(s): 335a2f6

Add initial files

.gitignore ADDED
@@ -0,0 +1 @@
+ *.pyc
app.py ADDED
@@ -0,0 +1,143 @@
+ from io import BytesIO
+ import streamlit as st
+ import pandas as pd
+ import json
+ import os
+ import numpy as np
+ from streamlit.elements import markdown
+ from PIL import Image
+ from model.flax_clip_vision_marian import (
+     FlaxCLIPVisionMarianMT,
+ )
+ from transformers import MarianTokenizer
+ from utils import (
+     get_transformed_image,
+ )
+ import matplotlib.pyplot as plt
+ from mtranslate import translate
+
+
+ from session import _get_state
+
+ state = _get_state()
+
+
+ @st.cache
+ def load_model(ckpt):
+     return FlaxCLIPVisionMarianMT.from_pretrained(ckpt)
+
+
+ tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
+
+ @st.cache(persist=True)
+ def generate_sequence(pixel_values, num_beams, temperature, top_p):
+     output_ids = model.generate(input_ids=pixel_values, max_length=64, num_beams=num_beams, temperature=temperature, top_p=top_p)
+     print(output_ids)
+     output_sequence = tokenizer.batch_decode(output_ids[0], skip_special_tokens=True, max_length=64)
+     return output_sequence
+
+ def read_markdown(path, parent="./sections/"):
+     with open(os.path.join(parent, path)) as f:
+         return f.read()
+
+
+ checkpoints = ["./ckpt/ckpt-17499"]  # TODO: Maybe add more checkpoints?
+ dummy_data = pd.read_csv("reference.tsv", sep="\t")
+
+ st.set_page_config(
+     page_title="Multilingual Image Captioning",
+     layout="wide",
+     initial_sidebar_state="collapsed",
+     page_icon="./misc/mic-logo.png",
+ )
+
+ st.title("Multilingual Image Captioning")
+ st.write(
+     "[Bhavitvya Malik](https://huggingface.co/bhavitvyamalik), [Gunjan Chhablani](https://huggingface.co/gchhablani)"
+ )
+
+ st.sidebar.title("Generation Parameters")
+ num_beams = st.sidebar.number_input(label="Number of Beams", min_value=2, max_value=10, value=4, step=1, help="Number of beams to be used in beam search.")
+ temperature = st.sidebar.select_slider(label="Temperature", options=np.arange(0.0, 1.1, step=0.1), value=1.0, help="The value used to modulate the next token probabilities.", format_func=lambda x: f"{x:.2f}")
+ top_p = st.sidebar.select_slider(label="Top-P", options=np.arange(0.0, 1.1, step=0.1), value=1.0, help="Nucleus sampling: if set to a float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.", format_func=lambda x: f"{x:.2f}")
+
+
+ image_col, intro_col = st.beta_columns([3, 8])
+ # image_col.image("./misc/sic-logo.png", use_column_width="always")
+ intro_col.write(read_markdown("intro.md"))
+
+ with st.beta_expander("Usage"):
+     st.markdown(read_markdown("usage.md"))
+
+ with st.beta_expander("Article"):
+     st.write(read_markdown("abstract.md"))
+     st.write(read_markdown("caveats.md"))
+     # st.write("# Methodology")
+     # st.image(
+     #     "./misc/Multilingual-IC.png", caption="Seq2Seq model for Image-text Captioning."
+     # )
+     st.markdown(read_markdown("pretraining.md"))
+     st.write(read_markdown("challenges.md"))
+     st.write(read_markdown("social_impact.md"))
+     st.write(read_markdown("references.md"))
+     # st.write(read_markdown("checkpoints.md"))
+     st.write(read_markdown("acknowledgements.md"))
+
+
+ first_index = 20
+ # Init Session State
+ if state.image_file is None:
+     state.image_file = dummy_data.loc[first_index, "image_file"]
+     state.caption = dummy_data.loc[first_index, "caption"].strip("- ")
+
+     image_path = os.path.join("images", state.image_file)
+     image = plt.imread(image_path)
+     state.image = image
+
+ # col1, col2 = st.beta_columns([6, 4])
+
+ if st.button("Get a random example", help="Get a random example from one of the seeded examples."):
+     sample = dummy_data.sample(1).reset_index()
+     state.image_file = sample.loc[0, "image_file"]
+     state.caption = sample.loc[0, "caption"].strip("- ")
+
+     image_path = os.path.join("images", state.image_file)
+     image = plt.imread(image_path)
+     state.image = image
+
+ # col2.write("OR")
+
+ # uploaded_file = col2.file_uploader("Upload your image", type=["png", "jpg", "jpeg"])
+ # if uploaded_file is not None:
+ #     state.image_file = os.path.join("images", uploaded_file.name)
+ #     state.image = np.array(Image.open(uploaded_file))
+
+ transformed_image = get_transformed_image(state.image)
+
+ new_col1, new_col2 = st.beta_columns([5, 5])
+ # Display Image
+ new_col1.image(state.image, use_column_width="always")
+
+
+ # Display Reference Caption
+ new_col2.write("**Reference Caption**: " + state.caption)
+ new_col2.markdown(
+     f"""**English Translation**: {translate(state.caption, 'en')}"""
+ )
+
+ with st.spinner("Loading model..."):
+     model = load_model(checkpoints[0])
+ sequence = ['']
+ if new_col2.button("Generate Caption", help="Generate a caption in the specified language."):
+     with st.spinner("Generating Sequence..."):
+         sequence = generate_sequence(transformed_image, num_beams, temperature, top_p)
+         # print(sequence)
+
+ if sequence != ['']:
+     st.write(
+         "**Generated Caption**: " + sequence[0]
+     )
+
+     st.write(
+         "**English Translation**: " + translate(sequence[0])
+     )
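The Streamlit callbacks above wrap a plain generate-and-decode path. A minimal sketch of the same flow outside the app, assuming the repository's ./ckpt/ckpt-17499 checkpoint and utils.get_transformed_image as used above; the image path is a hypothetical example:

    import matplotlib.pyplot as plt
    from mtranslate import translate
    from transformers import MarianTokenizer

    from model.flax_clip_vision_marian import FlaxCLIPVisionMarianMT
    from utils import get_transformed_image

    # Load the checkpoint and the Spanish Marian tokenizer used in app.py.
    model = FlaxCLIPVisionMarianMT.from_pretrained("./ckpt/ckpt-17499")
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")

    image = plt.imread("images/example.jpg")  # hypothetical sample image
    pixel_values = get_transformed_image(image)

    # Same defaults the sidebar exposes: 4 beams, temperature 1.0, top_p 1.0.
    output_ids = model.generate(input_ids=pixel_values, max_length=64, num_beams=4, temperature=1.0, top_p=1.0)
    caption = tokenizer.batch_decode(output_ids[0], skip_special_tokens=True)[0]
    print(caption)                   # Spanish caption
    print(translate(caption, "en"))  # rough English translation for reference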
model/__init__.py ADDED
File without changes
model/flax_clip_vision_marian/__init__.py ADDED
File without changes
model/flax_clip_vision_marian/configuration_clip_vision_marian.py ADDED
@@ -0,0 +1,51 @@
+ import copy
+
+ from transformers import CLIPVisionConfig, MarianConfig
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class CLIPVisionMarianConfig(PretrainedConfig):
+
+     model_type = "clip-vision-marian"
+     is_composition = True
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+         if "marian_config" not in kwargs:
+             raise ValueError("`marian_config` can not be `None`.")
+
+         if "clip_vision_config" not in kwargs:
+             raise ValueError("`clip_vision_config` can not be `None`.")
+
+         marian_config = kwargs.pop("marian_config")
+         clip_vision_config = kwargs.pop("clip_vision_config")
+
+         self.marian_config = MarianConfig(**marian_config)
+
+         self.clip_vision_config = CLIPVisionConfig(**clip_vision_config)
+
+         self.is_encoder_decoder = True
+
+     @classmethod
+     def from_clip_vision_marian_configs(
+         cls,
+         clip_vision_config: PretrainedConfig,
+         marian_config: PretrainedConfig,
+         **kwargs
+     ):
+         return cls(
+             clip_vision_config=clip_vision_config.to_dict(),
+             marian_config=marian_config.to_dict(),
+             **kwargs
+         )
+
+     def to_dict(self):
+         output = copy.deepcopy(self.__dict__)
+         output["clip_vision_config"] = self.clip_vision_config.to_dict()
+         output["marian_config"] = self.marian_config.to_dict()
+         output["model_type"] = self.__class__.model_type
+         return output
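The composite config only validates and stores the two sub-configs, mirroring other encoder-decoder configs in transformers. A small sketch of building one via from_clip_vision_marian_configs, assuming default CLIP vision settings and the Helsinki-NLP/opus-mt-en-es Marian config (the same checkpoint whose tokenizer app.py loads); both checkpoint choices here are illustrative assumptions:

    from transformers import CLIPVisionConfig, MarianConfig

    from model.flax_clip_vision_marian.configuration_clip_vision_marian import (
        CLIPVisionMarianConfig,
    )

    clip_vision_config = CLIPVisionConfig()  # default ViT-B/32-style vision settings (assumption)
    marian_config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-es")

    config = CLIPVisionMarianConfig.from_clip_vision_marian_configs(
        clip_vision_config, marian_config
    )
    assert config.is_encoder_decoder
    print(config.to_dict()["model_type"])  # "clip-vision-marian"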
model/flax_clip_vision_marian/generation_clip_vision_marian_utils.py ADDED
@@ -0,0 +1,814 @@
+ # coding=utf-8
+ # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from functools import partial
+ from typing import Dict, Optional
+
+ import numpy as np
+
+ import flax
+ import jax
+ import jax.numpy as jnp
+ import jaxlib.xla_extension as jax_xla
+ from jax import lax
+
+ from transformers.file_utils import ModelOutput
+ from transformers.generation_flax_logits_process import (
+     FlaxForcedBOSTokenLogitsProcessor,
+     FlaxForcedEOSTokenLogitsProcessor,
+     FlaxLogitsProcessorList,
+     FlaxMinLengthLogitsProcessor,
+     FlaxTemperatureLogitsWarper,
+     FlaxTopKLogitsWarper,
+     FlaxTopPLogitsWarper,
+ )
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ @flax.struct.dataclass
+ class FlaxGreedySearchOutput(ModelOutput):
+     """
+     Flax Base class for outputs of decoder-only generation models using greedy search.
+     Args:
+         sequences (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, max_length)`):
+             The generated sequences.
+     """
+
+     sequences: jax_xla.DeviceArray = None
+
+
+ @flax.struct.dataclass
+ class FlaxSampleOutput(ModelOutput):
+     """
+     Flax Base class for outputs of decoder-only generation models using sampling.
+     Args:
+         sequences (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, max_length)`):
+             The generated sequences.
+     """
+
+     sequences: jax_xla.DeviceArray = None
+
+
+ @flax.struct.dataclass
+ class FlaxBeamSearchOutput(ModelOutput):
+     """
+     Flax Base class for outputs of decoder-only generation models using beam search.
+     Args:
+         sequences (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, max_length)`):
+             The generated sequences.
+         scores (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size,)`):
+             The scores (log probabilities) of the generated sequences.
+     """
+
+     sequences: jax_xla.DeviceArray = None
+     scores: jax_xla.DeviceArray = None
+
+
+ @flax.struct.dataclass
+ class GreedyState:
+     cur_len: jax_xla.DeviceArray
+     sequences: jax_xla.DeviceArray
+     running_token: jax_xla.DeviceArray
+     is_sent_finished: jax_xla.DeviceArray
+     model_kwargs: Dict[str, jax_xla.DeviceArray]
+
+
+ @flax.struct.dataclass
+ class SampleState:
+     cur_len: jax_xla.DeviceArray
+     sequences: jax_xla.DeviceArray
+     running_token: jax_xla.DeviceArray
+     is_sent_finished: jax_xla.DeviceArray
+     prng_key: jax_xla.DeviceArray
+     model_kwargs: Dict[str, jax_xla.DeviceArray]
+
+
+ @flax.struct.dataclass
+ class BeamSearchState:
+     cur_len: jax_xla.DeviceArray
+     running_sequences: jax_xla.DeviceArray
+     running_scores: jax_xla.DeviceArray
+     sequences: jax_xla.DeviceArray
+     scores: jax_xla.DeviceArray
+     is_sent_finished: jax_xla.DeviceArray
+     model_kwargs: Dict[str, jax_xla.DeviceArray]
+
+
+ class FlaxGenerationMixin:
+     """
+     A class containing all of the functions supporting generation, to be used as a mixin in
+     :class:`~transformers.FlaxPreTrainedModel`.
+     """
+
+     @staticmethod
+     def _run_loop_in_debug(cond_fn, body_fn, init_state):
+         """
+         Run generation in untraced mode. This should only be used for debugging purposes.
+         """
+         state = init_state
+         while cond_fn(state):
+             state = body_fn(state)
+         return state
+
+     def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, model_kwargs):
+         encoder_kwargs = {
+             argument: value
+             for argument, value in model_kwargs.items()
+             if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
+         }
+         model_kwargs["encoder_outputs"] = self.encode(input_ids, return_dict=True, **encoder_kwargs)
+         return model_kwargs
+
+     @staticmethod
+     def _expand_to_num_beams(tensor, num_beams):
+         return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
+
+     def _adapt_logits_for_beam_search(self, logits):
+         """
+         This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
+         search behavior. Note that the only model that overwrites this method is
+         :class:`~transformers.FlaxMarianMTModel`.
+         """
+         return logits
+
+     def generate(
+         self,
+         input_ids: jax_xla.DeviceArray,
+         max_length: Optional[int] = None,
+         pad_token_id: Optional[int] = None,
+         bos_token_id: Optional[int] = None,
+         eos_token_id: Optional[int] = None,
+         decoder_start_token_id: Optional[int] = None,
+         do_sample: Optional[bool] = None,
+         prng_key: Optional[jax_xla.DeviceArray] = None,
+         top_k: Optional[int] = None,
+         top_p: Optional[float] = None,
+         temperature: Optional[float] = None,
+         num_beams: Optional[int] = None,
+         no_repeat_ngram_size: Optional[int] = None,
+         min_length: Optional[int] = None,
+         forced_bos_token_id: Optional[int] = None,
+         forced_eos_token_id: Optional[int] = None,
+         length_penalty: Optional[float] = None,
+         early_stopping: Optional[bool] = None,
+         trace: bool = True,
+         params: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+         **model_kwargs,
+     ):
+         r"""
+         Generates sequences for models with a language modeling head. The method currently supports greedy decoding
+         and multinomial sampling.
+         Apart from :obj:`input_ids`, all the arguments below will default to the value of the attribute of the same
+         name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the
+         default values of those configs.
+         Most of these parameters are explained in more detail in `this blog post
+         <https://huggingface.co/blog/how-to-generate>`__.
+         Parameters:
+             input_ids (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+                 The sequence used as a prompt for the generation.
+             max_length (:obj:`int`, `optional`, defaults to 20):
+                 The maximum length of the sequence to be generated.
+             do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
+                 Whether or not to use sampling; use greedy decoding otherwise.
+             temperature (:obj:`float`, `optional`, defaults to 1.0):
+                 The value used to modulate the next token probabilities.
+             top_k (:obj:`int`, `optional`, defaults to 50):
+                 The number of highest probability vocabulary tokens to keep for top-k-filtering.
+             top_p (:obj:`float`, `optional`, defaults to 1.0):
+                 If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or
+                 higher are kept for generation.
+             pad_token_id (:obj:`int`, `optional`):
+                 The id of the `padding` token.
+             bos_token_id (:obj:`int`, `optional`):
+                 The id of the `beginning-of-sequence` token.
+             eos_token_id (:obj:`int`, `optional`):
+                 The id of the `end-of-sequence` token.
+             num_beams (:obj:`int`, `optional`, defaults to 1):
+                 Number of beams for beam search. 1 means no beam search.
+             decoder_start_token_id (:obj:`int`, `optional`):
+                 If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
+             trace (:obj:`bool`, `optional`, defaults to :obj:`True`):
+                 Whether to trace generation. Setting ``trace=False`` should only be used for debugging and will lead to
+                 a considerably slower runtime.
+             params (:obj:`Dict[str, jax_xla.DeviceArray]`, `optional`):
+                 Optionally the model parameters can be passed. Can be useful for parallelized generation.
+             model_kwargs:
+                 Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
+         Return:
+             :class:`~transformers.file_utils.ModelOutput`.
+         Examples::
+             >>> from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
+             >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+             >>> model = FlaxAutoModelForCausalLM.from_pretrained("distilgpt2")
+             >>> input_context = "The dog"
+             >>> # encode input context
+             >>> input_ids = tokenizer(input_context, return_tensors="jax").input_ids
+             >>> # generate candidates using sampling
+             >>> outputs = model.generate(input_ids=input_ids, max_length=20, top_k=30, do_sample=True)
+             >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
+         """
+         # set init values
+         max_length = max_length if max_length is not None else self.config.marian_config.max_length
+         bos_token_id = bos_token_id if bos_token_id is not None else self.config.marian_config.bos_token_id
+         pad_token_id = pad_token_id if pad_token_id is not None else self.config.marian_config.pad_token_id
+         eos_token_id = eos_token_id if eos_token_id is not None else self.config.marian_config.eos_token_id
+         decoder_start_token_id = (
+             decoder_start_token_id if decoder_start_token_id else self.config.marian_config.decoder_start_token_id
+         )
+         prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+         if decoder_start_token_id is None and self.config.is_encoder_decoder:
+             raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
+
+         if self.config.is_encoder_decoder:
+             # add encoder_outputs to model_kwargs
+             model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs)
+             # prepare decoder_input_ids for generation
+             input_ids = jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+         do_sample = do_sample if do_sample is not None else self.config.marian_config.do_sample
+         num_beams = num_beams if num_beams is not None else self.config.marian_config.num_beams
+
+         if not do_sample and num_beams == 1:
+             logits_processor = self._get_logits_processor(
+                 no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id
+             )
+             return self._greedy_search(
+                 input_ids,
+                 max_length,
+                 pad_token_id,
+                 eos_token_id,
+                 logits_processor=logits_processor,
+                 trace=trace,
+                 params=params,
+                 model_kwargs=model_kwargs,
+             )
+         elif do_sample and num_beams == 1:
+             logits_warper = self._get_logits_warper(top_k=top_k, top_p=top_p, temperature=temperature)
+             logits_processor = self._get_logits_processor(
+                 no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id
+             )
+             return self._sample(
+                 input_ids,
+                 max_length,
+                 pad_token_id,
+                 eos_token_id,
+                 prng_key,
+                 logits_warper=logits_warper,
+                 logits_processor=logits_processor,
+                 trace=trace,
+                 params=params,
+                 model_kwargs=model_kwargs,
+             )
+         elif not do_sample and num_beams > 1:
+             # broadcast input_ids & encoder_outputs
+             input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams)
+
+             if "encoder_outputs" in model_kwargs:
+                 model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
+                     model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=num_beams
+                 )
+
+             if "attention_mask" in model_kwargs:
+                 model_kwargs["attention_mask"] = self._expand_to_num_beams(
+                     model_kwargs["attention_mask"], num_beams=num_beams
+                 )
+
+             logits_processor = self._get_logits_processor(
+                 no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id
+             )
+
+             return self._beam_search(
+                 input_ids,
+                 max_length,
+                 pad_token_id,
+                 eos_token_id,
+                 length_penalty=length_penalty,
+                 early_stopping=early_stopping,
+                 logits_processor=logits_processor,
+                 trace=trace,
+                 params=params,
+                 model_kwargs=model_kwargs,
+             )
+         else:
+             raise NotImplementedError("Beam sampling is currently not implemented.")
+
+     def _get_logits_warper(
+         self, top_k: int = None, top_p: float = None, temperature: float = None
+     ) -> FlaxLogitsProcessorList:
+         """
+         This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+         :obj:`~transformers.FlaxLogitsWarper` instances used for multinomial sampling.
+         """
+
+         # init warp parameters
+         top_k = top_k if top_k is not None else self.config.marian_config.top_k
+         top_p = top_p if top_p is not None else self.config.marian_config.top_p
+         temperature = temperature if temperature is not None else self.config.marian_config.temperature
+         # instantiate warpers list
+         warpers = FlaxLogitsProcessorList()
+
+         # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
+         # all samplers can be found in `generation_utils_samplers.py`
+         if temperature is not None and temperature != 1.0:
+             warpers.append(FlaxTemperatureLogitsWarper(temperature))
+         if top_k is not None and top_k != 0:
+             warpers.append(FlaxTopKLogitsWarper(top_k=top_k, min_tokens_to_keep=1))
+         if top_p is not None and top_p < 1.0:
+             warpers.append(FlaxTopPLogitsWarper(top_p=top_p, min_tokens_to_keep=1))
+
+         return warpers
+
+     def _get_logits_processor(
+         self,
+         no_repeat_ngram_size: int,
+         min_length: int,
+         max_length: int,
+         eos_token_id: int,
+         forced_bos_token_id: int,
+         forced_eos_token_id: int,
+     ) -> FlaxLogitsProcessorList:
+         """
+         This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+         :obj:`~transformers.FlaxLogitsProcessor` instances used to modify the scores of the language model head.
+         """
+         processors = FlaxLogitsProcessorList()
+
+         # init warp parameters
+         no_repeat_ngram_size = (
+             no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.marian_config.no_repeat_ngram_size
+         )
+         min_length = min_length if min_length is not None else self.config.marian_config.min_length
+         eos_token_id = eos_token_id if eos_token_id is not None else self.config.marian_config.eos_token_id
+         forced_bos_token_id = (
+             forced_bos_token_id if forced_bos_token_id is not None else self.config.marian_config.forced_bos_token_id
+         )
+         forced_eos_token_id = (
+             forced_eos_token_id if forced_eos_token_id is not None else self.config.marian_config.forced_eos_token_id
+         )
+
+         # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
+         # all samplers can be found in `generation_utils_samplers.py`
+         if min_length is not None and eos_token_id is not None and min_length > -1:
+             processors.append(FlaxMinLengthLogitsProcessor(min_length, eos_token_id))
+         if forced_bos_token_id is not None:
+             processors.append(FlaxForcedBOSTokenLogitsProcessor(forced_bos_token_id))
+         if forced_eos_token_id is not None:
+             processors.append(FlaxForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id))
+         return processors
+
+     def _greedy_search(
+         self,
+         input_ids: None,
+         max_length: Optional[int] = None,
+         pad_token_id: Optional[int] = None,
+         eos_token_id: Optional[int] = None,
+         logits_processor: Optional[FlaxLogitsProcessorList] = None,
+         trace: bool = True,
+         params: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+         model_kwargs: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+     ):
+         # init values
+         max_length = max_length if max_length is not None else self.config.marian_config.max_length
+         pad_token_id = pad_token_id if pad_token_id is not None else self.config.marian_config.pad_token_id
+         eos_token_id = eos_token_id if eos_token_id is not None else self.config.marian_config.eos_token_id
+
+         batch_size, cur_len = input_ids.shape
+
+         eos_token_id = jnp.array(eos_token_id)
+         pad_token_id = jnp.array(pad_token_id)
+         cur_len = jnp.array(cur_len)
+
+         # per batch-item holding current token in loop.
+         sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+         sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+         # per batch-item state bit indicating if sentence has finished.
+         is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+         # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+         # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+         model = self.decode if self.config.is_encoder_decoder else self
+         # initialize model specific kwargs
+         model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+         # initialize state
+         state = GreedyState(
+             cur_len=cur_len,
+             sequences=sequences,
+             running_token=input_ids,
+             is_sent_finished=is_sent_finished,
+             model_kwargs=model_kwargs,
+         )
+
+         def greedy_search_cond_fn(state):
+             """state termination condition fn."""
+             has_reached_max_length = state.cur_len == max_length
+             all_sequence_finished = jnp.all(state.is_sent_finished)
+             finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+             return ~finish_generation
+
+         def greedy_search_body_fn(state):
+             """state update fn."""
+             model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+             logits = model_outputs.logits[:, -1]
+
+             # apply min_length, ...
+             logits = logits_processor(state.sequences, logits, state.cur_len)
+
+             next_token = jnp.argmax(logits, axis=-1)
+
+             next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
+             next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+             next_token = next_token[:, None]
+
+             next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+             next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+             return GreedyState(
+                 cur_len=state.cur_len + 1,
+                 sequences=next_sequences,
+                 running_token=next_token,
+                 is_sent_finished=next_is_sent_finished,
+                 model_kwargs=next_model_kwargs,
+             )
+
+         # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+         if input_ids.shape[1] > 1:
+             state = greedy_search_body_fn(state)
+
+         if not trace:
+             state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
+         else:
+             state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
+
+         return FlaxGreedySearchOutput(sequences=state.sequences)
+
+     def _sample(
+         self,
+         input_ids: None,
+         max_length: Optional[int] = None,
+         pad_token_id: Optional[int] = None,
+         eos_token_id: Optional[int] = None,
+         prng_key: Optional[jax_xla.DeviceArray] = None,
+         logits_processor: Optional[FlaxLogitsProcessorList] = None,
+         logits_warper: Optional[FlaxLogitsProcessorList] = None,
+         trace: bool = True,
+         params: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+         model_kwargs: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+     ):
+         # init values
+         max_length = max_length if max_length is not None else self.config.marian_config.max_length
+         pad_token_id = pad_token_id if pad_token_id is not None else self.config.marian_config.pad_token_id
+         eos_token_id = eos_token_id if eos_token_id is not None else self.config.marian_config.eos_token_id
+         prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+         batch_size, cur_len = input_ids.shape
+
+         eos_token_id = jnp.array(eos_token_id)
+         pad_token_id = jnp.array(pad_token_id)
+         cur_len = jnp.array(cur_len)
+
+         # per batch-item holding current token in loop.
+         sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
+         sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
+
+         # per batch-item state bit indicating if sentence has finished.
+         is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
+
+         # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+         # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+         model = self.decode if self.config.is_encoder_decoder else self
+
+         # initialize model specific kwargs
+         model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
+
+         # initialize state
+         state = SampleState(
+             cur_len=cur_len,
+             sequences=sequences,
+             running_token=input_ids,
+             is_sent_finished=is_sent_finished,
+             prng_key=prng_key,
+             model_kwargs=model_kwargs,
+         )
+
+         def sample_search_cond_fn(state):
+             """state termination condition fn."""
+             has_reached_max_length = state.cur_len == max_length
+             all_sequence_finished = jnp.all(state.is_sent_finished)
+             finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
+             return ~finish_generation
+
+         def sample_search_body_fn(state):
+             """state update fn."""
+             prng_key, prng_key_next = jax.random.split(state.prng_key)
+             model_outputs = model(state.running_token, params=params, **state.model_kwargs)
+
+             logits = model_outputs.logits[:, -1]
+
+             # apply min_length, ...
+             logits = logits_processor(state.sequences, logits, state.cur_len)
+             # apply top_p, top_k, temperature
+             logits = logits_warper(logits, logits, state.cur_len)
+
+             next_token = jax.random.categorical(prng_key, model_outputs.logits[:, -1], axis=-1)
+
+             next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
+             next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished
+             next_token = next_token[:, None]
+
+             next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
+             next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+
+             return SampleState(
+                 cur_len=state.cur_len + 1,
+                 sequences=next_sequences,
+                 running_token=next_token,
+                 is_sent_finished=next_is_sent_finished,
+                 model_kwargs=next_model_kwargs,
+                 prng_key=prng_key_next,
+             )
+
+         # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+         if input_ids.shape[1] > 1:
+             state = sample_search_body_fn(state)
+
+         if not trace:
+             state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
+         else:
+             state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
+
+         return FlaxSampleOutput(sequences=state.sequences)
+
+     def _beam_search(
+         self,
+         input_ids: None,
+         max_length: Optional[int] = None,
+         pad_token_id: Optional[int] = None,
+         eos_token_id: Optional[int] = None,
+         length_penalty: Optional[float] = None,
+         early_stopping: Optional[bool] = None,
+         logits_processor: Optional[FlaxLogitsProcessorList] = None,
+         trace: bool = True,
+         params: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+         model_kwargs: Optional[Dict[str, jax_xla.DeviceArray]] = None,
+     ):
+         """
+         This beam search function is heavily inspired by Flax's official example:
+         https://github.com/google/flax/blob/master/examples/wmt/train.py#L254
+         """
+
+         def flatten_beam_dim(tensor):
+             """Flattens the first two dimensions of a non-scalar array."""
+             # ignore scalars (e.g. cache index)
+             if tensor.ndim == 0:
+                 return tensor
+             return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
+
+         def unflatten_beam_dim(tensor, batch_size, num_beams):
+             """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+             # ignore scalars (e.g. cache index)
+             if tensor.ndim == 0:
+                 return tensor
+             return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
+
+         def gather_beams(nested, beam_indices, batch_size, new_num_beams):
+             """
+             Gathers the beam slices indexed by beam_indices into new beam array.
+             """
+             batch_indices = jnp.reshape(
+                 jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
+             )
+
+             def gather_fn(tensor):
+                 # ignore scalars (e.g. cache index)
+                 if tensor.ndim == 0:
+                     return tensor
+                 else:
+                     return tensor[batch_indices, beam_indices]
+
+             return jax.tree_map(gather_fn, nested)
+
+         # init values
+         max_length = max_length if max_length is not None else self.config.marian_config.max_length
+         pad_token_id = pad_token_id if pad_token_id is not None else self.config.marian_config.pad_token_id
+         eos_token_id = eos_token_id if eos_token_id is not None else self.config.marian_config.eos_token_id
+         length_penalty = length_penalty if length_penalty is not None else self.config.marian_config.length_penalty
+         early_stopping = early_stopping if early_stopping is not None else self.config.marian_config.early_stopping
+
+         batch_size, num_beams, cur_len = input_ids.shape
+
+         eos_token_id = jnp.array(eos_token_id)
+         pad_token_id = jnp.array(pad_token_id)
+         cur_len = jnp.array(cur_len)
+
+         # per batch,beam-item holding current token in loop.
+         sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+         running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
+         running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
+
+         # per batch,beam-item state bit indicating if sentence has finished.
+         is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
+
+         # per batch,beam-item score, logprobs
+         running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
+         scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
+
+         # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
+         # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
+         model = self.decode if self.config.is_encoder_decoder else self
+
+         # flatten beam dim
+         if "encoder_outputs" in model_kwargs:
+             model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
+                 model_kwargs["encoder_outputs"]["last_hidden_state"]
+             )
+         if "attention_mask" in model_kwargs:
+             model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"])
+
+         # initialize model specific kwargs
+         model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
+
+         # initialize state
+         state = BeamSearchState(
+             cur_len=cur_len,
+             running_sequences=running_sequences,
+             running_scores=running_scores,
+             sequences=sequences,
+             scores=scores,
+             is_sent_finished=is_sent_finished,
+             model_kwargs=model_kwargs,
+         )
+
+         def beam_search_cond_fn(state):
+             """beam search state termination condition fn."""
+
+             # 1. is less than max length?
+             not_max_length_yet = state.cur_len < max_length
+
+             # 2. can the new beams still improve?
+             best_running_score = state.running_scores[:, -1:] / (max_length ** length_penalty)
+             worst_finished_score = jnp.where(
+                 state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
+             )
+             improvement_still_possible = jnp.all(worst_finished_score < best_running_score)
+
+             # 3. is there still a beam that has not finished?
+             still_open_beam = ~(jnp.all(state.is_sent_finished) & early_stopping)
+
+             return not_max_length_yet & still_open_beam & improvement_still_possible
+
+         def beam_search_body_fn(state, input_ids_length=1):
+             """beam search state update fn."""
+             # 1. Forward current tokens
+             # Collect the current position slice along length to feed the fast
+             # autoregressive decoder model. Flatten the beam dimension into batch
+             # dimension for feeding into the model.
+             # unflatten beam dimension
+             # Unflatten beam dimension in attention cache arrays
+             input_token = flatten_beam_dim(
+                 lax.dynamic_slice(
+                     state.running_sequences,
+                     (0, 0, state.cur_len - input_ids_length),
+                     (batch_size, num_beams, input_ids_length),
+                 )
+             )
+             model_outputs = model(input_token, params=params, **state.model_kwargs)
+
+             logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
+             cache = jax.tree_map(
+                 lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
+             )
+
+             # adapt logits for FlaxMarianMTModel
+             logits = self._adapt_logits_for_beam_search(logits)
+
+             # 2. Compute log probs
+             # get log probabilities from logits,
+             # process logits with processors (*e.g.* min_length, ...), and
+             # add new logprobs to existing running logprobs scores.
+             log_probs = jax.nn.log_softmax(logits)
+             log_probs = logits_processor(
+                 flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len
+             )
+             log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
+             log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
+             vocab_size = log_probs.shape[2]
+             log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
+
+             # 3. Retrieve top-K
+             # Each item in batch has num_beams * vocab_size candidate sequences.
+             # For each item, get the top 2*k candidates with the highest log-
+             # probabilities. We gather the top 2*K beams here so that even if the best
+             # K sequences reach EOS simultaneously, we have another K sequences
+             # remaining to continue the live beam search.
+             # Gather the top 2*K scores from _all_ beams.
+             # Gather 2*k top beams.
+             # Recover the beam index by floor division.
+             # Recover token id by modulo division and expand Id array for broadcasting.
+             # Update sequences for the 2*K top-k new sequences.
+             beams_to_keep = 2 * num_beams
+             topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
+             topk_beam_indices = topk_indices // vocab_size
+             topk_running_sequences = gather_beams(
+                 state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
+             )
+             topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
+             topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
+
+             # 4. Check which sequences have ended
+             # Update current sequences:
+             # Did any of these sequences reach an end marker?
+             # To prevent these just finished sequences from being added to the current sequences
+             # set of active beam search sequences, set their log probs to a very large
+             # negative value.
+             did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
+             running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
+             # 5. Get running sequences scores for next
+             # Determine the top k beam indices (from top 2*k beams) from log probs
+             # and gather top k beams (from top 2*k beams).
+             next_topk_indices = jnp.flip(lax.top_k(running_topk_log_probs, k=num_beams)[1], axis=1)
+             next_running_sequences, next_running_scores = gather_beams(
+                 [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
+             )
+
+             # 6. Process topk logits
+             # Further process log probs:
+             # - add length penalty
+             # - make sure no scores can be added anymore if beam is full
+             # - make sure still running sequences cannot be chosen as finalized beam
+             topk_log_probs = topk_log_probs / (state.cur_len ** length_penalty)
+             beams_in_batch_are_full = (
+                 jnp.broadcast_to(state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape)
+                 & early_stopping
+             )
+             add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
+             topk_log_probs += add_penalty * np.array(-1.0e7)
+
+             # 7. Get scores, sequences, is sentence finished for next.
+             # Combine sequences, scores, and flags along the beam dimension and compare
+             # new finished sequence scores to existing finished scores and select the
+             # best from the new set of beams
+             merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
+             merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
+             merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
+             topk_merged_indices = jnp.flip(lax.top_k(merged_scores, k=num_beams)[1], axis=1)
+             next_sequences, next_scores, next_is_sent_finished = gather_beams(
+                 [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
+             )
+
+             # 8. Update model kwargs.
+             # Determine the top k beam indices from the original set of all beams.
+             # With these, gather the top k beam-associated caches.
+             next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
+             next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
+             model_outputs["past_key_values"] = jax.tree_map(lambda x: flatten_beam_dim(x), next_cache)
+             next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
+
+             return BeamSearchState(
+                 cur_len=state.cur_len + 1,
+                 running_scores=next_running_scores,
+                 running_sequences=next_running_sequences,
+                 scores=next_scores,
+                 sequences=next_sequences,
+                 is_sent_finished=next_is_sent_finished,
+                 model_kwargs=next_model_kwargs,
+             )
+
+         # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
+         if input_ids.shape[-1] > 1:
+             state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
+
+         if not trace:
+             state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
+         else:
+             state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
+
+         # Account for the edge-case where there are no finished sequences for a
+         # particular batch item. If so, return running sequences for that batch item.
+         none_finished = jnp.any(state.is_sent_finished, axis=1)
+         sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
+         scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
+
+         # take best beam for each batch
+         sequences = sequences[:, -1]
+         scores = scores[:, -1]
+
+         return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
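generate() above dispatches on do_sample and num_beams: greedy search when both are off, multinomial sampling with the temperature/top-k/top-p warpers when do_sample is set, and beam search when num_beams > 1 (beam sampling raises NotImplementedError). A hedged sketch of the three call patterns, assuming `model` is a loaded FlaxCLIPVisionMarianMT and `pixel_values` a preprocessed image batch as in app.py:

    import jax

    # not do_sample, num_beams == 1 -> _greedy_search
    greedy_out = model.generate(input_ids=pixel_values, max_length=64)

    # do_sample, num_beams == 1 -> _sample (temperature/top-k/top-p warpers applied)
    sample_out = model.generate(
        input_ids=pixel_values,
        max_length=64,
        do_sample=True,
        temperature=0.8,
        top_k=50,
        top_p=0.95,
        prng_key=jax.random.PRNGKey(0),
    )

    # not do_sample, num_beams > 1 -> _beam_search (also returns per-sequence scores)
    beam_out = model.generate(input_ids=pixel_values, max_length=64, num_beams=4)
    print(beam_out.sequences.shape, beam_out.scores.shape)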
model/flax_clip_vision_marian/modeling_clip_vision_marian.py ADDED
@@ -0,0 +1,778 @@
1
+ from typing import Callable, Optional, Tuple
2
+
3
+ import flax.linen as nn
4
+ import jax
5
+ import jax.numpy as jnp
6
+ from flax.core.frozen_dict import FrozenDict, unfreeze
7
+ from jax import lax
8
+ from jax.random import PRNGKey
9
+ from transformers import (
10
+ CLIPVisionConfig,
11
+ FlaxCLIPVisionModel,
12
+ FlaxMarianMTModel,
13
+ MarianConfig,
14
+ )
15
+ from transformers.modeling_flax_outputs import (
16
+ FlaxBaseModelOutputWithPooling,
17
+ FlaxCausalLMOutputWithCrossAttentions,
18
+ FlaxSeq2SeqLMOutput,
19
+ FlaxSeq2SeqModelOutput,
20
+ )
21
+ from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
22
+ from transformers.models.marian.modeling_flax_marian import (
23
+ FlaxMarianDecoder,
24
+ FlaxPreTrainedModel,
25
+ shift_tokens_right,
26
+ )
27
+
28
+ from .configuration_clip_vision_marian import CLIPVisionMarianConfig
29
+ from .modeling_clip_vision_marian_utils import FlaxCLIPVisionMarianPreTrainedModel
30
+
31
+
32
+ class FlaxCLIPVisionMarianModule(nn.Module):
33
+ config: CLIPVisionMarianConfig
34
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
35
+
36
+ def setup(self):
37
+ self.shared = nn.Embed(
38
+ self.config.marian_config.vocab_size,
39
+ self.config.marian_config.d_model,
40
+ embedding_init=jax.nn.initializers.normal(
41
+ self.config.marian_config.init_std, self.dtype
42
+ ),
43
+ dtype=self.dtype,
44
+ )
45
+
46
+ self.encoder = FlaxCLIPVisionModule(
47
+ self.config.clip_vision_config, dtype=self.dtype
48
+ )
49
+ self.decoder = FlaxMarianDecoder(
50
+ self.config.marian_config, dtype=self.dtype, embed_tokens=self.shared
51
+ )
52
+
53
+ self.visual_projection = nn.Dense(
54
+ self.config.marian_config.hidden_size,
55
+ dtype=self.dtype,
56
+ kernel_init=jax.nn.initializers.normal(
57
+ self.config.marian_config.init_std, self.dtype
58
+ ),
59
+ )
60
+
61
+ def _get_encoder_module(self):
62
+ return self.encoder
63
+
64
+ def _get_decoder_module(self):
65
+ return self.decoder
66
+
67
+ def __call__(
68
+ self,
69
+ pixel_values,
70
+ decoder_input_ids,
71
+ decoder_attention_mask,
72
+ decoder_position_ids,
73
+ output_attentions: bool = False,
74
+ output_hidden_states: bool = False,
75
+ return_dict: bool = True,
76
+ deterministic: bool = True,
77
+ ):
78
+
79
+ encoder_outputs = self.encoder(
80
+ pixel_values=pixel_values,
81
+ output_attentions=output_attentions,
82
+ output_hidden_states=output_hidden_states,
83
+ return_dict=return_dict,
84
+ deterministic=deterministic,
85
+ )
86
+
87
+ batch_size, sequence_length = encoder_outputs[0].shape[:2]
88
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
89
+
90
+ encoder_hidden_states = self.visual_projection(encoder_outputs[0])
91
+
92
+
93
+ decoder_outputs = self.decoder(
94
+ input_ids=decoder_input_ids,
95
+ attention_mask=decoder_attention_mask,
96
+ position_ids=decoder_position_ids,
97
+ encoder_hidden_states=encoder_hidden_states,
98
+ encoder_attention_mask=encoder_attention_mask,
99
+ output_attentions=output_attentions,
100
+ output_hidden_states=output_hidden_states,
101
+ return_dict=return_dict,
102
+ deterministic=deterministic,
103
+ )
104
+
105
+ if not return_dict:
106
+ return decoder_outputs + encoder_outputs
107
+
108
+ return FlaxSeq2SeqModelOutput(
109
+ last_hidden_state=decoder_outputs.last_hidden_state,
110
+ decoder_hidden_states=decoder_outputs.hidden_states,
111
+ decoder_attentions=decoder_outputs.attentions,
112
+ cross_attentions=decoder_outputs.cross_attentions,
113
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
114
+ encoder_hidden_states=encoder_outputs.hidden_states,
115
+ encoder_attentions=encoder_outputs.attentions,
116
+ )
117
+
118
+
119
+ class FlaxCLIPVisionMarianMTModule(nn.Module):
120
+ config: CLIPVisionMarianConfig
121
+ dtype: jnp.dtype = jnp.float32
122
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
123
+
124
+ def setup(self):
125
+ self.model = FlaxCLIPVisionMarianModule(config=self.config, dtype=self.dtype)
126
+ self.lm_head = nn.Dense(
127
+ self.model.shared.num_embeddings,
128
+ use_bias=False,
129
+ dtype=self.dtype,
130
+ kernel_init=jax.nn.initializers.normal(
131
+ self.config.marian_config.init_std, self.dtype
132
+ ),
133
+ )
134
+ self.final_logits_bias = self.param(
135
+ "final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)
136
+ )
137
+
138
+ def _get_encoder_module(self):
139
+ return self.model.encoder
140
+
141
+ def _get_decoder_module(self):
142
+ return self.model.decoder
143
+
144
+ def _get_visual_projection_module(self):
145
+ return self.model.visual_projection
146
+
147
+ def __call__(
148
+ self,
149
+ pixel_values,
150
+ decoder_input_ids,
151
+ decoder_attention_mask,
152
+ decoder_position_ids,
153
+ output_attentions: bool = False,
154
+ output_hidden_states: bool = False,
155
+ return_dict: bool = True,
156
+ deterministic: bool = True,
157
+ ):
158
+ outputs = self.model(
159
+ pixel_values=pixel_values,
160
+ decoder_input_ids=decoder_input_ids,
161
+ decoder_attention_mask=decoder_attention_mask,
162
+ decoder_position_ids=decoder_position_ids,
163
+ output_attentions=output_attentions,
164
+ output_hidden_states=output_hidden_states,
165
+ return_dict=return_dict,
166
+ deterministic=deterministic,
167
+ )
168
+
169
+ hidden_states = outputs[0]
170
+
171
+ if self.config.tie_word_embeddings:
172
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
173
+ lm_logits = self.lm_head.apply(
174
+ {"params": {"kernel": shared_embedding.T}}, hidden_states
175
+ )
176
+ else:
177
+ lm_logits = self.lm_head(hidden_states)
178
+
179
+ lm_logits += self.final_logits_bias
180
+
181
+ if not return_dict:
182
+ output = (lm_logits,) + outputs[1:]
183
+ return output
184
+
185
+ return FlaxSeq2SeqLMOutput(
186
+ logits=lm_logits,
187
+ decoder_hidden_states=outputs.decoder_hidden_states,
188
+ decoder_attentions=outputs.decoder_attentions,
189
+ cross_attentions=outputs.cross_attentions,
190
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
191
+ encoder_hidden_states=outputs.encoder_hidden_states,
192
+ encoder_attentions=outputs.encoder_attentions,
193
+ )
194
+
195
+
196
+ class FlaxCLIPVisionMarianOuterPreTrainedModel(FlaxCLIPVisionMarianPreTrainedModel):
197
+ config_class = CLIPVisionMarianConfig
198
+ base_model_prefix: str = "model"
199
+ module_class: nn.Module = None
200
+
201
+ def __init__(
202
+ self,
203
+ config: CLIPVisionMarianConfig,
204
+ input_shape: Tuple = None,
205
+ seed: int = 0,
206
+ dtype: jnp.dtype = jnp.float32,
207
+ **kwargs,
208
+ ):
209
+ if input_shape is None:
210
+ input_shape = (
211
+ (
212
+ 1,
213
+ config.clip_vision_config.image_size,
214
+ config.clip_vision_config.image_size,
215
+ 3,
216
+ ),
217
+ (1, 1),
218
+ )
219
+
220
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
221
+ super().__init__(
222
+ config, module, input_shape=input_shape, seed=seed, dtype=dtype
223
+ )
224
+
225
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
226
+ # init input tensors
227
+ pixel_values = jax.random.normal(rng, input_shape[0])
228
+ # # make sure initialization pass will work for FlaxMarianForSequenceClassificationModule
229
+ # input_ids = jax.ops.index_update(input_ids, (..., -1), self.config.eos_token_id)
230
+
231
+ decoder_input_ids = jnp.zeros(input_shape[1], dtype="i4")
232
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
233
+
234
+ batch_size, sequence_length = decoder_input_ids.shape
235
+ decoder_position_ids = jnp.broadcast_to(
236
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
237
+ )
238
+
239
+ params_rng, dropout_rng = jax.random.split(rng)
240
+ rngs = {"params": params_rng, "dropout": dropout_rng}
241
+
242
+ return self.module.init(
243
+ rngs,
244
+ pixel_values,
245
+ decoder_input_ids,
246
+ decoder_attention_mask,
247
+ decoder_position_ids,
248
+ )["params"]
249
+
250
+ def init_cache(self, batch_size, max_length, encoder_outputs):
251
+
252
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
253
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
254
+ decoder_position_ids = jnp.broadcast_to(
255
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]),
256
+ decoder_input_ids.shape,
257
+ )
258
+
259
+ def _decoder_forward(
260
+ module,
261
+ decoder_input_ids,
262
+ decoder_attention_mask,
263
+ decoder_position_ids,
264
+ **kwargs,
265
+ ):
266
+ decoder_module = module._get_decoder_module()
267
+ return decoder_module(
268
+ decoder_input_ids,
269
+ decoder_attention_mask,
270
+ decoder_position_ids,
271
+ **kwargs,
272
+ )
273
+
274
+ init_variables = self.module.init(
275
+ jax.random.PRNGKey(0),
276
+ decoder_input_ids=decoder_input_ids,
277
+ decoder_attention_mask=decoder_attention_mask,
278
+ decoder_position_ids=decoder_position_ids,
279
+ encoder_hidden_states=encoder_outputs[0],
280
+ init_cache=True,
281
+ method=_decoder_forward, # we only need to call the decoder to init the cache
282
+ )
283
+ return unfreeze(init_variables["cache"])
284
+
285
+ def encode(
286
+ self,
287
+ pixel_values: jnp.ndarray,
288
+ output_attentions: Optional[bool] = None,
289
+ output_hidden_states: Optional[bool] = None,
290
+ return_dict: Optional[bool] = None,
291
+ train: bool = False,
292
+ params: dict = None,
293
+ dropout_rng: PRNGKey = None,
294
+ ):
295
+ output_attentions = (
296
+ output_attentions
297
+ if output_attentions is not None
298
+ else self.config.output_attentions
299
+ )
300
+ output_hidden_states = (
301
+ output_hidden_states
302
+ if output_hidden_states is not None
303
+ else self.config.output_hidden_states
304
+ )
305
+ return_dict = (
306
+ return_dict if return_dict is not None else self.config.return_dict
307
+ )
308
+
309
+ # pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
310
+
311
+ # Handle any PRNG if needed
312
+ rngs = {}
313
+ if dropout_rng is not None:
314
+ rngs["dropout"] = dropout_rng
315
+
316
+ def _encoder_forward(module, pixel_values, **kwargs):
317
+ encode_module = module._get_encoder_module()
318
+ visual_projection = module._get_visual_projection_module()
319
+ outputs = encode_module(pixel_values, **kwargs)
320
+
321
+ return FlaxBaseModelOutputWithPooling(
322
+ last_hidden_state=visual_projection(outputs.last_hidden_state),
323
+ pooler_output=outputs.pooler_output,
324
+ hidden_states=outputs.hidden_states,
325
+ attentions=outputs.attentions,
326
+ )
327
+
328
+ return self.module.apply(
329
+ {"params": params or self.params},
330
+ pixel_values=jnp.array(pixel_values, dtype=jnp.float32),
331
+ output_attentions=output_attentions,
332
+ output_hidden_states=output_hidden_states,
333
+ return_dict=return_dict,
334
+ deterministic=not train,
335
+ rngs=rngs,
336
+ method=_encoder_forward,
337
+ )
338
+
339
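+ # Runs only the Marian decoder over precomputed encoder_outputs; incremental decoding is
+ # supported by passing past_key_values (the Flax cache created by init_cache).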
+ def decode(
340
+ self,
341
+ decoder_input_ids,
342
+ encoder_outputs,
343
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
344
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
345
+ decoder_position_ids: Optional[jnp.ndarray] = None,
346
+ past_key_values: dict = None,
347
+ output_attentions: Optional[bool] = None,
348
+ output_hidden_states: Optional[bool] = None,
349
+ return_dict: Optional[bool] = None,
350
+ train: bool = False,
351
+ params: dict = None,
352
+ dropout_rng: PRNGKey = None,
353
+ ):
354
+
355
+ output_attentions = (
356
+ output_attentions
357
+ if output_attentions is not None
358
+ else self.config.output_attentions
359
+ )
360
+ output_hidden_states = (
361
+ output_hidden_states
362
+ if output_hidden_states is not None
363
+ else self.config.output_hidden_states
364
+ )
365
+ return_dict = (
366
+ return_dict if return_dict is not None else self.config.return_dict
367
+ )
368
+
369
+ encoder_hidden_states = encoder_outputs[0]
370
+
371
+ if encoder_attention_mask is None:
372
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
373
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
374
+
375
+ batch_size, sequence_length = decoder_input_ids.shape
376
+ if decoder_attention_mask is None:
377
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
378
+
379
+ if decoder_position_ids is None:
380
+ if past_key_values is not None:
381
+ raise ValueError(
382
+ "Make sure to provide `decoder_position_ids` when passing `past_key_values`."
383
+ )
384
+
385
+ decoder_position_ids = jnp.broadcast_to(
386
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
387
+ )
388
+
389
+ # Handle any PRNG if needed
390
+ rngs = {}
391
+ if dropout_rng is not None:
392
+ rngs["dropout"] = dropout_rng
393
+
394
+ inputs = {"params": params or self.params}
395
+
396
+ # If past_key_values are passed, the cache is already initialized; a private flag, init_cache, has to be
397
+ # passed down to ensure the cache is used. The cache must also be marked as mutable so that
398
+ # it can be updated by the FlaxMarianAttention module.
399
+ if past_key_values:
400
+ inputs["cache"] = past_key_values
401
+ mutable = ["cache"]
402
+ else:
403
+ mutable = False
404
+
405
+ def _decoder_forward(
406
+ module,
407
+ decoder_input_ids,
408
+ decoder_attention_mask,
409
+ decoder_position_ids,
410
+ **kwargs,
411
+ ):
412
+ decoder_module = module._get_decoder_module()
413
+ return decoder_module(
414
+ decoder_input_ids,
415
+ decoder_attention_mask,
416
+ decoder_position_ids,
417
+ **kwargs,
418
+ )
419
+
420
+ outputs = self.module.apply(
421
+ inputs,
422
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
423
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
424
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
425
+ encoder_hidden_states=encoder_hidden_states,
426
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
427
+ output_attentions=output_attentions,
428
+ output_hidden_states=output_hidden_states,
429
+ return_dict=return_dict,
430
+ deterministic=not train,
431
+ rngs=rngs,
432
+ mutable=mutable,
433
+ method=_decoder_forward,
434
+ )
435
+
436
+ # add updated cache to model output
437
+ if past_key_values is not None and return_dict:
438
+ outputs, past = outputs
439
+ outputs["past_key_values"] = unfreeze(past["cache"])
440
+ return outputs
441
+ elif past_key_values is not None and not return_dict:
442
+ outputs, past = outputs
443
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
444
+
445
+ return outputs
446
+
447
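+ # Full forward pass: vision encoder followed by the Marian decoder. There are no encoder
+ # input_ids to shift for an image, so decoder_input_ids are expected from the caller.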
+ def __call__(
448
+ self,
449
+ pixel_values: jnp.ndarray,
450
+ decoder_input_ids: Optional[jnp.ndarray] = None,
451
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
452
+ decoder_position_ids: Optional[jnp.ndarray] = None,
453
+ output_attentions: Optional[bool] = None,
454
+ output_hidden_states: Optional[bool] = None,
455
+ return_dict: Optional[bool] = None,
456
+ train: bool = False,
457
+ params: dict = None,
458
+ dropout_rng: PRNGKey = None,
459
+ ):
460
+ output_attentions = (
461
+ output_attentions
462
+ if output_attentions is not None
463
+ else self.config.output_attentions
464
+ )
465
+ output_hidden_states = (
466
+ output_hidden_states
467
+ if output_hidden_states is not None
468
+ else self.config.output_hidden_states
469
+ )
470
+ return_dict = (
471
+ return_dict if return_dict is not None else self.config.return_dict
472
+ )
473
+
474
+ # pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
475
+
476
+ # # prepare encoder inputs
477
+ # if attention_mask is None:
478
+ # attention_mask = jnp.ones_like(input_ids)
479
+ # if position_ids is None:
480
+ # batch_size, sequence_length = input_ids.shape
481
+ # position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
482
+
483
+ # prepare decoder inputs
484
+ # if decoder_input_ids is None:
485
+ # decoder_input_ids = shift_tokens_right(
486
+ # input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
487
+ # ) # TODO: Check how to use this
488
+ if decoder_attention_mask is None:
489
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
490
+ if decoder_position_ids is None:
491
+ batch_size, sequence_length = decoder_input_ids.shape
492
+ decoder_position_ids = jnp.broadcast_to(
493
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
494
+ )
495
+
496
+ # Handle any PRNG if needed
497
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
498
+
499
+ return self.module.apply(
500
+ {"params": params or self.params},
501
+ pixel_values=jnp.array(pixel_values, dtype=jnp.float32),
502
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
503
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
504
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
505
+ output_attentions=output_attentions,
506
+ output_hidden_states=output_hidden_states,
507
+ return_dict=return_dict,
508
+ deterministic=not train,
509
+ rngs=rngs,
510
+ )
511
+
512
+
513
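+ # Generation head: overrides decode to apply the Marian LM head (optionally tied embeddings
+ # plus final_logits_bias) and implements the hooks FlaxGenerationMixin needs.
+ # Minimal usage sketch (checkpoint path and generation arguments are placeholders):
+ #   model = FlaxCLIPVisionMarianMT.from_pretrained("<path-to-checkpoint>")
+ #   output_ids = model.generate(input_ids=pixel_values, max_length=64, num_beams=4)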
+ class FlaxCLIPVisionMarianMT(
514
+ FlaxCLIPVisionMarianOuterPreTrainedModel
515
+ ):
516
+ module_class = FlaxCLIPVisionMarianMTModule
517
+ dtype: jnp.dtype = jnp.float32
518
+
519
+ def decode(
520
+ self,
521
+ decoder_input_ids,
522
+ encoder_outputs,
523
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
524
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
525
+ decoder_position_ids: Optional[jnp.ndarray] = None,
526
+ past_key_values: dict = None,
527
+ output_attentions: Optional[bool] = None,
528
+ output_hidden_states: Optional[bool] = None,
529
+ return_dict: Optional[bool] = None,
530
+ deterministic: bool = True,
531
+ params: dict = None,
532
+ dropout_rng: PRNGKey = None,
533
+ ):
534
+ output_attentions = (
535
+ output_attentions
536
+ if output_attentions is not None
537
+ else self.config.output_attentions
538
+ )
539
+ output_hidden_states = (
540
+ output_hidden_states
541
+ if output_hidden_states is not None
542
+ else self.config.output_hidden_states
543
+ )
544
+ return_dict = (
545
+ return_dict if return_dict is not None else self.config.return_dict
546
+ )
547
+
548
+ encoder_hidden_states = encoder_outputs[0]
549
+
550
+ if encoder_attention_mask is None:
551
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
552
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
553
+
554
+ batch_size, sequence_length = decoder_input_ids.shape
555
+ if decoder_attention_mask is None:
556
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
557
+
558
+ if decoder_position_ids is None:
559
+ if past_key_values is not None:
560
+ raise ValueError(
561
+ "Make sure to provide `decoder_position_ids` when passing `past_key_values`."
562
+ )
563
+
564
+ decoder_position_ids = jnp.broadcast_to(
565
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
566
+ )
567
+
568
+ # Handle any PRNG if needed
569
+ rngs = {}
570
+ if dropout_rng is not None:
571
+ rngs["dropout"] = dropout_rng
572
+
573
+ inputs = {"params": params or self.params}
574
+
575
+ # If past_key_values are passed, the cache is already initialized; a private flag, init_cache, has to be
576
+ # passed down to ensure the cache is used. The cache must also be marked as mutable so that
577
+ # it can be updated by the FlaxMarianAttention module.
578
+ if past_key_values:
579
+ inputs["cache"] = past_key_values
580
+ mutable = ["cache"]
581
+ else:
582
+ mutable = False
583
+
584
+ def _decoder_forward(
585
+ module,
586
+ decoder_input_ids,
587
+ decoder_attention_mask,
588
+ decoder_position_ids,
589
+ **kwargs,
590
+ ):
591
+ decoder_module = module._get_decoder_module()
592
+ outputs = decoder_module(
593
+ decoder_input_ids,
594
+ decoder_attention_mask,
595
+ decoder_position_ids,
596
+ **kwargs,
597
+ )
598
+ hidden_states = outputs[0]
599
+
600
+ if self.config.tie_word_embeddings:
601
+ shared_embedding = module.model.variables["params"]["shared"][
602
+ "embedding"
603
+ ]
604
+ lm_logits = module.lm_head.apply(
605
+ {"params": {"kernel": shared_embedding.T}}, hidden_states
606
+ )
607
+ else:
608
+ lm_logits = module.lm_head(hidden_states)
609
+
610
+ lm_logits += module.final_logits_bias
611
+ return lm_logits, outputs
612
+
613
+ outputs = self.module.apply(
614
+ inputs,
615
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
616
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
617
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
618
+ encoder_hidden_states=encoder_hidden_states,
619
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
620
+ output_attentions=output_attentions,
621
+ output_hidden_states=output_hidden_states,
622
+ return_dict=return_dict,
623
+ deterministic=deterministic,
624
+ rngs=rngs,
625
+ mutable=mutable,
626
+ method=_decoder_forward,
627
+ )
628
+
629
+ if past_key_values is None:
630
+ lm_logits, decoder_outputs = outputs
631
+ else:
632
+ (lm_logits, decoder_outputs), past = outputs
633
+
634
+ if return_dict:
635
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
636
+ logits=lm_logits,
637
+ hidden_states=decoder_outputs.hidden_states,
638
+ attentions=decoder_outputs.attentions,
639
+ cross_attentions=decoder_outputs.cross_attentions,
640
+ )
641
+ else:
642
+ outputs = (lm_logits,) + decoder_outputs[1:]
643
+
644
+ # add updated cache to model output
645
+ if past_key_values is not None and return_dict:
646
+ outputs["past_key_values"] = unfreeze(past["cache"])
647
+ return outputs
648
+ elif past_key_values is not None and not return_dict:
649
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
650
+
651
+ return outputs
652
+
653
+ def _adapt_logits_for_beam_search(self, logits):
654
+ """This function enforces the padding token never to be generated."""
655
+ logits = jax.ops.index_update(logits, jax.ops.index[:, :, self.config.marian_config.pad_token_id], float("-inf"))
656
+ return logits
657
+
658
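+ # Called by FlaxGenerationMixin before decoding starts: initializes the cache and builds a
+ # static, max_length-wide decoder attention mask together with matching position ids.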
+ def prepare_inputs_for_generation(
659
+ self,
660
+ decoder_input_ids,
661
+ max_length,
662
+ attention_mask: Optional[jnp.DeviceArray] = None,
663
+ decoder_attention_mask: Optional[jnp.DeviceArray] = None,
664
+ encoder_outputs=None,
665
+ **kwargs,
666
+ ):
667
+ # initializing the cache
668
+ batch_size, seq_length = decoder_input_ids.shape
669
+
670
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
671
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
672
+ # But since the decoder uses a causal mask, those positions are masked anyways.
673
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
674
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
675
+ if decoder_attention_mask is not None:
676
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
677
+ extended_attention_mask = lax.dynamic_update_slice(
678
+ extended_attention_mask, decoder_attention_mask, (0, 0)
679
+ )
680
+ else:
681
+ position_ids = jnp.broadcast_to(
682
+ jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
683
+ )
684
+
685
+ return {
686
+ "past_key_values": past_key_values,
687
+ "encoder_outputs": encoder_outputs,
688
+ "encoder_attention_mask": attention_mask,
689
+ "decoder_attention_mask": extended_attention_mask,
690
+ "decoder_position_ids": position_ids,
691
+ }
692
+
693
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
694
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
695
+ model_kwargs["decoder_position_ids"] = (
696
+ model_kwargs["decoder_position_ids"][:, -1:] + 1
697
+ )
698
+ return model_kwargs
699
+
700
+ @classmethod
701
+ def from_pretrained(cls, *args, **kwargs):
702
+ # At the moment fast initialization is not supported
703
+ # for composite models
704
+ # kwargs["_fast_init"] = False
705
+ return super().from_pretrained(*args, **kwargs)
706
+
707
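+ # Convenience constructor: loads a pretrained CLIP vision encoder and a pretrained MarianMT
+ # model separately and grafts their weights into the combined model. kwargs prefixed with
+ # "clip_vision_" / "marian_" are routed to the respective sub-model.
+ # Illustrative sketch (checkpoint names are placeholders):
+ #   model = FlaxCLIPVisionMarianMT.from_clip_vision_marian_pretrained(
+ #       "<clip-vision-checkpoint>", "<marian-mt-checkpoint>"
+ #   )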
+ @classmethod
708
+ def from_clip_vision_marian_pretrained(
709
+ cls,
710
+ clip_vision_model_name_or_path: str = None,
711
+ marian_model_name_or_path: str = None,
712
+ *model_args,
713
+ **kwargs,
714
+ ) -> FlaxCLIPVisionMarianPreTrainedModel:
715
+
716
+ kwargs_marian = {
717
+ argument[len("marian_") :]: value
718
+ for argument, value in kwargs.items()
719
+ if argument.startswith("marian_")
720
+ }
721
+
722
+ kwargs_clip_vision = {
723
+ argument[len("clip_vision_") :]: value
724
+ for argument, value in kwargs.items()
725
+ if argument.startswith("clip_vision_")
726
+ }
727
+
728
+ # remove marian, clip_vision kwargs from kwargs
729
+ for key in kwargs_marian.keys():
730
+ del kwargs["marian_" + key]
731
+ for key in kwargs_clip_vision.keys():
732
+ del kwargs["clip_vision_" + key]
733
+
734
+ # Load and initialize the marian and clip_vision model
735
+ marian_model = kwargs_marian.pop("model", None)
736
+ if marian_model is None:
737
+ assert (
738
+ marian_model_name_or_path is not None
739
+ ), "If `model` is not defined as an argument, a `marian_model_name_or_path` has to be defined"
740
+
741
+ if "config" not in kwargs_marian:
742
+ marian_config = MarianConfig.from_pretrained(marian_model_name_or_path)
743
+ kwargs_marian["config"] = marian_config
744
+
745
+ marian_model = FlaxMarianMTModel.from_pretrained(
746
+ marian_model_name_or_path, *model_args, **kwargs_marian
747
+ )
748
+
749
+ clip_vision_model = kwargs_clip_vision.pop("model", None)
750
+ if clip_vision_model is None:
751
+ assert (
752
+ clip_vision_model_name_or_path is not None
753
+ ), "If `model` is not defined as an argument, a `clip_vision_model_name_or_path` has to be defined"
754
+
755
+ if "config" not in kwargs_clip_vision:
756
+ clip_vision_config = CLIPVisionConfig.from_pretrained(
757
+ clip_vision_model_name_or_path
758
+ )
759
+ kwargs_clip_vision["config"] = clip_vision_config
760
+
761
+ clip_vision_model = FlaxCLIPVisionModel.from_pretrained(
762
+ clip_vision_model_name_or_path, *model_args, **kwargs_clip_vision
763
+ )
764
+
765
+ # instantiate config with corresponding kwargs
766
+ dtype = kwargs.pop("dtype", jnp.float32)
767
+ config = CLIPVisionMarianConfig.from_clip_vision_marian_configs(
768
+ clip_vision_model.config, marian_model.config, **kwargs
769
+ )
770
+
771
+ # init model
772
+ model = cls(config, *model_args, dtype=dtype, **kwargs)
773
+ model.params["model"]["encoder"] = clip_vision_model.params
774
+ model.params["model"]["decoder"] = marian_model.params["model"]["decoder"]
775
+ model.params["model"]["shared"] = marian_model.params["model"]["shared"]
776
+ model.params["final_logits_bias"] = marian_model.params["final_logits_bias"]
777
+
778
+ return model
model/flax_clip_vision_marian/modeling_clip_vision_marian_utils.py ADDED
@@ -0,0 +1,380 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from functools import partial
18
+ from pickle import UnpicklingError
19
+ from typing import Dict, Set, Tuple, Union
20
+
21
+ import flax.linen as nn
22
+ import jax
23
+ import jax.numpy as jnp
24
+ from flax.core.frozen_dict import FrozenDict, unfreeze
25
+ from flax.serialization import from_bytes, to_bytes
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+ from jax.random import PRNGKey
28
+
29
+ from transformers.configuration_utils import PretrainedConfig
30
+ from transformers.file_utils import (
31
+ FLAX_WEIGHTS_NAME,
32
+ WEIGHTS_NAME,
33
+ PushToHubMixin,
34
+ cached_path,
35
+ hf_bucket_url,
36
+ is_offline_mode,
37
+ is_remote_url,
38
+ )
39
+ from .generation_clip_vision_marian_utils import FlaxGenerationMixin
40
+ from transformers.modeling_flax_pytorch_utils import load_pytorch_checkpoint_in_flax_state_dict
41
+ from transformers.utils import logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ def quick_gelu(x):
48
+ return x * jax.nn.sigmoid(1.702 * x)
49
+
50
+
51
+ ACT2FN = {
52
+ "gelu": partial(nn.gelu, approximate=False),
53
+ "relu": nn.relu,
54
+ "silu": nn.swish,
55
+ "swish": nn.swish,
56
+ "gelu_new": partial(nn.gelu, approximate=True),
57
+ "quick_gelu": quick_gelu,
58
+ }
59
+
60
+
61
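+ # Trimmed-down port of transformers' FlaxPreTrainedModel that mixes in this project's
+ # FlaxGenerationMixin; it handles random weight init, parameter validation, and loading /
+ # saving via from_pretrained and save_pretrained.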
+ class FlaxCLIPVisionMarianPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
62
+ config_class = None
63
+ base_model_prefix = ""
64
+
65
+ def __init__(
66
+ self,
67
+ config: PretrainedConfig,
68
+ module: nn.Module,
69
+ input_shape: Tuple = (1, 1),
70
+ seed: int = 0,
71
+ dtype: jnp.dtype = jnp.float32,
72
+ ):
73
+ if config is None:
74
+ raise ValueError("config cannot be None")
75
+
76
+ if module is None:
77
+ raise ValueError("module cannot be None")
78
+
79
+ # Those are private to be exposed as typed property on derived classes.
80
+ self._config = config
81
+ self._module = module
82
+
83
+ # Those are public as their type is generic to every derived classes.
84
+ self.key = PRNGKey(seed)
85
+ self.dtype = dtype
86
+
87
+ # randomly initialized parameters
88
+ random_params = self.init_weights(self.key, input_shape)
89
+
90
+ # save required_params as set
91
+ self._required_params = set(flatten_dict(unfreeze(random_params)).keys())
92
+ self.params = random_params
93
+
94
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> Dict:
95
+ raise NotImplementedError(f"init method has to be implemented for {self}")
96
+
97
+ @classmethod
98
+ def _from_config(cls, config, **kwargs):
99
+ """
100
+ All context managers that the model should be initialized under go here.
101
+ """
102
+ return cls(config, **kwargs)
103
+
104
+ @property
105
+ def config(self) -> PretrainedConfig:
106
+ return self._config
107
+
108
+ @property
109
+ def module(self) -> nn.Module:
110
+ return self._module
111
+
112
+ @property
113
+ def params(self) -> Union[Dict, FrozenDict]:
114
+ return self._params
115
+
116
+ @property
117
+ def required_params(self) -> Set:
118
+ return self._required_params
119
+
120
+ @params.setter
121
+ def params(self, params: Union[Dict, FrozenDict]):
122
+ if isinstance(params, FrozenDict):
123
+ params = unfreeze(params)
124
+ param_keys = set(flatten_dict(params).keys())
125
+ if len(self.required_params - param_keys) > 0:
126
+ raise ValueError(
127
+ "Some parameters are missing. Make sure that `params` include the following "
128
+ f"parameters {self.required_params - param_keys}"
129
+ )
130
+ self._params = params
131
+
132
+ @classmethod
133
+ def from_pretrained(
134
+ cls,
135
+ pretrained_model_name_or_path: Union[str, os.PathLike],
136
+ dtype: jnp.dtype = jnp.float32,
137
+ *model_args,
138
+ **kwargs
139
+ ):
140
+ config = kwargs.pop("config", None)
141
+ cache_dir = kwargs.pop("cache_dir", None)
142
+ from_pt = kwargs.pop("from_pt", False)
143
+ ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
144
+ force_download = kwargs.pop("force_download", False)
145
+ resume_download = kwargs.pop("resume_download", False)
146
+ proxies = kwargs.pop("proxies", None)
147
+ local_files_only = kwargs.pop("local_files_only", False)
148
+ use_auth_token = kwargs.pop("use_auth_token", None)
149
+ revision = kwargs.pop("revision", None)
150
+ from_pipeline = kwargs.pop("_from_pipeline", None)
151
+ from_auto_class = kwargs.pop("_from_auto", False)
152
+
153
+ user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class}
154
+ if from_pipeline is not None:
155
+ user_agent["using_pipeline"] = from_pipeline
156
+
157
+ if is_offline_mode() and not local_files_only:
158
+ logger.info("Offline mode: forcing local_files_only=True")
159
+ local_files_only = True
160
+
161
+ # Load config if we don't provide a configuration
162
+ if not isinstance(config, PretrainedConfig):
163
+ config_path = config if config is not None else pretrained_model_name_or_path
164
+ config, model_kwargs = cls.config_class.from_pretrained(
165
+ config_path,
166
+ *model_args,
167
+ cache_dir=cache_dir,
168
+ return_unused_kwargs=True,
169
+ force_download=force_download,
170
+ resume_download=resume_download,
171
+ proxies=proxies,
172
+ local_files_only=local_files_only,
173
+ use_auth_token=use_auth_token,
174
+ revision=revision,
175
+ _from_auto=from_auto_class,
176
+ _from_pipeline=from_pipeline,
177
+ **kwargs,
178
+ )
179
+ else:
180
+ model_kwargs = kwargs
181
+
182
+ # Add the dtype to model_kwargs
183
+ model_kwargs["dtype"] = dtype
184
+
185
+ # Load model
186
+ if pretrained_model_name_or_path is not None:
187
+ if os.path.isdir(pretrained_model_name_or_path):
188
+ if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
189
+ # Load from a PyTorch checkpoint
190
+ archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
191
+ elif os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
192
+ # Load from a Flax checkpoint
193
+ archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
194
+ else:
195
+ raise EnvironmentError(
196
+ f"Error no file named {[FLAX_WEIGHTS_NAME, WEIGHTS_NAME]} found in directory "
197
+ f"{pretrained_model_name_or_path} or `from_pt` set to False"
198
+ )
199
+ elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
200
+ archive_file = pretrained_model_name_or_path
201
+ else:
202
+ archive_file = hf_bucket_url(
203
+ pretrained_model_name_or_path,
204
+ filename=WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME,
205
+ revision=revision,
206
+ )
207
+
208
+ # redirect to the cache, if necessary
209
+ try:
210
+ resolved_archive_file = cached_path(
211
+ archive_file,
212
+ cache_dir=cache_dir,
213
+ force_download=force_download,
214
+ proxies=proxies,
215
+ resume_download=resume_download,
216
+ local_files_only=local_files_only,
217
+ use_auth_token=use_auth_token,
218
+ user_agent=user_agent,
219
+ )
220
+ except EnvironmentError as err:
221
+ logger.error(err)
222
+ msg = (
223
+ f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
224
+ f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
225
+ f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {WEIGHTS_NAME}.\n\n"
226
+ )
227
+ raise EnvironmentError(msg)
228
+
229
+ if resolved_archive_file == archive_file:
230
+ logger.info(f"loading weights file {archive_file}")
231
+ else:
232
+ logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
233
+ else:
234
+ resolved_archive_file = None
235
+
236
+ # init random models
237
+ model = cls(config, *model_args, **model_kwargs)
238
+
239
+ if from_pt:
240
+ state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file)
241
+ else:
242
+ with open(resolved_archive_file, "rb") as state_f:
243
+ try:
244
+ state = from_bytes(cls, state_f.read())
245
+ except UnpicklingError:
246
+ raise EnvironmentError(f"Unable to convert {archive_file} to Flax deserializable object. ")
247
+ # make sure all arrays are stored as jnp.arrays
248
+ # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:
249
+ # https://github.com/google/flax/issues/1261
250
+ state = jax.tree_util.tree_map(jnp.array, state)
251
+
252
+ # if model is base model only use model_prefix key
253
+ if cls.base_model_prefix not in dict(model.params) and cls.base_model_prefix in state:
254
+ state = state[cls.base_model_prefix]
255
+
256
+ # if model is head model and we are loading weights from base model
257
+ # we initialize new params dict with base_model_prefix
258
+ if cls.base_model_prefix in dict(model.params) and cls.base_model_prefix not in state:
259
+ state = {cls.base_model_prefix: state}
260
+
261
+ # flatten dicts
262
+ state = flatten_dict(state)
263
+
264
+ random_state = flatten_dict(unfreeze(model.params))
265
+
266
+ missing_keys = model.required_params - set(state.keys())
267
+ unexpected_keys = set(state.keys()) - model.required_params
268
+
269
+ # mismatched_keys contains tuples (key, checkpoint_shape, model_shape) for weights in the checkpoint
270
+ # whose shape does not match the corresponding weight in the model.
271
+ mismatched_keys = []
272
+ for key in state.keys():
273
+ if key in random_state and state[key].shape != random_state[key].shape:
274
+ if ignore_mismatched_sizes:
275
+ mismatched_keys.append((key, state[key].shape, random_state[key].shape))
276
+ state[key] = random_state[key]
277
+ else:
278
+ raise ValueError(
279
+ f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
280
+ f"{state[key].shape} which is incompatible with the model shape {random_state[key].shape}. "
281
+ "Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this "
282
+ "model."
283
+ )
284
+
285
+ # add missing keys as random parameters
286
+ for missing_key in missing_keys:
287
+ state[missing_key] = random_state[missing_key]
288
+
289
+ # remove unexpected keys to not be saved again
290
+ for unexpected_key in unexpected_keys:
291
+ del state[unexpected_key]
292
+
293
+ if len(unexpected_keys) > 0:
294
+ logger.warning(
295
+ f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
296
+ f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
297
+ f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
298
+ f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
299
+ f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
300
+ f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
301
+ )
302
+ else:
303
+ logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
304
+
305
+ if len(missing_keys) > 0:
306
+ logger.warning(
307
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
308
+ f"and are newly initialized: {missing_keys}\n"
309
+ f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
310
+ )
311
+ elif len(mismatched_keys) == 0:
312
+ logger.info(
313
+ f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
314
+ f"If your task is similar to the task the model of the checkpoint was trained on, "
315
+ f"you can already use {model.__class__.__name__} for predictions without further training."
316
+ )
317
+ if len(mismatched_keys) > 0:
318
+ mismatched_warning = "\n".join(
319
+ [
320
+ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
321
+ for key, shape1, shape2 in mismatched_keys
322
+ ]
323
+ )
324
+ logger.warning(
325
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
326
+ f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
327
+ f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
328
+ )
329
+
330
+ # set correct parameters
331
+ model.params = unflatten_dict(state)
332
+
333
+ return model
334
+
335
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], params=None, push_to_hub=False, **kwargs):
336
+ """
337
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
338
+ :func:`~transformers.FlaxPreTrainedModel.from_pretrained` class method.
339
+ Arguments:
340
+ save_directory (:obj:`str` or :obj:`os.PathLike`):
341
+ Directory to which to save. Will be created if it doesn't exist.
342
+ push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
343
+ Whether or not to push your model to the Hugging Face model hub after saving it.
344
+ .. warning::
345
+ Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
346
+ :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
347
+ pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
348
+ instead.
349
+ kwargs:
350
+ Additional key word arguments passed along to the
351
+ :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
352
+ """
353
+ if os.path.isfile(save_directory):
354
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
355
+ return
356
+
357
+ if push_to_hub:
358
+ commit_message = kwargs.pop("commit_message", None)
359
+ repo = self._create_or_get_repo(save_directory, **kwargs)
360
+
361
+ os.makedirs(save_directory, exist_ok=True)
362
+
363
+ # get abs dir
364
+ save_directory = os.path.abspath(save_directory)
365
+ # save config as well
366
+ self.config.architectures = [self.__class__.__name__[4:]]
367
+ self.config.save_pretrained(save_directory)
368
+
369
+ # save model
370
+ output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)
371
+ with open(output_model_file, "wb") as f:
372
+ params = params if params is not None else self.params
373
+ model_bytes = to_bytes(params)
374
+ f.write(model_bytes)
375
+
376
+ logger.info(f"Model weights saved in {output_model_file}")
377
+
378
+ if push_to_hub:
379
+ url = self._push_to_hub(repo, commit_message=commit_message)
380
+ logger.info(f"Model pushed to the hub in this commit: {url}")
requirements.txt ADDED
@@ -0,0 +1,8 @@
1
+ plotly==5.1.0
2
+ streamlit==0.84.1
3
+ git+https://github.com/huggingface/transformers.git
4
+ torchvision==0.10.0
5
+ mtranslate==1.8
6
+ black==21.7b0
7
+ flax==0.3.4
8
+ sentencepiece==0.1.96
sections/abstract.md ADDED
File without changes
sections/acknowledgements.md ADDED
File without changes
sections/caveats.md ADDED
File without changes
sections/challenges.md ADDED
File without changes
sections/intro.md ADDED
File without changes
sections/pretraining.md ADDED
File without changes
sections/references.md ADDED
File without changes
sections/social_impact.md ADDED
File without changes
sections/usage.md ADDED
File without changes
session.py ADDED
@@ -0,0 +1,89 @@
1
+ #
2
+ # Code for managing session state, which is needed for multi-input forms
3
+ # See https://github.com/streamlit/streamlit/issues/1557
4
+ #
5
+ # This code is taken from
6
+ # https://gist.github.com/okld/0aba4869ba6fdc8d49132e6974e2e662
7
+ #
8
+ from streamlit.hashing import _CodeHasher
9
+ from streamlit.report_thread import get_report_ctx
10
+ from streamlit.server.server import Server
11
+
12
+
13
+ class _SessionState:
14
+ def __init__(self, session, hash_funcs):
15
+ """Initialize SessionState instance."""
16
+ self.__dict__["_state"] = {
17
+ "data": {},
18
+ "hash": None,
19
+ "hasher": _CodeHasher(hash_funcs),
20
+ "is_rerun": False,
21
+ "session": session,
22
+ }
23
+
24
+ def __call__(self, **kwargs):
25
+ """Initialize state data once."""
26
+ for item, value in kwargs.items():
27
+ if item not in self._state["data"]:
28
+ self._state["data"][item] = value
29
+
30
+ def __getitem__(self, item):
31
+ """Return a saved state value, None if item is undefined."""
32
+ return self._state["data"].get(item, None)
33
+
34
+ def __getattr__(self, item):
35
+ """Return a saved state value, None if item is undefined."""
36
+ return self._state["data"].get(item, None)
37
+
38
+ def __setitem__(self, item, value):
39
+ """Set state value."""
40
+ self._state["data"][item] = value
41
+
42
+ def __setattr__(self, item, value):
43
+ """Set state value."""
44
+ self._state["data"][item] = value
45
+
46
+ def clear(self):
47
+ """Clear session state and request a rerun."""
48
+ self._state["data"].clear()
49
+ self._state["session"].request_rerun()
50
+
51
+ def sync(self):
52
+ """
53
+ Rerun the app with all state values up to date from the beginning to
54
+ fix rollbacks.
55
+ """
56
+ data_to_bytes = self._state["hasher"].to_bytes(self._state["data"], None)
57
+
58
+ # Ensure to rerun only once to avoid infinite loops
59
+ # caused by a constantly changing state value at each run.
60
+ #
61
+ # Example: state.value += 1
62
+ if self._state["is_rerun"]:
63
+ self._state["is_rerun"] = False
64
+
65
+ elif self._state["hash"] is not None:
66
+ if self._state["hash"] != data_to_bytes:
67
+ self._state["is_rerun"] = True
68
+ self._state["session"].request_rerun()
69
+
70
+ self._state["hash"] = data_to_bytes
71
+
72
+
73
+ def _get_session():
74
+ session_id = get_report_ctx().session_id
75
+ session_info = Server.get_current()._get_session_info(session_id)
76
+
77
+ if session_info is None:
78
+ raise RuntimeError("Couldn't get your Streamlit Session object.")
79
+
80
+ return session_info.session
81
+
82
+
83
+ def _get_state(hash_funcs=None):
84
+ session = _get_session()
85
+
86
+ if not hasattr(session, "_custom_session_state"):
87
+ session._custom_session_state = _SessionState(session, hash_funcs)
88
+
89
+ return session._custom_session_state
utils.py ADDED
@@ -0,0 +1,34 @@
1
+ from torchvision.io import read_image, ImageReadMode
2
+ import torch
3
+ import numpy as np
4
+ from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
5
+ from torchvision.transforms.functional import InterpolationMode
6
+ from PIL import Image
7
+
8
+
9
+ class Transform(torch.nn.Module):
10
+ def __init__(self, image_size):
11
+ super().__init__()
12
+ self.transforms = torch.nn.Sequential(
13
+ Resize([image_size], interpolation=InterpolationMode.BICUBIC),
14
+ CenterCrop(image_size),
15
+ ConvertImageDtype(torch.float),
16
+ Normalize(
17
+ (0.48145466, 0.4578275, 0.40821073),
18
+ (0.26862954, 0.26130258, 0.27577711),
19
+ ),
20
+ )
21
+
22
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
23
+ with torch.no_grad():
24
+ x = self.transforms(x)
25
+ return x
26
+
27
+
28
+ transform = Transform(224)
29
+
30
+ def get_transformed_image(image):
31
+ if isinstance(image, np.ndarray) and image.shape[-1] == 3:
32
+ image = image.transpose(2, 0, 1)
33
+ image = torch.tensor(image)
34
+ return transform(image).unsqueeze(0).permute(0, 2, 3, 1).numpy()