diff --git a/.gitattributes b/.gitattributes index 71d2815f7a40968760e021fc0d3cd628af266d6e..91eb3488317a97bbdd24aeca5ca7175279962979 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1576,3 +1576,4 @@ evalkit_tf446/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filte evalkit_tf449/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/lite/dist/assets/gradio_client-1.5.3-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text evalkit_cambrian/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +infer_4_47_1/lib/python3.10/site-packages/fontTools/pens/momentsPen.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..02c4b7b754b295016c23b114213d1dd0353363e1 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
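+
+# The original Chinese-CLIP checkpoint stores each attention layer's query/key/value
+# projections fused in a single `in_proj_weight` / `in_proj_bias`; `copy_attn_layer`
+# below splits them with `chunk(3, dim=0)` into the separate q/k/v projections that the
+# Hugging Face implementation expects.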
+ +import argparse + +import torch + +from transformers import ChineseCLIPConfig, ChineseCLIPModel + + +def copy_attn_layer(hf_attn_layer, pt_weights, prefix): + q_proj, k_proj, v_proj = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0) + q_proj_bias, k_proj_bias, v_proj_bias = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0) + + out_proj_weights = pt_weights[f"{prefix}.out_proj.weight"] + out_proj_bias = pt_weights[f"{prefix}.out_proj.bias"] + + hf_attn_layer.q_proj.weight.data = q_proj + hf_attn_layer.q_proj.bias.data = q_proj_bias + + hf_attn_layer.k_proj.weight.data = k_proj + hf_attn_layer.k_proj.bias.data = k_proj_bias + + hf_attn_layer.v_proj.weight.data = v_proj + hf_attn_layer.v_proj.bias.data = v_proj_bias + + hf_attn_layer.out_proj.weight.data = out_proj_weights + hf_attn_layer.out_proj.bias.data = out_proj_bias + + +def copy_mlp(hf_mlp, pt_weights, prefix): + copy_linear(hf_mlp.fc1, pt_weights, f"{prefix}.c_fc") + copy_linear(hf_mlp.fc2, pt_weights, f"{prefix}.c_proj") + + +def copy_linear(hf_linear, pt_weights, prefix): + hf_linear.weight.data = pt_weights[f"{prefix}.weight"].data + hf_linear.bias.data = pt_weights[f"{prefix}.bias"].data + + +def copy_layer(hf_layer, pt_weights, prefix): + # copy layer norms + copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1") + copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2") + + # copy MLP + copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp") + + # copy attn + copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn") + + +def copy_layers(hf_layers, pt_weights, prefix): + for layer_id, hf_layer in enumerate(hf_layers): + copy_layer(hf_layer, pt_weights, f"{prefix}.{layer_id}") + + +def copy_text_model_and_projection(hf_model, pt_weights): + # copy projection + hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T + + # copy text encoder + for name, param in hf_model.text_model.named_parameters(): + param.data = pt_weights[f"bert.{name}"].data + + +def copy_vision_model_and_projection(hf_model, pt_weights): + # copy projection + hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T + + # copy layer norms + copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, "visual.ln_pre") + copy_linear(hf_model.vision_model.post_layernorm, pt_weights, "visual.ln_post") + + # copy embeddings + hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data + hf_model.vision_model.embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data + hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data + + # copy encoder + copy_layers(hf_model.vision_model.encoder.layers, pt_weights, "visual.transformer.resblocks") + + +@torch.no_grad() +def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): + """ + Copy/paste/tweak model's weights to transformers design. + """ + + assert config_path is not None, "Please specify the ChineseCLIP model config of the corresponding model size." 
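+    # The original checkpoint nests the weights under a "state_dict" key, and keys may
+    # carry a "module." prefix left over from DataParallel training; both are handled
+    # when the state dict is loaded below.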
+ config = ChineseCLIPConfig.from_pretrained(config_path) + + hf_model = ChineseCLIPModel(config).eval() + + pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"] + pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()} + + copy_text_model_and_projection(hf_model, pt_weights) + copy_vision_model_and_projection(hf_model, pt_weights) + hf_model.logit_scale.data = pt_weights["logit_scale"].data + + hf_model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output folder storing converted hf PyTorch model.", + ) + parser.add_argument( + "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint." + ) + parser.add_argument( + "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert." + ) + args = parser.parse_args() + + convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) + print("The conversion is finished!") diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..09aa4106b718ebf39c793b8325892670af566fe3 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Chinese-CLIP.""" + +import warnings + +from ...utils import logging +from .image_processing_chinese_clip import ChineseCLIPImageProcessor + + +logger = logging.get_logger(__name__) + + +class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): + def __init__(self, *args, **kwargs) -> None: + warnings.warn( + "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." + " Please use ChineseCLIPImageProcessor instead.", + FutureWarning, + ) + super().__init__(*args, **kwargs) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..25d60d1ee765efb08eaa6242530bf9e8a93fafa9 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py @@ -0,0 +1,109 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tf_available, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_efficientformer": [ + "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "EfficientFormerConfig", + ] +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_efficientformer"] = [ + "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "EfficientFormerForImageClassification", + "EfficientFormerForImageClassificationWithTeacher", + "EfficientFormerModel", + "EfficientFormerPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_efficientformer"] = [ + "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFEfficientFormerForImageClassification", + "TFEfficientFormerForImageClassificationWithTeacher", + "TFEfficientFormerModel", + "TFEfficientFormerPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_efficientformer import EfficientFormerImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_efficientformer import ( + EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + EfficientFormerForImageClassification, + EfficientFormerForImageClassificationWithTeacher, + EfficientFormerModel, + EfficientFormerPreTrainedModel, + ) + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_efficientformer import ( + TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFEfficientFormerForImageClassification, + TFEfficientFormerForImageClassificationWithTeacher, + TFEfficientFormerModel, + TFEfficientFormerPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4f8b488f3e52f69388c5d6fe18677a28849b396 Binary files 
/dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c1b5229cb681ba54312f180423429e3c5d32cf1 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc1a09ab3da19d2f08e00a98127154fbaa1474dc Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77c72c30f0ca7aa755e7c58ba2a208b45c56afa7 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afb03ee9894b0ebe58c9ec864e3eccb32719b993 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
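+
+# Lazy-import pattern used across the library: `_import_structure` names the public
+# objects per submodule, and `_LazyModule` defers the heavy imports (e.g. torch) until
+# an attribute is first accessed.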
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_univnet": [ + "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UnivNetConfig", + ], + "feature_extraction_univnet": ["UnivNetFeatureExtractor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_univnet"] = [ + "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "UnivNetModel", + ] + + +if TYPE_CHECKING: + from .configuration_univnet import ( + UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + UnivNetConfig, + ) + from .feature_extraction_univnet import UnivNetFeatureExtractor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_univnet import ( + UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, + UnivNetModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cee23bdcfad42cfdd1fb12abeb4fca0fbf7c7fbc Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py new file mode 100644 index 0000000000000000000000000000000000000000..30520b7fa14725b0bdaf9e0c7a4aed92ad8ea318 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py @@ -0,0 +1,162 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
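+
+# This script maps the generator ("model_g") weights from the original maum-ai/univnet
+# checkpoint onto the Hugging Face `UnivNetModel` parameter names. Weight-normalized
+# convolutions keep their separate `weight_g` / `weight_v` parameters during renaming.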
+
+import argparse
+
+import torch
+
+from transformers import UnivNetConfig, UnivNetModel, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.univnet")
+
+
+def get_kernel_predictor_key_mapping(config: UnivNetConfig, old_prefix: str = "", new_prefix: str = ""):
+    mapping = {}
+    # Initial conv layer
+    mapping[f"{old_prefix}.input_conv.0.weight_g"] = f"{new_prefix}.input_conv.weight_g"
+    mapping[f"{old_prefix}.input_conv.0.weight_v"] = f"{new_prefix}.input_conv.weight_v"
+    mapping[f"{old_prefix}.input_conv.0.bias"] = f"{new_prefix}.input_conv.bias"
+
+    # Kernel predictor resnet blocks
+    for i in range(config.kernel_predictor_num_blocks):
+        mapping[f"{old_prefix}.residual_convs.{i}.1.weight_g"] = f"{new_prefix}.resblocks.{i}.conv1.weight_g"
+        mapping[f"{old_prefix}.residual_convs.{i}.1.weight_v"] = f"{new_prefix}.resblocks.{i}.conv1.weight_v"
+        mapping[f"{old_prefix}.residual_convs.{i}.1.bias"] = f"{new_prefix}.resblocks.{i}.conv1.bias"
+
+        mapping[f"{old_prefix}.residual_convs.{i}.3.weight_g"] = f"{new_prefix}.resblocks.{i}.conv2.weight_g"
+        mapping[f"{old_prefix}.residual_convs.{i}.3.weight_v"] = f"{new_prefix}.resblocks.{i}.conv2.weight_v"
+        mapping[f"{old_prefix}.residual_convs.{i}.3.bias"] = f"{new_prefix}.resblocks.{i}.conv2.bias"
+
+    # Kernel output conv
+    mapping[f"{old_prefix}.kernel_conv.weight_g"] = f"{new_prefix}.kernel_conv.weight_g"
+    mapping[f"{old_prefix}.kernel_conv.weight_v"] = f"{new_prefix}.kernel_conv.weight_v"
+    mapping[f"{old_prefix}.kernel_conv.bias"] = f"{new_prefix}.kernel_conv.bias"
+
+    # Bias output conv
+    mapping[f"{old_prefix}.bias_conv.weight_g"] = f"{new_prefix}.bias_conv.weight_g"
+    mapping[f"{old_prefix}.bias_conv.weight_v"] = f"{new_prefix}.bias_conv.weight_v"
+    mapping[f"{old_prefix}.bias_conv.bias"] = f"{new_prefix}.bias_conv.bias"
+
+    return mapping
+
+
+def get_key_mapping(config: UnivNetConfig):
+    mapping = {}
+
+    # NOTE: initial conv layer keys are the same
+
+    # LVC Residual blocks
+    for i in range(len(config.resblock_stride_sizes)):
+        # LVCBlock initial convt layer
+        mapping[f"res_stack.{i}.convt_pre.1.weight_g"] = f"resblocks.{i}.convt_pre.weight_g"
+        mapping[f"res_stack.{i}.convt_pre.1.weight_v"] = f"resblocks.{i}.convt_pre.weight_v"
+        mapping[f"res_stack.{i}.convt_pre.1.bias"] = f"resblocks.{i}.convt_pre.bias"
+
+        # Kernel predictor
+        kernel_predictor_mapping = get_kernel_predictor_key_mapping(
+            config, old_prefix=f"res_stack.{i}.kernel_predictor", new_prefix=f"resblocks.{i}.kernel_predictor"
+        )
+        mapping.update(kernel_predictor_mapping)
+
+        # LVC Residual blocks
+        for j in range(len(config.resblock_dilation_sizes[i])):
+            mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_g"] = f"resblocks.{i}.resblocks.{j}.conv.weight_g"
+            mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_v"] = f"resblocks.{i}.resblocks.{j}.conv.weight_v"
+            mapping[f"res_stack.{i}.conv_blocks.{j}.1.bias"] = f"resblocks.{i}.resblocks.{j}.conv.bias"
+
+    # Output conv layer
+    mapping["conv_post.1.weight_g"] = "conv_post.weight_g"
+    mapping["conv_post.1.weight_v"] = "conv_post.weight_v"
+    mapping["conv_post.1.bias"] = "conv_post.bias"
+
+    return mapping
+
+
+def rename_state_dict(state_dict, keys_to_modify, keys_to_remove):
+    model_state_dict = {}
+    for key, value in state_dict.items():
+        if key in keys_to_remove:
+            continue
+
+        if key in keys_to_modify:
+            new_key = keys_to_modify[key]
+            model_state_dict[new_key] = value
+        else:
+            model_state_dict[key] = value
+    return model_state_dict
+
+
+def convert_univnet_checkpoint(
+    checkpoint_path,
+
pytorch_dump_folder_path, + config_path=None, + repo_id=None, + safe_serialization=False, +): + model_state_dict_base = torch.load(checkpoint_path, map_location="cpu") + # Get the generator's state dict + state_dict = model_state_dict_base["model_g"] + + if config_path is not None: + config = UnivNetConfig.from_pretrained(config_path) + else: + config = UnivNetConfig() + + keys_to_modify = get_key_mapping(config) + keys_to_remove = set() + hf_state_dict = rename_state_dict(state_dict, keys_to_modify, keys_to_remove) + + model = UnivNetModel(config) + # Apply weight norm since the original checkpoint has weight norm applied + model.apply_weight_norm() + model.load_state_dict(hf_state_dict) + # Remove weight norm in preparation for inference + model.remove_weight_norm() + + model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) + + if repo_id: + print("Pushing to the hub...") + model.push_to_hub(repo_id) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + parser.add_argument( + "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." + ) + parser.add_argument( + "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors`." + ) + + args = parser.parse_args() + + convert_univnet_checkpoint( + args.checkpoint_path, + args.pytorch_dump_folder_path, + args.config_path, + args.push_to_hub, + args.safe_serialization, + ) + + +if __name__ == "__main__": + main() diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py new file mode 100644 index 0000000000000000000000000000000000000000..067aacc3d8c8ca51336680ee7afe8a9fec677fd7 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py @@ -0,0 +1,456 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for UnivNetModel.""" + +from typing import Any, Dict, List, Optional, Union + +import numpy as np + +from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import PaddingStrategy, TensorType, logging + + +logger = logging.get_logger(__name__) + + +class UnivNetFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a UnivNet feature extractor. 
+
+    This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The
+    STFT implementation follows that of Tacotron 2 and Hifi-GAN.
+
+    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+    most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+    Args:
+        feature_size (`int`, *optional*, defaults to 1):
+            The feature dimension of the extracted features.
+        sampling_rate (`int`, *optional*, defaults to 24000):
+            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+        padding_value (`float`, *optional*, defaults to 0.0):
+            The value to pad with when applying the padding strategy defined by the `padding` argument to
+            [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to
+            `__call__` will also use this padding value.
+        do_normalize (`bool`, *optional*, defaults to `False`):
+            Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the
+            performance for some models.
+        num_mel_bins (`int`, *optional*, defaults to 100):
+            The number of mel-frequency bins in the extracted spectrogram features. This should match
+            `UnivNetModel.config.num_mel_bins`.
+        hop_length (`int`, *optional*, defaults to 256):
+            The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note
+            that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take
+            the `hop_length` in ms.
+        win_length (`int`, *optional*, defaults to 1024):
+            The direct number of samples for each sliding window. Note that this is different from other audio feature
+            extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms.
+        win_function (`str`, *optional*, defaults to `"hann_window"`):
+            Name for the window function used for windowing, must be accessible via `torch.{win_function}`
+        filter_length (`int`, *optional*, defaults to 1024):
+            The number of FFT components to use. If `None`, this is determined using
+            `transformers.audio_utils.optimal_fft_length`.
+        max_length_s (`int`, *optional*, defaults to 10):
+            The maximum input length of the model in seconds. This is used to pad the audio.
+        fmin (`float`, *optional*, defaults to 0.0):
+            Minimum mel frequency in Hz.
+        fmax (`float`, *optional*):
+            Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`.
+        mel_floor (`float`, *optional*, defaults to 1e-09):
+            Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is
+            different from that in [`transformers.audio_utils.spectrogram`].
+        center (`bool`, *optional*, defaults to `False`):
+            Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame
+            `t` will start at time `t * hop_length`.
+        compression_factor (`float`, *optional*, defaults to 1.0):
+            The multiplicative compression factor for dynamic range compression during spectral normalization.
+        compression_clip_val (`float`, *optional*, defaults to 1e-05):
+            The clip value applied to the waveform before applying dynamic range compression during spectral
+            normalization.
+        normalize_min (`float`, *optional*, defaults to -11.512925148010254):
+            The min value used for Tacotron 2-style linear normalization. The default is the original value from the
+            Tacotron 2 implementation.
+ normalize_max (`float`, *optional*, defaults to 2.3143386840820312): + The max value used for Tacotron 2-style linear normalization. The default is the original value from the + Tacotron 2 implementation. + model_in_channels (`int`, *optional*, defaults to 64): + The number of input channels to the [`UnivNetModel`] model. This should match + `UnivNetModel.config.model_in_channels`. + pad_end_length (`int`, *optional*, defaults to 10): + If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The + number of appended samples will be `pad_end_length * hop_length`. + return_attention_mask (`bool`, *optional*, defaults to `True`): + Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`. + """ + + model_input_names = ["input_features", "noise_sequence", "padding_mask"] + + def __init__( + self, + feature_size: int = 1, + sampling_rate: int = 24000, + padding_value: float = 0.0, + do_normalize: bool = False, + num_mel_bins: int = 100, + hop_length: int = 256, + win_length: int = 1024, + win_function: str = "hann_window", + filter_length: Optional[int] = 1024, + max_length_s: int = 10, + fmin: float = 0.0, + fmax: Optional[float] = None, + mel_floor: float = 1e-9, + center: bool = False, + compression_factor: float = 1.0, + compression_clip_val: float = 1e-5, + normalize_min: float = -11.512925148010254, + normalize_max: float = 2.3143386840820312, + model_in_channels: int = 64, + pad_end_length: int = 10, + return_attention_mask=True, + **kwargs, + ): + super().__init__( + feature_size=feature_size, + sampling_rate=sampling_rate, + padding_value=padding_value, + return_attention_mask=return_attention_mask, + **kwargs, + ) + + self.do_normalize = do_normalize + + self.num_mel_bins = num_mel_bins + self.hop_length = hop_length + self.win_length = win_length + self.win_function = win_function + self.filter_length = filter_length + self.fmin = fmin + if fmax is None: + # Follows the librosa.filters.mel implementation + fmax = float(sampling_rate) / 2 + self.fmax = fmax + self.mel_floor = mel_floor + + self.max_length_s = max_length_s + self.num_max_samples = max_length_s * sampling_rate + + if self.filter_length is None: + self.n_fft = optimal_fft_length(self.win_length) + else: + self.n_fft = self.filter_length + self.n_freqs = (self.n_fft // 2) + 1 + + self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True) + + self.mel_filters = mel_filter_bank( + num_frequency_bins=self.n_freqs, + num_mel_filters=self.num_mel_bins, + min_frequency=self.fmin, + max_frequency=self.fmax, + sampling_rate=self.sampling_rate, + norm="slaney", + mel_scale="slaney", + ) + + self.center = center + self.compression_factor = compression_factor + self.compression_clip_val = compression_clip_val + self.normalize_min = normalize_min + self.normalize_max = normalize_max + self.model_in_channels = model_in_channels + self.pad_end_length = pad_end_length + + def normalize(self, spectrogram): + return 2 * ((spectrogram - self.normalize_min) / (self.normalize_max - self.normalize_min)) - 1 + + def denormalize(self, spectrogram): + return self.normalize_min + (self.normalize_max - self.normalize_min) * ((spectrogram + 1) / 2) + + def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray: + """ + Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by + `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode. 
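+        After the STFT and mel projection, spectral normalization maps the mel spectrogram through
+        `log(clip(mel, min=compression_clip_val) * compression_factor)`, the same dynamic range compression used by
+        the original vocoder (see the implementation below).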
+
+        Args:
+            waveform (`np.ndarray` of shape `(length,)`):
+                The input waveform. This must be a single real-valued, mono waveform.
+
+        Returns:
+            `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`.
+        """
+        # Do custom padding based on the official MelGAN and Hifi-GAN implementations
+        # See https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/utils/stft.py#L84-L86
+        waveform = np.pad(
+            waveform,
+            (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),
+            mode="reflect",
+        )
+
+        # Get the complex spectrogram.
+        # Note: waveform must be unbatched currently due to the implementation of spectrogram(...).
+        complex_spectrogram = spectrogram(
+            waveform,
+            window=self.window,
+            frame_length=self.n_fft,
+            hop_length=self.hop_length,
+            fft_length=self.n_fft,
+            power=None,
+            center=self.center,
+            mel_filters=None,
+            mel_floor=None,
+        )
+
+        # Apply the MEL filter bank and MEL floor manually since UnivNet uses a slightly different implementation
+        amplitude_spectrogram = np.sqrt(
+            np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor
+        )
+        mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram)
+
+        # Perform spectral normalization to get the log mel spectrogram.
+        log_mel_spectrogram = np.log(
+            np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor
+        )
+
+        # Return spectrogram with num_mel_bins last
+        return log_mel_spectrogram.T
+
+    def generate_noise(
+        self,
+        noise_length: int,
+        generator: Optional[np.random.Generator] = None,
+    ) -> np.ndarray:
+        """
+        Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of
+        [`UnivNetModel.forward`].
+
+        Args:
+            noise_length (`int`):
+                The length (dim 0) of the generated noise.
+            generator (`numpy.random.Generator`, *optional*, defaults to `None`):
+                An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a
+                new generator with fresh entropy will be created.
+
+        Returns:
+            `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length,
+            self.model_in_channels)`.
+        """
+        if generator is None:
+            generator = np.random.default_rng()
+
+        noise_shape = (noise_length, self.model_in_channels)
+        noise = generator.standard_normal(noise_shape, dtype=np.float32)
+
+        return noise
+
+    def batch_decode(self, waveforms, waveform_lengths=None) -> List[np.ndarray]:
+        r"""
+        Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D
+        audio waveform arrays and not a single tensor/array because in general the waveforms will have different
+        lengths after removing padding.
+
+        Args:
+            waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+                The batched output waveforms from the [`UnivNetModel`].
+            waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
+                The batched lengths of each waveform before padding.
+
+        Returns:
+            `List[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed.
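+
+        Example (illustrative sketch; the waveforms here are random stand-ins for `UnivNetModel` outputs):
+
+        ```python
+        >>> import torch
+        >>> from transformers import UnivNetFeatureExtractor
+
+        >>> feature_extractor = UnivNetFeatureExtractor()
+        >>> waveforms = torch.randn(2, 24000)  # batch of 2 padded waveforms
+        >>> waveform_lengths = torch.tensor([24000, 12000])  # unpadded lengths
+        >>> audio = feature_extractor.batch_decode(waveforms, waveform_lengths)
+        >>> [a.shape for a in audio]
+        [(24000,), (12000,)]
+        ```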
+ """ + # Collapse the batched waveform tensor to a list of 1D audio waveforms + waveforms = [waveform.detach().clone().cpu().numpy() for waveform in waveforms] + + if waveform_lengths is not None: + waveforms = [waveform[: waveform_lengths[i]] for i, waveform in enumerate(waveforms)] + + return waveforms + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + sampling_rate: Optional[int] = None, + padding: Union[bool, str, PaddingStrategy] = True, + max_length: Optional[int] = None, + truncation: bool = True, + pad_to_multiple_of: Optional[int] = None, + return_noise: bool = True, + generator: Optional[np.random.Generator] = None, + pad_end: bool = False, + pad_length: Optional[int] = None, + do_normalize: Optional[str] = None, + return_attention_mask: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). + + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not + stereo, i.e. single float per timestep. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition + pipeline. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): + Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and + padding index) among: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + + If `pad_end = True`, that padding will occur before the `padding` strategy is applied. + max_length (`int`, *optional*): + Maximum length of the returned list and optionally padding length (see above). + truncation (`bool`, *optional*, defaults to `True`): + Activates truncation to cut input sequences longer than `max_length` to `max_length`. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. + return_noise (`bool`, *optional*, defaults to `True`): + Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`]. + generator (`numpy.random.Generator`, *optional*, defaults to `None`): + An optional `numpy.random.Generator` random number generator to use when generating noise. + pad_end (`bool`, *optional*, defaults to `False`): + Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the + generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. 
+            pad_length (`int`, *optional*, defaults to `None`):
+                If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this
+                will default to `self.pad_end_length`.
+            do_normalize (`bool`, *optional*):
+                Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve
+                the performance for some models. If not set, this will default to `self.do_normalize`.
+            return_attention_mask (`bool`, *optional*):
+                Whether to return the attention mask. If left to the default, will return the attention mask according
+                to the specific feature_extractor's default.
+
+                [What are attention masks?](../glossary#attention-mask)
+
+            return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors instead of list of python integers. Acceptable values are:
+
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return Numpy `np.ndarray` objects.
+        """
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+
+        if sampling_rate is not None:
+            if sampling_rate != self.sampling_rate:
+                raise ValueError(
+                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
+                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
+                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
+                )
+        else:
+            logger.warning(
+                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+                "Failing to do so can result in silent errors that might be hard to debug."
+            )
+
+        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+        if is_batched_numpy and len(raw_speech.shape) > 2:
+            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+        is_batched = is_batched_numpy or (
+            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+        )
+
+        if is_batched:
+            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+        elif not is_batched and not isinstance(raw_speech, np.ndarray):
+            raw_speech = np.asarray(raw_speech, dtype=np.float32)
+        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+            raw_speech = raw_speech.astype(np.float32)
+
+        # always return batch
+        if not is_batched:
+            raw_speech = [np.asarray(raw_speech, dtype=np.float32)]
+
+        # Pad end to reduce artifacts
+        if pad_end:
+            pad_length = pad_length if pad_length is not None else self.pad_end_length
+            raw_speech = [
+                np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value)
+                for waveform in raw_speech
+            ]
+
+        batched_speech = BatchFeature({"input_features": raw_speech})
+
+        padded_inputs = self.pad(
+            batched_speech,
+            padding=padding,
+            max_length=max_length if max_length is not None else self.num_max_samples,
+            truncation=truncation,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_attention_mask=return_attention_mask,
+        )
+
+        # make sure list is in array format
+        # input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
+        input_features = padded_inputs.get("input_features")
+
+        mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features]
+
+        if isinstance(input_features[0], List):
+            batched_speech["input_features"] = [np.asarray(mel, dtype=np.float32) for mel in
mel_spectrograms] + else: + batched_speech["input_features"] = [mel.astype(np.float32) for mel in mel_spectrograms] + + # convert attention_mask to correct format + attention_mask = padded_inputs.get("attention_mask") + if attention_mask is not None: + batched_speech["padding_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] + + if return_noise: + noise = [ + self.generate_noise(spectrogram.shape[0], generator) + for spectrogram in batched_speech["input_features"] + ] + batched_speech["noise_sequence"] = noise + + if do_normalize: + batched_speech["input_features"] = [ + self.normalize(spectrogram) for spectrogram in batched_speech["input_features"] + ] + + if return_tensors is not None: + batched_speech = batched_speech.convert_to_tensors(return_tensors) + + return batched_speech + + def to_dict(self) -> Dict[str, Any]: + output = super().to_dict() + + # Don't serialize these as they are derived from the other properties. + names = ["window", "mel_filters", "n_fft", "n_freqs", "num_max_samples"] + for name in names: + if name in output: + del output[name] + + return output diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dc9beddec525b8ae06e06c1d4e4d2460408fa8d5 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py @@ -0,0 +1,636 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch UnivNetModel model.""" + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...modeling_utils import ModelOutput, PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_univnet import UnivNetConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "UnivNetConfig" + +_CHECKPOINT_FOR_DOC = "dg845/univnet-dev" + +UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "dg845/univnet-dev", + # See all UnivNet models at https://huggingface.co/models?filter=univnet +] + + +@dataclass +class UnivNetModelOutput(ModelOutput): + """ + Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded + lengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]). + + Args: + waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Batched 1D (mono-channel) output audio waveforms. + waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`): + The batched length in samples of each unpadded waveform in `waveforms`. 
+ """ + + waveforms: torch.FloatTensor = None + waveform_lengths: torch.FloatTensor = None + + +class UnivNetKernelPredictorResidualBlock(nn.Module): + """ + Implementation of the residual block for the kernel predictor network inside each location variable convolution + block (LVCBlock). + + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + """ + + def __init__( + self, + config: UnivNetConfig, + ): + super().__init__() + self.channels = config.model_in_channels + self.kernel_size = config.kernel_predictor_conv_size + self.dropout_prob = config.kernel_predictor_dropout + self.leaky_relu_slope = config.leaky_relu_slope + + padding = (self.kernel_size - 1) // 2 + + self.dropout = nn.Dropout(self.dropout_prob) + self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) + self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) + + def forward(self, hidden_states: torch.FloatTensor): + # hidden_states should have shape (batch_size, channels, seq_length) + residual = hidden_states + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv1(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv2(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + return hidden_states + residual + + def apply_weight_norm(self): + nn.utils.weight_norm(self.conv1) + nn.utils.weight_norm(self.conv2) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.conv1) + nn.utils.remove_weight_norm(self.conv2) + + +class UnivNetKernelPredictor(nn.Module): + """ + Implementation of the kernel predictor network which supplies the kernel and bias for the location variable + convolutional layers (LVCs) in each UnivNet LVCBlock. + + Based on the KernelPredictor implementation in + [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). + + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + conv_kernel_size (`int`, *optional*, defaults to 3): + The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). + conv_layers (`int`, *optional*, defaults to 4): + The number of location variable convolutional layers to output kernels and biases for. 
+ """ + + def __init__( + self, + config: UnivNetConfig, + conv_kernel_size: int = 3, + conv_layers: int = 4, + ): + super().__init__() + + self.conv_in_channels = config.model_hidden_channels + self.conv_out_channels = 2 * config.model_hidden_channels + self.conv_kernel_size = conv_kernel_size + self.conv_layers = conv_layers + + self.kernel_channels = ( + self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers + ) + self.bias_channels = self.conv_out_channels * self.conv_layers + + self.resnet_in_channels = config.num_mel_bins + self.resnet_hidden_channels = config.kernel_predictor_hidden_channels + self.resnet_kernel_size = config.kernel_predictor_conv_size + self.num_blocks = config.kernel_predictor_num_blocks + + self.leaky_relu_slope = config.leaky_relu_slope + + padding = (self.resnet_kernel_size - 1) // 2 + + self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True) + + self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)]) + + self.kernel_conv = nn.Conv1d( + self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True + ) + self.bias_conv = nn.Conv1d( + self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True + ) + + def forward(self, spectrogram: torch.FloatTensor): + """ + Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location + variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, + seq_length). + + Args: + spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): + Tensor containing the log-mel spectrograms. + + Returns: + Tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of + location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, + self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of + location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, + seq_length)`. 
+ """ + batch_size, _, seq_length = spectrogram.shape + + hidden_states = self.input_conv(spectrogram) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + + for resblock in self.resblocks: + hidden_states = resblock(hidden_states) + + kernel_hidden_states = self.kernel_conv(hidden_states) + bias_hidden_states = self.bias_conv(hidden_states) + + # Reshape kernels and biases to appropriate shape + kernels = kernel_hidden_states.view( + batch_size, + self.conv_layers, + self.conv_in_channels, + self.conv_out_channels, + self.conv_kernel_size, + seq_length, + ).contiguous() + biases = bias_hidden_states.view( + batch_size, + self.conv_layers, + self.conv_out_channels, + seq_length, + ).contiguous() + + return kernels, biases + + def apply_weight_norm(self): + nn.utils.weight_norm(self.input_conv) + for layer in self.resblocks: + layer.apply_weight_norm() + nn.utils.weight_norm(self.kernel_conv) + nn.utils.weight_norm(self.bias_conv) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.input_conv) + for layer in self.resblocks: + layer.remove_weight_norm() + nn.utils.remove_weight_norm(self.kernel_conv) + nn.utils.remove_weight_norm(self.bias_conv) + + +class UnivNetLvcResidualBlock(nn.Module): + """ + Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network. + + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + kernel_size (`int`): + The kernel size for the dilated 1D convolutional layer. + dilation (`int`): + The dilation for the dilated 1D convolutional layer. + """ + + def __init__( + self, + config: UnivNetConfig, + kernel_size: int, + dilation: int, + ): + super().__init__() + self.hidden_channels = config.model_hidden_channels + self.kernel_size = kernel_size + self.dilation = dilation + self.leaky_relu_slope = config.leaky_relu_slope + + padding = self.dilation * (self.kernel_size - 1) // 2 + + self.conv = nn.Conv1d( + self.hidden_channels, + self.hidden_channels, + self.kernel_size, + padding=padding, + dilation=self.dilation, + ) + + def forward(self, hidden_states, kernel, bias, hop_size=256): + residual = hidden_states + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.location_variable_convolution(hidden_states, kernel, bias, hop_size=hop_size) + # Gated activation unit + hidden_states = torch.sigmoid(hidden_states[:, : self.hidden_channels, :]) * torch.tanh( + hidden_states[:, self.hidden_channels :, :] + ) + # Skip connection + hidden_states = residual + hidden_states + + return hidden_states + + # Based on https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L171 + def location_variable_convolution( + self, + hidden_states: torch.FloatTensor, + kernel: torch.FloatTensor, + bias: torch.FloatTensor, + dilation: int = 1, + hop_size: int = 256, + ): + """ + Performs location-variable convolution operation on the input sequence (hidden_states) using the local + convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform + Generation](https://arxiv.org/abs/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao. + + Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. 
+
+        Args:
+            hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`):
+                The input sequence of shape (batch, in_channels, in_length).
+            kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`):
+                The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length).
+            bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`):
+                The bias for the local convolution of shape (batch, out_channels, kernel_length).
+            dilation (`int`, *optional*, defaults to 1):
+                The dilation of convolution.
+            hop_size (`int`, *optional*, defaults to 256):
+                The hop_size of the conditioning sequence.
+        Returns:
+            `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size,
+            out_channels, in_length).
+        """
+        batch, _, in_length = hidden_states.shape
+        batch, _, out_channels, kernel_size, kernel_length = kernel.shape
+        if in_length != (kernel_length * hop_size):
+            raise ValueError(
+                f"Dim 2 of `hidden_states` should be {kernel_length * hop_size} but got {in_length}. Please check"
+                " `hidden_states` or `kernel` and `hop_size` to make sure they are correct."
+            )
+
+        padding = dilation * int((kernel_size - 1) / 2)
+
+        # (batch, in_channels, in_length + 2*padding)
+        hidden_states = nn.functional.pad(hidden_states, (padding, padding), "constant", 0)
+        # (batch, in_channels, kernel_length, hop_size + 2*padding)
+        hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size)
+
+        if hop_size < dilation:
+            hidden_states = nn.functional.pad(hidden_states, (0, dilation), "constant", 0)
+        # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
+        hidden_states = hidden_states.unfold(3, dilation, dilation)
+        hidden_states = hidden_states[:, :, :, :, :hop_size]
+        # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
+        hidden_states = hidden_states.transpose(3, 4)
+        # (batch, in_channels, kernel_length, dilation, _, kernel_size)
+        hidden_states = hidden_states.unfold(4, kernel_size, 1)
+
+        # Apply local convolution kernel to hidden_states.
+        output_hidden_states = torch.einsum("bildsk,biokl->bolsd", hidden_states, kernel)
+
+        output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d)
+        bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
+        output_hidden_states = output_hidden_states + bias
+        output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1)
+
+        return output_hidden_states
+
+    def apply_weight_norm(self):
+        nn.utils.weight_norm(self.conv)
+
+    def remove_weight_norm(self):
+        nn.utils.remove_weight_norm(self.conv)
+
+
+class UnivNetLvcBlock(nn.Module):
+    """
+    Implementation of the location variable convolution (LVC) residual block of the UnivNet residual network. Includes a
+    `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers.
+
+    Based on LVCBlock in
+    [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98)
+
+    Parameters:
+        config (`UnivNetConfig`):
+            Config for the `UnivNetModel` model.
+        layer_id (`int`):
+            An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and
+            `len(config.resblock_stride_sizes) - 1` inclusive.
+        lvc_hop_size (`int`, *optional*, defaults to 256):
+            The hop size for the location variable convolutional layers.
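+
+    Note:
+        The `lvc_hop_size` passed in for layer `i` is the cumulative product of `config.resblock_stride_sizes[: i + 1]`,
+        matching the total upsampling performed by the transposed convolutions up to and including this block (see
+        `UnivNetModel.__init__`).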
+    """
+
+    def __init__(
+        self,
+        config: UnivNetConfig,
+        layer_id: int,
+        lvc_hop_size: int = 256,
+    ):
+        super().__init__()
+        self.hidden_channels = config.model_hidden_channels
+        self.kernel_size = config.resblock_kernel_sizes[layer_id]
+        self.stride = config.resblock_stride_sizes[layer_id]
+        self.dilations = config.resblock_dilation_sizes[layer_id]
+        self.cond_hop_length = lvc_hop_size
+        self.leaky_relu_slope = config.leaky_relu_slope
+        self.num_blocks = len(self.dilations)
+
+        self.convt_pre = nn.ConvTranspose1d(
+            self.hidden_channels,
+            self.hidden_channels,
+            2 * self.stride,
+            stride=self.stride,
+            padding=self.stride // 2 + self.stride % 2,
+            output_padding=self.stride % 2,
+        )
+
+        self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks)
+
+        self.resblocks = nn.ModuleList(
+            [UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)]
+        )
+
+    def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor):
+        # hidden_states: (batch_size, hidden_channels, seq_length)
+        # spectrogram: (batch_size, cond_channels, cond_length)
+        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+        hidden_states = self.convt_pre(hidden_states)
+
+        kernels, biases = self.kernel_predictor(spectrogram)
+
+        for i, resblock in enumerate(self.resblocks):
+            kernel = kernels[:, i, :, :, :, :]
+            bias = biases[:, i, :, :]
+            hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length)
+
+        return hidden_states
+
+    def apply_weight_norm(self):
+        nn.utils.weight_norm(self.convt_pre)
+        self.kernel_predictor.apply_weight_norm()
+        for layer in self.resblocks:
+            layer.apply_weight_norm()
+
+    def remove_weight_norm(self):
+        nn.utils.remove_weight_norm(self.convt_pre)
+        self.kernel_predictor.remove_weight_norm()
+        for layer in self.resblocks:
+            layer.remove_weight_norm()
+
+
+UNIVNET_START_DOCSTRING = r"""
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`UnivNetConfig`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+UNIVNET_INPUTS_DOCSTRING = r"""
+    Converts a noise waveform and a conditioning spectrogram to a speech waveform. Passing a batch of log-mel
+    spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a
+    single, un-batched speech waveform.
+
+    Args:
+        input_features (`torch.FloatTensor`):
+            Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
+            config.num_mel_channels)`, or un-batched and of shape `(sequence_length, config.num_mel_channels)`.
+        noise_sequence (`torch.FloatTensor`, *optional*):
+            Tensor containing a noise sequence of standard Gaussian noise.
Can be batched and of shape `(batch_size,
+            sequence_length, config.model_in_channels)`, or un-batched and of shape `(sequence_length,
+            config.model_in_channels)`. If not supplied, will be randomly generated.
+        padding_mask (`torch.BoolTensor`, *optional*):
+            Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**
+            - 0 for tokens that are **masked**
+
+            The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape
+            `(sequence_length,)`.
+        generator (`torch.Generator`, *optional*):
+            A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
+            deterministic.
+        return_dict (`bool`, *optional*):
+            Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    """UnivNet GAN vocoder.""",
+    UNIVNET_START_DOCSTRING,
+)
+class UnivNetModel(PreTrainedModel):
+    config_class = UnivNetConfig
+    main_input_name = "input_features"
+
+    def __init__(self, config: UnivNetConfig):
+        super().__init__(config)
+
+        self.num_kernels = len(config.resblock_kernel_sizes)
+        self.leaky_relu_slope = config.leaky_relu_slope
+
+        self.conv_pre = nn.Conv1d(
+            config.model_in_channels,
+            config.model_hidden_channels,
+            kernel_size=7,
+            stride=1,
+            padding=3,
+            padding_mode="reflect",
+        )
+
+        # Initialize location-variable convolution ResNet Blocks.
+        num_layers = len(config.resblock_stride_sizes)
+        hop_length = 1
+        hop_lengths = []
+        for stride in config.resblock_stride_sizes:
+            hop_length = hop_length * stride
+            hop_lengths.append(hop_length)
+
+        self.resblocks = nn.ModuleList(
+            [
+                UnivNetLvcBlock(
+                    config,
+                    layer_id=i,
+                    lvc_hop_size=hop_lengths[i],
+                )
+                for i in range(num_layers)
+            ]
+        )
+
+        self.conv_post = nn.Conv1d(config.model_hidden_channels, 1, 7, padding=3, padding_mode="reflect")
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(UNIVNET_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=UnivNetModelOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_features: torch.FloatTensor,
+        noise_sequence: Optional[torch.FloatTensor] = None,
+        padding_mask: Optional[torch.FloatTensor] = None,
+        generator: Optional[torch.Generator] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], UnivNetModelOutput]:
+        r"""
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import UnivNetFeatureExtractor, UnivNetModel
+        >>> from datasets import load_dataset, Audio
+
+        >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev")
+        >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
+
+        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+        >>> # Resample the audio to the feature extractor's sampling rate.
+        >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
+        >>> inputs = feature_extractor(
+        ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+        ... 
) + >>> audio = model(**inputs).waveforms + >>> list(audio.shape) + [1, 140288] + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Resolve batch sizes for noise_sequence and spectrogram + spectrogram_batched = input_features.dim() == 3 + if not spectrogram_batched: + input_features = input_features.unsqueeze(0) + spectrogram_batch_size, spectrogram_length, _ = input_features.shape + + if noise_sequence is not None: + noise_sequence_batched = noise_sequence.dim() == 3 + if not noise_sequence_batched: + noise_sequence = noise_sequence.unsqueeze(0) + else: + # Randomly generate noise_sequence + noise_sequence_shape = (spectrogram_batch_size, spectrogram_length, self.config.model_in_channels) + noise_sequence = torch.randn( + noise_sequence_shape, generator=generator, dtype=input_features.dtype, device=input_features.device + ) + noise_sequence_batch_size = noise_sequence.shape[0] + + if spectrogram_batch_size > 1 and noise_sequence_batch_size == 1: + # Repeat noise_sequence spectrogram_batch_size times + noise_sequence = noise_sequence.repeat(spectrogram_batch_size, 1, 1) + elif noise_sequence_batch_size > 1 and spectrogram_batch_size == 1: + # Repeat spectrogram noise_sequence_batch_size times + input_features = input_features.repeat(noise_sequence_batch_size, 1, 1) + + if noise_sequence_batch_size != spectrogram_batch_size: + raise ValueError( + f"The batch size of `noise_sequence` is {noise_sequence_batch_size} and the batch size of" + f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." + ) + + if padding_mask is not None: + if padding_mask.dim() == 1: + padding_mask = padding_mask.unsqueeze(0) + padding_mask_batch_size = padding_mask.shape[0] + if padding_mask_batch_size != spectrogram_batch_size: + raise ValueError( + f"The batch size of `padding_mask` is {padding_mask_batch_size} and the batch size of" + f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." + ) + + # Change shapes to have channels before sequence lengths + hidden_states = noise_sequence.transpose(2, 1) + input_features = input_features.transpose(2, 1) + + hidden_states = self.conv_pre(hidden_states) + + for resblock in self.resblocks: + hidden_states = resblock(hidden_states, input_features) + + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv_post(hidden_states) + hidden_states = torch.tanh(hidden_states) + + # Remove sequence length dimension since this collapses to 1 + # NOTE: keep waveforms batched even if there's only one + waveform = hidden_states.squeeze(1) + + # Get sequence lengths for UnivNetFeatureExtractor.batch_decode. 
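+        # `padding_mask` marks real samples with 1 and padding with 0, so a row-wise sum recovers the
+        # original (unpadded) waveform length of each batch element.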
+ waveform_lengths = None + if padding_mask is not None: + # Padding is always contiguous and added on the right + waveform_lengths = torch.sum(padding_mask, dim=1) + + if not return_dict: + outputs = (waveform, waveform_lengths) + return outputs + + return UnivNetModelOutput( + waveforms=waveform, + waveform_lengths=waveform_lengths, + ) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + + def apply_weight_norm(self): + nn.utils.weight_norm(self.conv_pre) + for layer in self.resblocks: + layer.apply_weight_norm() + nn.utils.weight_norm(self.conv_post) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.conv_pre) + for layer in self.resblocks: + layer.remove_weight_norm() + nn.utils.remove_weight_norm(self.conv_post) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b4270ae650c801c6d6b07cd3572bd9dd929ac66 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..707355a8cd7134a9b8a9a70c83836ae35f7057c2 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62c2ad3e5caff42fb4f0abf0a8fb2b0f901cc666 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..be46a4de81b30cff5c826bd9f298b2ee7a8fecbb --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO"""
+
+import argparse
+
+import torch
+
+from transformers import YosoConfig, YosoForMaskedLM
+
+
+def rename_key(orig_key):
+    if "model" in orig_key:
+        orig_key = orig_key.replace("model.", "")
+    if "norm1" in orig_key:
+        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
+    if "norm2" in orig_key:
+        orig_key = orig_key.replace("norm2", "output.LayerNorm")
+    if "norm" in orig_key:
+        orig_key = orig_key.replace("norm", "LayerNorm")
+    if "transformer" in orig_key:
+        layer_num = orig_key.split(".")[0].split("_")[-1]
+        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
+    if "mha.attn" in orig_key:
+        orig_key = orig_key.replace("mha.attn", "attention.self")
+    if "mha" in orig_key:
+        orig_key = orig_key.replace("mha", "attention")
+    if "W_q" in orig_key:
+        orig_key = orig_key.replace("W_q", "self.query")
+    if "W_k" in orig_key:
+        orig_key = orig_key.replace("W_k", "self.key")
+    if "W_v" in orig_key:
+        orig_key = orig_key.replace("W_v", "self.value")
+    if "ff1" in orig_key:
+        orig_key = orig_key.replace("ff1", "intermediate.dense")
+    if "ff2" in orig_key:
+        orig_key = orig_key.replace("ff2", "output.dense")
+    if "ff" in orig_key:
+        orig_key = orig_key.replace("ff", "output.dense")
+    if "mlm_class" in orig_key:
+        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
+    if "mlm" in orig_key:
+        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
+    if "cls" not in orig_key:
+        orig_key = "yoso." + orig_key
+
+    return orig_key
+
+
+def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
+    for key in orig_state_dict.copy().keys():
+        val = orig_state_dict.pop(key)
+
+        if ("pooler" in key) or ("sen_class" in key):
+            continue
+        else:
+            orig_state_dict[rename_key(key)] = val
+
+    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
+    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
+
+    return orig_state_dict
+
+
+def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
+    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
+    config = YosoConfig.from_json_file(yoso_config_file)
+    model = YosoForMaskedLM(config)
+
+    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
+
+    print(model.load_state_dict(new_state_dict))
+    model.eval()
+    model.save_pretrained(pytorch_dump_path)
+
+    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
+    )
+    parser.add_argument(
+        "--config_file",
+        default=None,
+        type=str,
+        required=True,
+        help="The json file for YOSO model config.",
+    )
+    parser.add_argument(
+        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ ) + args = parser.parse_args() + convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py new file mode 100644 index 0000000000000000000000000000000000000000..4e08b999ad30748b8949a6b24fed8b5efac194ab --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py @@ -0,0 +1,1304 @@ +# coding=utf-8 +# Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch YOSO model.""" + + +import math +from pathlib import Path +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_yoso import YosoConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "uw-madison/yoso-4096" +_CONFIG_FOR_DOC = "YosoConfig" + +YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "uw-madison/yoso-4096", + # See all YOSO models at https://huggingface.co/models?filter=yoso +] + + +def load_cuda_kernels(): + global lsh_cumulation + try: + from torch.utils.cpp_extension import load + + def append_root(files): + src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso" + return [src_folder / file for file in files] + + src_files = append_root( + ["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"] + ) + + load("fast_lsh_cumulation", src_files, verbose=True) + + import fast_lsh_cumulation as lsh_cumulation + + return True + except Exception: + lsh_cumulation = None + return False + + +def to_contiguous(input_tensors): + if isinstance(input_tensors, list): + out = [] + for tensor in input_tensors: + if not tensor.is_contiguous(): + tensor = tensor.contiguous() + out.append(tensor) + return out + else: + if not input_tensors.is_contiguous(): + input_tensors = input_tensors.contiguous() + return input_tensors + + +def normalize(input_tensors): + if isinstance(input_tensors, list): + out = [] + for tensor in input_tensors: + out.append(nn.functional.normalize(tensor, p=2, dim=-1)) + return out + else: + return nn.functional.normalize(input_tensors, p=2, dim=-1) + + +def hashing(query, key, num_hash, hash_len): + if len(query.size()) != 3: + raise 
ValueError("Query has incorrect size.") + if len(key.size()) != 3: + raise ValueError("Key has incorrect size.") + + rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device) + raise_pow = 2 ** torch.arange(hash_len, device=query.device) + + query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len) + key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len) + query_binary = (query_projection > 0).int() + key_binary = (key_projection > 0).int() + query_hash = torch.sum(query_binary * raise_pow, dim=-1) + query_hash = torch.sum(key_binary * raise_pow, dim=-1) + + return query_hash.int(), query_hash.int() + + +class YosoCumulation(torch.autograd.Function): + @staticmethod + def forward(ctx, query_mask, key_mask, query, key, value, config): + hash_code_len = config["hash_code_len"] + + expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len + expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] + cumulation_value = torch.matmul(expectation, value) + + ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value) + ctx.config = config + + return cumulation_value + + @staticmethod + def backward(ctx, grad): + grad = to_contiguous(grad) + + query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors + config = ctx.config + + hash_code_len = config["hash_code_len"] + + weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation + grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) + grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) + grad_value = torch.matmul(expectation.transpose(-1, -2), grad) + + return None, None, grad_query, grad_key, grad_value, None + + +class YosoLSHCumulation(torch.autograd.Function): + @staticmethod + def forward(ctx, query_mask, key_mask, query, key, value, config): + if query_mask.size(0) != key_mask.size(0): + raise ValueError("Query mask and Key mask differ in sizes in dimension 0") + if query_mask.size(0) != query.size(0): + raise ValueError("Query mask and Query differ in sizes in dimension 0") + if query_mask.size(0) != key.size(0): + raise ValueError("Query mask and Key differ in sizes in dimension 0") + if query_mask.size(0) != value.size(0): + raise ValueError("Query mask and Value mask differ in sizes in dimension 0") + if key.size(1) != value.size(1): + raise ValueError("Key and Value differ in sizes in dimension 1") + if query.size(2) != key.size(2): + raise ValueError("Query and Key differ in sizes in dimension 2") + + query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value]) + + use_cuda = query_mask.is_cuda + num_hash = config["num_hash"] + hash_code_len = config["hash_code_len"] + hashtable_capacity = int(2**hash_code_len) + + if config["use_fast_hash"]: + query_hash_code, key_hash_code = lsh_cumulation.fast_hash( + query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1 + ) + else: + query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len) + + cumulation_value = lsh_cumulation.lsh_cumulation( + query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1 + ) + + ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value) + ctx.config = config + + return cumulation_value + + @staticmethod + def backward(ctx, grad): + grad = to_contiguous(grad) + 
+ query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors + config = ctx.config + + use_cuda = grad.is_cuda + hash_code_len = config["hash_code_len"] + hashtable_capacity = int(2**hash_code_len) + + if config["lsh_backward"]: + grad_value = lsh_cumulation.lsh_cumulation( + key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1 + ) + grad_query = lsh_cumulation.lsh_weighted_cumulation( + query_mask, + query_hash_code, + grad, + key_mask, + key_hash_code, + value, + (hash_code_len / 2) * key, + hashtable_capacity, + use_cuda, + 4, + ) + grad_key = lsh_cumulation.lsh_weighted_cumulation( + key_mask, + key_hash_code, + value, + query_mask, + query_hash_code, + grad, + (hash_code_len / 2) * query, + hashtable_capacity, + use_cuda, + 4, + ) + else: + expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len + expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :] + weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation + grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key) + grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query) + grad_value = torch.matmul(expectation.transpose(-1, -2), grad) + + return None, None, grad_query, grad_key, grad_value, None + + +# Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings +class YosoEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False + ) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = 
torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class YosoSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = ( + position_embedding_type if position_embedding_type is not None else config.position_embedding_type + ) + + self.use_expectation = config.use_expectation + self.hash_code_len = config.hash_code_len + self.use_conv = config.conv_window is not None + self.use_fast_hash = config.use_fast_hash + self.num_hash = config.num_hash + self.lsh_backward = config.lsh_backward + + self.lsh_config = { + "hash_code_len": self.hash_code_len, + "use_fast_hash": self.use_fast_hash, + "num_hash": self.num_hash, + "lsh_backward": self.lsh_backward, + } + + if config.conv_window is not None: + self.conv = nn.Conv2d( + in_channels=config.num_attention_heads, + out_channels=config.num_attention_heads, + kernel_size=(config.conv_window, 1), + padding=(config.conv_window // 2, 0), + bias=False, + groups=config.num_attention_heads, + ) + + def transpose_for_scores(self, layer): + new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + layer = layer.view(*new_layer_shape) + return layer.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask=None, output_attentions=False): + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.use_conv: + conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None]) + + batch_size, num_heads, seq_len, head_dim = query_layer.size() + + query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim) + key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim) + value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim) + + # revert changes made by get_extended_attention_mask + attention_mask = 1.0 + attention_mask / 10000.0 + attention_mask = ( + attention_mask.squeeze().repeat(1, num_heads, 1).reshape(batch_size * num_heads, seq_len).int() + ) + + # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). 
Inputs + # smaller than this are padded with zeros. + gpu_warp_size = 32 + + if (not self.use_expectation) and head_dim < gpu_warp_size: + pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim + + query_layer = torch.cat( + [ + query_layer, + torch.zeros(pad_size, device=query_layer.device), + ], + dim=-1, + ) + key_layer = torch.cat( + [ + key_layer, + torch.zeros(pad_size, device=key_layer.device), + ], + dim=-1, + ) + value_layer = torch.cat( + [ + value_layer, + torch.zeros(pad_size, device=value_layer.device), + ], + dim=-1, + ) + + if self.use_expectation or self.training: + query_layer, key_layer = normalize([query_layer, key_layer]) + + if self.use_expectation: + context_layer = YosoCumulation.apply( + attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config + ) + else: + context_layer = YosoLSHCumulation.apply( + attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config + ) + + if (not self.use_expectation) and head_dim < gpu_warp_size: + context_layer = context_layer[:, :, :head_dim] + + context_layer = normalize(context_layer) + + context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) + + if self.use_conv: + context_layer += conv_value_layer + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, context_layer) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput +class YosoSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class YosoAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = YosoSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward(self, hidden_states, attention_mask=None, output_attentions=False): + self_outputs = self.self(hidden_states, attention_mask, output_attentions) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output 
them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class YosoIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput +class YosoOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class YosoLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = YosoAttention(config) + self.add_cross_attention = config.add_cross_attention + self.intermediate = YosoIntermediate(config) + self.output = YosoOutput(config) + + def forward(self, hidden_states, attention_mask=None, output_attentions=False): + self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class YosoEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) + + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, 
all_self_attentions] if v is not None) + return BaseModelOutputWithCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform +class YosoPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso +class YosoLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = YosoPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso +class YosoOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = YosoLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class YosoPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = YosoConfig + base_model_prefix = "yoso" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +YOSO_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`YosoConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +YOSO_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare YOSO Model transformer outputting raw hidden-states without any specific head on top.", + YOSO_START_DOCSTRING, +) +class YosoModel(YosoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.embeddings = YosoEmbeddings(config) + self.encoder = YosoEncoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
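+        # `get_extended_attention_mask` converts the `[batch_size, seq_length]` mask of 0s/1s into an additive
+        # mask (0.0 for kept positions, -10000.0 for masked ones, as assumed by this file); `YosoSelfAttention`
+        # later reverts this with `1.0 + attention_mask / 10000.0` to recover a binary mask for the LSH kernels.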
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + + if not return_dict: + return (sequence_output,) + encoder_outputs[1:] + + return BaseModelOutputWithCrossAttentions( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings("""YOSO Model with a `language modeling` head on top.""", YOSO_START_DOCSTRING) +class YosoForMaskedLM(YosoPreTrainedModel): + _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.yoso = YosoModel(config) + self.cls = YosoOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
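+
+        Example (a minimal usage sketch with the checkpoint documented in this file; the
+        `add_code_sample_docstrings` decorator on this method also injects a generated example):
+
+        ```python
+        >>> from transformers import AutoTokenizer, YosoForMaskedLM
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")
+        >>> model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")
+
+        >>> inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
+        >>> outputs = model(**inputs, labels=inputs["input_ids"])
+        >>> loss, logits = outputs.loss, outputs.logits
+        ```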
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.yoso( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class YosoClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + self.config = config + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = ACT2FN[self.config.hidden_act](x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +@add_start_docstrings( + """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks.""", + YOSO_START_DOCSTRING, +) +class YosoForSequenceClassification(YosoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.yoso = YosoModel(config) + self.classifier = YosoClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
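+
+            If `config.problem_type` is unset, it is inferred on the first call from `num_labels` and the dtype of
+            `labels` (regression, single-label, or multi-label; see the loss selection logic in this method).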
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.yoso( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """YOSO Model with a multiple choice classification head on top (a linear layer on top of + the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""", + YOSO_START_DOCSTRING, +) +class YosoForMultipleChoice(YosoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.yoso = YosoModel(config) + self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.yoso( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) + pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) + pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) + pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) + logits = self.classifier(pooled_output) + + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """YOSO Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""", + YOSO_START_DOCSTRING, +) +class YosoForTokenClassification(YosoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.yoso = YosoModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
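+
+            Tokens at padded positions (where `attention_mask` is 0) are mapped to the loss function's
+            `ignore_index`, so they do not contribute to the loss (see the masking logic in this method).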
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.yoso( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) + ) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """YOSO Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", + YOSO_START_DOCSTRING, +) +class YosoForQuestionAnswering(YosoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + config.num_labels = 2 + self.num_labels = config.num_labels + + self.yoso = YosoModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.yoso( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[1:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..661f3405da108f88f0cadd4286d0053c141de284 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__init__.py @@ -0,0 +1,3338 @@ +""" + designSpaceDocument + + - Read and write designspace files +""" + +from __future__ import annotations + +import collections +import copy +import itertools +import math +import os +import posixpath +from io import BytesIO, StringIO +from textwrap import indent +from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast + +from fontTools.misc import etree as ET +from fontTools.misc import plistlib +from fontTools.misc.loggingTools import LogMixin +from fontTools.misc.textTools import tobytes, tostr + + +__all__ = [ + "AxisDescriptor", + "AxisLabelDescriptor", + "AxisMappingDescriptor", + "BaseDocReader", + "BaseDocWriter", + "DesignSpaceDocument", + "DesignSpaceDocumentError", + "DiscreteAxisDescriptor", + "InstanceDescriptor", + "LocationLabelDescriptor", + "RangeAxisSubsetDescriptor", + "RuleDescriptor", + "SourceDescriptor", + "ValueAxisSubsetDescriptor", + "VariableFontDescriptor", +] + +# ElementTree allows to find namespace-prefixed elements, but not attributes +# so we have to do it ourselves for 'xml:lang' +XML_NS = "{http://www.w3.org/XML/1998/namespace}" +XML_LANG = XML_NS + "lang" + + +def posix(path): + """Normalize paths using forward slash to work also on Windows.""" + new_path = posixpath.join(*path.split(os.path.sep)) + if path.startswith("/"): + # The above transformation loses absolute paths + new_path = "/" + new_path + elif path.startswith(r"\\"): + # The above transformation loses 
leading slashes of UNC path mounts
+        new_path = "//" + new_path
+    return new_path
+
+
+def posixpath_property(private_name):
+    """Generate a property that holds a path, always using forward slashes."""
+
+    def getter(self):
+        # Normal getter
+        return getattr(self, private_name)
+
+    def setter(self, value):
+        # The setter rewrites paths using forward slashes
+        if value is not None:
+            value = posix(value)
+        setattr(self, private_name, value)
+
+    return property(getter, setter)
+
+
+class DesignSpaceDocumentError(Exception):
+    def __init__(self, msg, obj=None):
+        self.msg = msg
+        self.obj = obj
+
+    def __str__(self):
+        return str(self.msg) + (": %r" % self.obj if self.obj is not None else "")
+
+
+class AsDictMixin(object):
+    def asdict(self):
+        d = {}
+        for attr, value in self.__dict__.items():
+            if attr.startswith("_"):
+                continue
+            if hasattr(value, "asdict"):
+                value = value.asdict()
+            elif isinstance(value, list):
+                value = [v.asdict() if hasattr(v, "asdict") else v for v in value]
+            d[attr] = value
+        return d
+
+
+class SimpleDescriptor(AsDictMixin):
+    """Base container for a set of simple attributes, as listed in each
+    subclass's ``_attrs``."""
+
+    # XXX this is ugly. The 'print' is inappropriate here, and instead of
+    # assert, it should simply return True/False
+    def compare(self, other):
+        # test if this object contains the same data as the other
+        for attr in self._attrs:
+            try:
+                assert getattr(self, attr) == getattr(other, attr)
+            except AssertionError:
+                print(
+                    "failed attribute",
+                    attr,
+                    getattr(self, attr),
+                    "!=",
+                    getattr(other, attr),
+                )
+
+    def __repr__(self):
+        attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
+        attrs = indent("\n".join(attrs), "    ")
+        return f"{self.__class__.__name__}(\n{attrs}\n)"
+
+
+class SourceDescriptor(SimpleDescriptor):
+    """Simple container for data related to the source
+
+    .. code:: python
+
+        doc = DesignSpaceDocument()
+        s1 = SourceDescriptor()
+        s1.path = masterPath1
+        s1.name = "master.ufo1"
+        s1.font = defcon.Font("master.ufo1")
+        s1.location = dict(weight=0)
+        s1.familyName = "MasterFamilyName"
+        s1.styleName = "MasterStyleNameOne"
+        s1.localisedFamilyName = dict(fr="Caractère")
+        s1.mutedGlyphNames.append("A")
+        s1.mutedGlyphNames.append("Z")
+        doc.addSource(s1)
+
+    """
+
+    flavor = "source"
+    _attrs = [
+        "filename",
+        "path",
+        "name",
+        "layerName",
+        "location",
+        "copyLib",
+        "copyGroups",
+        "copyFeatures",
+        "muteKerning",
+        "muteInfo",
+        "mutedGlyphNames",
+        "familyName",
+        "styleName",
+        "localisedFamilyName",
+    ]
+
+    filename = posixpath_property("_filename")
+    path = posixpath_property("_path")
+
+    def __init__(
+        self,
+        *,
+        filename=None,
+        path=None,
+        font=None,
+        name=None,
+        location=None,
+        designLocation=None,
+        layerName=None,
+        familyName=None,
+        styleName=None,
+        localisedFamilyName=None,
+        copyLib=False,
+        copyInfo=False,
+        copyGroups=False,
+        copyFeatures=False,
+        muteKerning=False,
+        muteInfo=False,
+        mutedGlyphNames=None,
+    ):
+        self.filename = filename
+        """string. A relative path to the source file, **as it is in the document**.
+
+        MutatorMath + VarLib.
+        """
+        self.path = path
+        """The absolute path, calculated from filename."""
+
+        self.font = font
+        """Any Python object. Optional. Points to a representation of this
+        source font that is loaded in memory, as a Python object (e.g. a
+        ``defcon.Font`` or a ``fontTools.ttFont.TTFont``).
+
+        The default document reader will not fill in this attribute, and the
+        default writer will not use this attribute.
It is up to the user of
+        ``designspaceLib`` to either load the resource identified by
+        ``filename`` and store it in this field, or write the contents of
+        this field to the disk and make ``filename`` point to that.
+        """
+
+        self.name = name
+        """string. Optional. Unique identifier name for this source.
+
+        MutatorMath + varLib.
+        """
+
+        self.designLocation = (
+            designLocation if designLocation is not None else location or {}
+        )
+        """dict. Axis values for this source, in design space coordinates.
+
+        MutatorMath + varLib.
+
+        This may be only part of the full design location.
+        See :meth:`getFullDesignLocation()`
+
+        .. versionadded:: 5.0
+        """
+
+        self.layerName = layerName
+        """string. The name of the layer in the source to look for
+        outline data. Default ``None`` which means ``foreground``.
+        """
+        self.familyName = familyName
+        """string. Family name of this source. Though this data
+        can be extracted from the font, it can be efficient to have it right
+        here.
+
+        varLib.
+        """
+        self.styleName = styleName
+        """string. Style name of this source. Though this data
+        can be extracted from the font, it can be efficient to have it right
+        here.
+
+        varLib.
+        """
+        self.localisedFamilyName = localisedFamilyName or {}
+        """dict. A dictionary of localised family name strings, keyed by
+        language code.
+
+        If present, will be used to build localized names for all instances.
+
+        .. versionadded:: 5.0
+        """
+
+        self.copyLib = copyLib
+        """bool. Indicates if the contents of the font.lib need to
+        be copied to the instances.
+
+        MutatorMath.
+
+        .. deprecated:: 5.0
+        """
+        self.copyInfo = copyInfo
+        """bool. Indicates if the non-interpolating font.info needs
+        to be copied to the instances.
+
+        MutatorMath.
+
+        .. deprecated:: 5.0
+        """
+        self.copyGroups = copyGroups
+        """bool. Indicates if the groups need to be copied to the
+        instances.
+
+        MutatorMath.
+
+        .. deprecated:: 5.0
+        """
+        self.copyFeatures = copyFeatures
+        """bool. Indicates if the feature text needs to be
+        copied to the instances.
+
+        MutatorMath.
+
+        .. deprecated:: 5.0
+        """
+        self.muteKerning = muteKerning
+        """bool. Indicates if the kerning data from this source
+        needs to be muted (i.e. not be part of the calculations).
+
+        MutatorMath only.
+        """
+        self.muteInfo = muteInfo
+        """bool. Indicates if the interpolating font.info data for
+        this source needs to be muted.
+
+        MutatorMath only.
+        """
+        self.mutedGlyphNames = mutedGlyphNames or []
+        """list. Glyph names that need to be muted in the
+        instances.
+
+        MutatorMath only.
+        """
+
+    @property
+    def location(self):
+        """dict. Axis values for this source, in design space coordinates.
+
+        MutatorMath + varLib.
+
+        .. deprecated:: 5.0
+            Use the more explicit alias for this property :attr:`designLocation`.
+        """
+        return self.designLocation
+
+    @location.setter
+    def location(self, location: Optional[SimpleLocationDict]):
+        self.designLocation = location or {}
+
+    def setFamilyName(self, familyName, languageCode="en"):
+        """Setter for :attr:`localisedFamilyName`
+
+        .. versionadded:: 5.0
+        """
+        self.localisedFamilyName[languageCode] = tostr(familyName)
+
+    def getFamilyName(self, languageCode="en"):
+        """Getter for :attr:`localisedFamilyName`
+
+        .. versionadded:: 5.0
+        """
+        return self.localisedFamilyName.get(languageCode)
+
+    def getFullDesignLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
+        """Get the complete design location of this source, from its
+        :attr:`designLocation` and the document's axis defaults.
+
+        .. versionadded:: 5.0
+        """
+        result: SimpleLocationDict = {}
+        for axis in doc.axes:
+            if axis.name in self.designLocation:
+                result[axis.name] = self.designLocation[axis.name]
+            else:
+                result[axis.name] = axis.map_forward(axis.default)
+        return result
+
+
+class RuleDescriptor(SimpleDescriptor):
+    """Represents the rule descriptor element: a set of glyph substitutions to
+    trigger conditionally in some parts of the designspace.
+
+    .. code:: python
+
+        r1 = RuleDescriptor()
+        r1.name = "unique.rule.name"
+        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
+        r1.conditionSets.append([dict(...), dict(...)])
+        r1.subs.append(("a", "a.alt"))
+
+    .. code:: xml
+
+        <rules>
+            <rule name="unique.rule.name">
+                <conditionset>
+                    <condition name="weight" minimum="-10" maximum="10"/>
+                </conditionset>
+                <sub name="a" with="a.alt"/>
+            </rule>
+        </rules>
+    """
+
+    _attrs = ["name", "conditionSets", "subs"]  # what do we need here
+
+    def __init__(self, *, name=None, conditionSets=None, subs=None):
+        self.name = name
+        """string. Unique name for this rule. Can be used to reference this rule data."""
+        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
+        self.conditionSets = conditionSets or []
+        """a list of conditionsets.
+
+        - Each conditionset is a list of conditions.
+        - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
+        """
+        # list of substitutions stored as tuples of glyphnames ("a", "a.alt")
+        self.subs = subs or []
+        """list of substitutions.
+
+        - Each substitution is stored as a tuple of glyph names, e.g. ("a", "a.alt").
+        - Note: By default, rules are applied first, before other text
+          shaping/OpenType layout, as they are part of the Required Variation
+          Alternates (``rvrn``) OpenType feature.
+          See :ref:`rules-element` § Attributes.
+        """
+
+
+def evaluateRule(rule, location):
+    """Return True if any of the rule's conditionsets matches the given location."""
+    return any(evaluateConditions(c, location) for c in rule.conditionSets)
+
+
+def evaluateConditions(conditions, location):
+    """Return True if all the conditions match the given location.
+
+    - If a condition has no minimum, check for < maximum.
+    - If a condition has no maximum, check for > minimum.
+    """
+    for cd in conditions:
+        value = location[cd["name"]]
+        if cd.get("minimum") is None:
+            if value > cd["maximum"]:
+                return False
+        elif cd.get("maximum") is None:
+            if cd["minimum"] > value:
+                return False
+        elif not cd["minimum"] <= value <= cd["maximum"]:
+            return False
+    return True
+
+
+def processRules(rules, location, glyphNames):
+    """Apply these rules at this location to these glyph names.
+
+    Return a new list of glyphNames with substitutions applied.
+
+    - rule order matters
+    """
+    newNames = []
+    for rule in rules:
+        if evaluateRule(rule, location):
+            for name in glyphNames:
+                swap = False
+                for a, b in rule.subs:
+                    if name == a:
+                        swap = True
+                        break
+                if swap:
+                    newNames.append(b)
+                else:
+                    newNames.append(name)
+            glyphNames = newNames
+            newNames = []
+    return glyphNames
+
+
+AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
+SimpleLocationDict = Dict[str, float]
+
+
+class AxisMappingDescriptor(SimpleDescriptor):
+    """Represents the axis mapping element: mapping an input location
+    to an output location in the designspace.
+
+    .. code:: python
+
+        m1 = AxisMappingDescriptor()
+        m1.inputLocation = {"weight": 900, "width": 150}
+        m1.outputLocation = {"weight": 870}
+
+    .. code:: xml
+
+        <mappings>
+            <mapping>
+                <input>
+                    <dimension name="weight" xvalue="900"/>
+                    <dimension name="width" xvalue="150"/>
+                </input>
+                <output>
+                    <dimension name="weight" xvalue="870"/>
+                </output>
+            </mapping>
+        </mappings>
+    """
+
+    _attrs = ["inputLocation", "outputLocation"]
+
+    def __init__(
+        self,
+        *,
+        inputLocation=None,
+        outputLocation=None,
+        description=None,
+        groupDescription=None,
+    ):
+        self.inputLocation: SimpleLocationDict = inputLocation or {}
+        """dict. Axis values for the input of the mapping, in design space coordinates.
+
+        varLib.
+
+        .. versionadded:: 5.1
+        """
+        self.outputLocation: SimpleLocationDict = outputLocation or {}
+        """dict. Axis values for the output of the mapping, in design space coordinates.
+
+        varLib.
+
+        .. versionadded:: 5.1
+        """
+        self.description = description
+        """string. A description of the mapping.
+
+        varLib.
+
+        .. versionadded:: 5.2
+        """
+        self.groupDescription = groupDescription
+        """string. A description of the group of mappings.
+
+        varLib.
+
+        .. versionadded:: 5.2
+        """
+
+
+class InstanceDescriptor(SimpleDescriptor):
+    """Simple container for data related to the instance
+
+    .. code:: python
+
+        i2 = InstanceDescriptor()
+        i2.path = instancePath2
+        i2.familyName = "InstanceFamilyName"
+        i2.styleName = "InstanceStyleName"
+        i2.name = "instance.ufo2"
+        # anisotropic location
+        i2.designLocation = dict(weight=500, width=(400,300))
+        i2.postScriptFontName = "InstancePostscriptName"
+        i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
+        i2.styleMapStyleName = "InstanceStyleMapStyleName"
+        i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
+        doc.addInstance(i2)
+    """
+
+    flavor = "instance"
+    _defaultLanguageCode = "en"
+    _attrs = [
+        "filename",
+        "path",
+        "name",
+        "locationLabel",
+        "designLocation",
+        "userLocation",
+        "familyName",
+        "styleName",
+        "postScriptFontName",
+        "styleMapFamilyName",
+        "styleMapStyleName",
+        "localisedFamilyName",
+        "localisedStyleName",
+        "localisedStyleMapFamilyName",
+        "localisedStyleMapStyleName",
+        "glyphs",
+        "kerning",
+        "info",
+        "lib",
+    ]
+
+    filename = posixpath_property("_filename")
+    path = posixpath_property("_path")
+
+    def __init__(
+        self,
+        *,
+        filename=None,
+        path=None,
+        font=None,
+        name=None,
+        location=None,
+        locationLabel=None,
+        designLocation=None,
+        userLocation=None,
+        familyName=None,
+        styleName=None,
+        postScriptFontName=None,
+        styleMapFamilyName=None,
+        styleMapStyleName=None,
+        localisedFamilyName=None,
+        localisedStyleName=None,
+        localisedStyleMapFamilyName=None,
+        localisedStyleMapStyleName=None,
+        glyphs=None,
+        kerning=True,
+        info=True,
+        lib=None,
+    ):
+        self.filename = filename
+        """string. Relative path to the instance file, **as it is
+        in the document**. The file may or may not exist.
+
+        MutatorMath + VarLib.
+        """
+        self.path = path
+        """string. Absolute path to the instance file, calculated from
+        the document path and the string in the filename attr. The file may
+        or may not exist.
+
+        MutatorMath.
+        """
+        self.font = font
+        """Same as :attr:`SourceDescriptor.font`
+
+        .. seealso:: :attr:`SourceDescriptor.font`
+        """
+        self.name = name
+        """string. Unique identifier name of the instance, used to
+        identify it if it needs to be referenced from elsewhere in the
+        document.
+        """
+        self.locationLabel = locationLabel
+        """Name of a :class:`LocationLabelDescriptor`. If
+        provided, the instance should have the same location as the
+        LocationLabel.
+
+        .. seealso::
+            :meth:`getFullDesignLocation`
+            :meth:`getFullUserLocation`
+
+        .. versionadded:: 5.0
+        """
+        self.designLocation: AnisotropicLocationDict = (
+            designLocation if designLocation is not None else (location or {})
+        )
+        """dict.
Axis values for this instance, in design space coordinates. + + MutatorMath + varLib. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullDesignLocation` + :meth:`getFullUserLocation` + + .. versionadded:: 5.0 + """ + self.userLocation: SimpleLocationDict = userLocation or {} + """dict. Axis values for this instance, in user space coordinates. + + MutatorMath + varLib. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullDesignLocation` + :meth:`getFullUserLocation` + + .. versionadded:: 5.0 + """ + self.familyName = familyName + """string. Family name of this instance. + + MutatorMath + varLib. + """ + self.styleName = styleName + """string. Style name of this instance. + + MutatorMath + varLib. + """ + self.postScriptFontName = postScriptFontName + """string. Postscript fontname for this instance. + + MutatorMath + varLib. + """ + self.styleMapFamilyName = styleMapFamilyName + """string. StyleMap familyname for this instance. + + MutatorMath + varLib. + """ + self.styleMapStyleName = styleMapStyleName + """string. StyleMap stylename for this instance. + + MutatorMath + varLib. + """ + self.localisedFamilyName = localisedFamilyName or {} + """dict. A dictionary of localised family name + strings, keyed by language code. + """ + self.localisedStyleName = localisedStyleName or {} + """dict. A dictionary of localised stylename + strings, keyed by language code. + """ + self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} + """A dictionary of localised style map + familyname strings, keyed by language code. + """ + self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} + """A dictionary of localised style map + stylename strings, keyed by language code. + """ + self.glyphs = glyphs or {} + """dict for special master definitions for glyphs. If glyphs + need special masters (to record the results of executed rules for + example). + + MutatorMath. + + .. deprecated:: 5.0 + Use rules or sparse sources instead. + """ + self.kerning = kerning + """ bool. Indicates if this instance needs its kerning + calculated. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.info = info + """bool. Indicated if this instance needs the interpolating + font.info calculated. + + .. deprecated:: 5.0 + """ + + self.lib = lib or {} + """Custom data associated with this instance.""" + + @property + def location(self): + """dict. Axis values for this instance. + + MutatorMath + varLib. + + .. deprecated:: 5.0 + Use the more explicit alias for this property :attr:`designLocation`. 
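+
+        A minimal sketch of the aliasing (values are illustrative):
+
+        .. code:: python
+
+            i = InstanceDescriptor()
+            i.location = {"weight": 500}  # deprecated spelling
+            assert i.designLocation == {"weight": 500}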
+ """ + return self.designLocation + + @location.setter + def location(self, location: Optional[AnisotropicLocationDict]): + self.designLocation = location or {} + + def setStyleName(self, styleName, languageCode="en"): + """These methods give easier access to the localised names.""" + self.localisedStyleName[languageCode] = tostr(styleName) + + def getStyleName(self, languageCode="en"): + return self.localisedStyleName.get(languageCode) + + def setFamilyName(self, familyName, languageCode="en"): + self.localisedFamilyName[languageCode] = tostr(familyName) + + def getFamilyName(self, languageCode="en"): + return self.localisedFamilyName.get(languageCode) + + def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"): + self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) + + def getStyleMapStyleName(self, languageCode="en"): + return self.localisedStyleMapStyleName.get(languageCode) + + def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"): + self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) + + def getStyleMapFamilyName(self, languageCode="en"): + return self.localisedStyleMapFamilyName.get(languageCode) + + def clearLocation(self, axisName: Optional[str] = None): + """Clear all location-related fields. Ensures that + :attr:``designLocation`` and :attr:``userLocation`` are dictionaries + (possibly empty if clearing everything). + + In order to update the location of this instance wholesale, a user + should first clear all the fields, then change the field(s) for which + they have data. + + .. code:: python + + instance.clearLocation() + instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} + instance.userLocation = {'Opsz': 16} + + In order to update a single axis location, the user should only clear + that axis, then edit the values: + + .. code:: python + + instance.clearLocation('Weight') + instance.designLocation['Weight'] = (34, 36.5) + + Args: + axisName: if provided, only clear the location for that axis. + + .. versionadded:: 5.0 + """ + self.locationLabel = None + if axisName is None: + self.designLocation = {} + self.userLocation = {} + else: + if self.designLocation is None: + self.designLocation = {} + if axisName in self.designLocation: + del self.designLocation[axisName] + if self.userLocation is None: + self.userLocation = {} + if axisName in self.userLocation: + del self.userLocation[axisName] + + def getLocationLabelDescriptor( + self, doc: "DesignSpaceDocument" + ) -> Optional[LocationLabelDescriptor]: + """Get the :class:`LocationLabelDescriptor` instance that matches + this instances's :attr:`locationLabel`. + + Raises if the named label can't be found. + + .. versionadded:: 5.0 + """ + if self.locationLabel is None: + return None + label = doc.getLocationLabel(self.locationLabel) + if label is None: + raise DesignSpaceDocumentError( + "InstanceDescriptor.getLocationLabelDescriptor(): " + f"unknown location label `{self.locationLabel}` in instance `{self.name}`." + ) + return label + + def getFullDesignLocation( + self, doc: "DesignSpaceDocument" + ) -> AnisotropicLocationDict: + """Get the complete design location of this instance, by combining data + from the various location fields, default axis values and mappings, and + top-level location labels. 
+ + The source of truth for this instance's location is determined for each + axis independently by taking the first not-None field in this list: + + - ``locationLabel``: the location along this axis is the same as the + matching STAT format 4 label. No anisotropy. + - ``designLocation[axisName]``: the explicit design location along this + axis, possibly anisotropic. + - ``userLocation[axisName]``: the explicit user location along this + axis. No anisotropy. + - ``axis.default``: default axis value. No anisotropy. + + .. versionadded:: 5.0 + """ + label = self.getLocationLabelDescriptor(doc) + if label is not None: + return doc.map_forward(label.userLocation) # type: ignore + result: AnisotropicLocationDict = {} + for axis in doc.axes: + if axis.name in self.designLocation: + result[axis.name] = self.designLocation[axis.name] + elif axis.name in self.userLocation: + result[axis.name] = axis.map_forward(self.userLocation[axis.name]) + else: + result[axis.name] = axis.map_forward(axis.default) + return result + + def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: + """Get the complete user location for this instance. + + .. seealso:: :meth:`getFullDesignLocation` + + .. versionadded:: 5.0 + """ + return doc.map_backward(self.getFullDesignLocation(doc)) + + +def tagForAxisName(name): + # try to find or make a tag name for this axis name + names = { + "weight": ("wght", dict(en="Weight")), + "width": ("wdth", dict(en="Width")), + "optical": ("opsz", dict(en="Optical Size")), + "slant": ("slnt", dict(en="Slant")), + "italic": ("ital", dict(en="Italic")), + } + if name.lower() in names: + return names[name.lower()] + if len(name) < 4: + tag = name + "*" * (4 - len(name)) + else: + tag = name[:4] + return tag, dict(en=name) + + +class AbstractAxisDescriptor(SimpleDescriptor): + flavor = "axis" + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + # opentype tag for this axis + self.tag = tag + """string. Four letter tag for this axis. Some might be + registered at the `OpenType + specification `__. + Privately-defined axis tags must begin with an uppercase letter and + use only uppercase letters or digits. + """ + # name of the axis used in locations + self.name = name + """string. Name of the axis as it is used in the location dicts. + + MutatorMath + varLib. + """ + # names for UI purposes, if this is not a standard axis, + self.labelNames = labelNames or {} + """dict. When defining a non-registered axis, it will be + necessary to define user-facing readable names for the axis. Keyed by + xml:lang code. Values are required to be ``unicode`` strings, even if + they only contain ASCII characters. + """ + self.hidden = hidden + """bool. Whether this axis should be hidden in user interfaces. + """ + self.map = map or [] + """list of input / output values that can describe a warp of user space + to design space coordinates. If no map values are present, it is assumed + user space is the same as design space, as in [(minimum, minimum), + (maximum, maximum)]. + + varLib. + """ + self.axisOrdering = axisOrdering + """STAT table field ``axisOrdering``. + + See: `OTSpec STAT Axis Record `_ + + .. versionadded:: 5.0 + """ + self.axisLabels: List[AxisLabelDescriptor] = axisLabels or [] + """STAT table entries for Axis Value Tables format 1, 2, 3. + + See: `OTSpec STAT Axis Value Tables `_ + + .. 
versionadded:: 5.0 + """ + + +class AxisDescriptor(AbstractAxisDescriptor): + """Simple container for the axis data. + + Add more localisations? + + .. code:: python + + a1 = AxisDescriptor() + a1.minimum = 1 + a1.maximum = 1000 + a1.default = 400 + a1.name = "weight" + a1.tag = "wght" + a1.labelNames['fa-IR'] = "قطر" + a1.labelNames['en'] = "Wéíght" + a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] + a1.axisOrdering = 1 + a1.axisLabels = [ + AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) + ] + doc.addAxis(a1) + """ + + _attrs = [ + "tag", + "name", + "maximum", + "minimum", + "default", + "map", + "axisOrdering", + "axisLabels", + ] + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + minimum=None, + default=None, + maximum=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + super().__init__( + tag=tag, + name=name, + labelNames=labelNames, + hidden=hidden, + map=map, + axisOrdering=axisOrdering, + axisLabels=axisLabels, + ) + self.minimum = minimum + """number. The minimum value for this axis in user space. + + MutatorMath + varLib. + """ + self.maximum = maximum + """number. The maximum value for this axis in user space. + + MutatorMath + varLib. + """ + self.default = default + """number. The default value for this axis, i.e. when a new location is + created, this is the value this axis will get in user space. + + MutatorMath + varLib. + """ + + def serialize(self): + # output to a dict, used in testing + return dict( + tag=self.tag, + name=self.name, + labelNames=self.labelNames, + maximum=self.maximum, + minimum=self.minimum, + default=self.default, + hidden=self.hidden, + map=self.map, + axisOrdering=self.axisOrdering, + axisLabels=self.axisLabels, + ) + + def map_forward(self, v): + """Maps value from axis mapping's input (user) to output (design).""" + from fontTools.varLib.models import piecewiseLinearMap + + if not self.map: + return v + return piecewiseLinearMap(v, {k: v for k, v in self.map}) + + def map_backward(self, v): + """Maps value from axis mapping's output (design) to input (user).""" + from fontTools.varLib.models import piecewiseLinearMap + + if isinstance(v, tuple): + v = v[0] + if not self.map: + return v + return piecewiseLinearMap(v, {v: k for k, v in self.map}) + + +class DiscreteAxisDescriptor(AbstractAxisDescriptor): + """Container for discrete axis data. + + Use this for axes that do not interpolate. The main difference from a + continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, + while a discrete axis has a list of ``values``. + + Example: an Italic axis with 2 stops, Roman and Italic, that are not + compatible. The axis still allows to bind together the full font family, + which is useful for the STAT table, however it can't become a variation + axis in a VF. + + .. code:: python + + a2 = DiscreteAxisDescriptor() + a2.values = [0, 1] + a2.default = 0 + a2.name = "Italic" + a2.tag = "ITAL" + a2.labelNames['fr'] = "Italique" + a2.map = [(0, 0), (1, -11)] + a2.axisOrdering = 2 + a2.axisLabels = [ + AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) + ] + doc.addAxis(a2) + + .. 
versionadded:: 5.0 + """ + + flavor = "axis" + _attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels") + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + values=None, + default=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + super().__init__( + tag=tag, + name=name, + labelNames=labelNames, + hidden=hidden, + map=map, + axisOrdering=axisOrdering, + axisLabels=axisLabels, + ) + self.default: float = default + """The default value for this axis, i.e. when a new location is + created, this is the value this axis will get in user space. + + However, this default value is less important than in continuous axes: + + - it doesn't define the "neutral" version of outlines from which + deltas would apply, as this axis does not interpolate. + - it doesn't provide the reference glyph set for the designspace, as + fonts at each value can have different glyph sets. + """ + self.values: List[float] = values or [] + """List of possible values for this axis. Contrary to continuous axes, + only the values in this list can be taken by the axis, nothing in-between. + """ + + def map_forward(self, value): + """Maps value from axis mapping's input to output. + + Returns value unchanged if no mapping entry is found. + + Note: for discrete axes, each value must have its mapping entry, if + you intend that value to be mapped. + """ + return next((v for k, v in self.map if k == value), value) + + def map_backward(self, value): + """Maps value from axis mapping's output to input. + + Returns value unchanged if no mapping entry is found. + + Note: for discrete axes, each value must have its mapping entry, if + you intend that value to be mapped. + """ + if isinstance(value, tuple): + value = value[0] + return next((k for k, v in self.map if v == value), value) + + +class AxisLabelDescriptor(SimpleDescriptor): + """Container for axis label data. + + Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). + All values are user values. + See: `OTSpec STAT Axis value table, format 1, 2, 3 `_ + + The STAT format of the Axis value depends on which field are filled-in, + see :meth:`getFormat` + + .. versionadded:: 5.0 + """ + + flavor = "label" + _attrs = ( + "userMinimum", + "userValue", + "userMaximum", + "name", + "elidable", + "olderSibling", + "linkedUserValue", + "labelNames", + ) + + def __init__( + self, + *, + name, + userValue, + userMinimum=None, + userMaximum=None, + elidable=False, + olderSibling=False, + linkedUserValue=None, + labelNames=None, + ): + self.userMinimum: Optional[float] = userMinimum + """STAT field ``rangeMinValue`` (format 2).""" + self.userValue: float = userValue + """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2).""" + self.userMaximum: Optional[float] = userMaximum + """STAT field ``rangeMaxValue`` (format 2).""" + self.name: str = name + """Label for this axis location, STAT field ``valueNameID``.""" + self.elidable: bool = elidable + """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. + + See: `OTSpec STAT Flags `_ + """ + self.olderSibling: bool = olderSibling + """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. + + See: `OTSpec STAT Flags `_ + """ + self.linkedUserValue: Optional[float] = linkedUserValue + """STAT field ``linkedValue`` (format 3).""" + self.labelNames: MutableMapping[str, str] = labelNames or {} + """User-facing translations of this location's label. Keyed by + ``xml:lang`` code. 
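+
+        A short sketch (the language code is illustrative):
+
+        .. code:: python
+
+            label = AxisLabelDescriptor(name="Bold", userValue=700)
+            label.labelNames["fr"] = "Gras"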
+ """ + + def getFormat(self) -> int: + """Determine which format of STAT Axis value to use to encode this label. + + =========== ========= =========== =========== =============== + STAT Format userValue userMinimum userMaximum linkedUserValue + =========== ========= =========== =========== =============== + 1 ✅ ❌ ❌ ❌ + 2 ✅ ✅ ✅ ❌ + 3 ✅ ❌ ❌ ✅ + =========== ========= =========== =========== =============== + """ + if self.linkedUserValue is not None: + return 3 + if self.userMinimum is not None or self.userMaximum is not None: + return 2 + return 1 + + @property + def defaultName(self) -> str: + """Return the English name from :attr:`labelNames` or the :attr:`name`.""" + return self.labelNames.get("en") or self.name + + +class LocationLabelDescriptor(SimpleDescriptor): + """Container for location label data. + + Analogue of OpenType's STAT data for a free-floating location (format 4). + All values are user values. + + See: `OTSpec STAT Axis value table, format 4 `_ + + .. versionadded:: 5.0 + """ + + flavor = "label" + _attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames") + + def __init__( + self, + *, + name, + userLocation, + elidable=False, + olderSibling=False, + labelNames=None, + ): + self.name: str = name + """Label for this named location, STAT field ``valueNameID``.""" + self.userLocation: SimpleLocationDict = userLocation or {} + """Location in user coordinates along each axis. + + If an axis is not mentioned, it is assumed to be at its default location. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullUserLocation` + """ + self.elidable: bool = elidable + """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. + + See: `OTSpec STAT Flags `_ + """ + self.olderSibling: bool = olderSibling + """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. + + See: `OTSpec STAT Flags `_ + """ + self.labelNames: Dict[str, str] = labelNames or {} + """User-facing translations of this location's label. Keyed by + xml:lang code. + """ + + @property + def defaultName(self) -> str: + """Return the English name from :attr:`labelNames` or the :attr:`name`.""" + return self.labelNames.get("en") or self.name + + def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: + """Get the complete user location of this label, by combining data + from the explicit user location and default axis values. + + .. versionadded:: 5.0 + """ + return { + axis.name: self.userLocation.get(axis.name, axis.default) + for axis in doc.axes + } + + +class VariableFontDescriptor(SimpleDescriptor): + """Container for variable fonts, sub-spaces of the Designspace. + + Use-cases: + + - From a single DesignSpace with discrete axes, define 1 variable font + per value on the discrete axes. Before version 5, you would have needed + 1 DesignSpace per such variable font, and a lot of data duplication. + - From a big variable font with many axes, define subsets of that variable + font that only include some axes and freeze other axes at a given location. + + .. versionadded:: 5.0 + """ + + flavor = "variable-font" + _attrs = ("filename", "axisSubsets", "lib") + + filename = posixpath_property("_filename") + + def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): + self.name: str = name + """string, required. Name of this variable to identify it during the + build process and from other parts of the document, and also as a + filename in case the filename property is empty. + + VarLib. + """ + self.filename: str = filename + """string, optional. 
Relative path to the variable font file, **as it is + in the document**. The file may or may not exist. + + If not specified, the :attr:`name` will be used as a basename for the file. + """ + self.axisSubsets: List[ + Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor] + ] = (axisSubsets or []) + """Axis subsets to include in this variable font. + + If an axis is not mentioned, assume that we only want the default + location of that axis (same as a :class:`ValueAxisSubsetDescriptor`). + """ + self.lib: MutableMapping[str, Any] = lib or {} + """Custom data associated with this variable font.""" + + +class RangeAxisSubsetDescriptor(SimpleDescriptor): + """Subset of a continuous axis to include in a variable font. + + .. versionadded:: 5.0 + """ + + flavor = "axis-subset" + _attrs = ("name", "userMinimum", "userDefault", "userMaximum") + + def __init__( + self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf + ): + self.name: str = name + """Name of the :class:`AxisDescriptor` to subset.""" + self.userMinimum: float = userMinimum + """New minimum value of the axis in the target variable font. + If not specified, assume the same minimum value as the full axis. + (default = ``-math.inf``) + """ + self.userDefault: Optional[float] = userDefault + """New default value of the axis in the target variable font. + If not specified, assume the same default value as the full axis. + (default = ``None``) + """ + self.userMaximum: float = userMaximum + """New maximum value of the axis in the target variable font. + If not specified, assume the same maximum value as the full axis. + (default = ``math.inf``) + """ + + +class ValueAxisSubsetDescriptor(SimpleDescriptor): + """Single value of a discrete or continuous axis to use in a variable font. + + .. versionadded:: 5.0 + """ + + flavor = "axis-subset" + _attrs = ("name", "userValue") + + def __init__(self, *, name, userValue): + self.name: str = name + """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` + to "snapshot" or "freeze". 
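+
+        For example, to freeze a hypothetical Italic axis at its upright value:
+
+        .. code:: python
+
+            subset = ValueAxisSubsetDescriptor(name="Italic", userValue=0)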
+ """ + self.userValue: float = userValue + """Value in user coordinates at which to freeze the given axis.""" + + +class BaseDocWriter(object): + _whiteSpace = " " + axisDescriptorClass = AxisDescriptor + discreteAxisDescriptorClass = DiscreteAxisDescriptor + axisLabelDescriptorClass = AxisLabelDescriptor + axisMappingDescriptorClass = AxisMappingDescriptor + locationLabelDescriptorClass = LocationLabelDescriptor + ruleDescriptorClass = RuleDescriptor + sourceDescriptorClass = SourceDescriptor + variableFontDescriptorClass = VariableFontDescriptor + valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor + rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor + instanceDescriptorClass = InstanceDescriptor + + @classmethod + def getAxisDecriptor(cls): + return cls.axisDescriptorClass() + + @classmethod + def getAxisMappingDescriptor(cls): + return cls.axisMappingDescriptorClass() + + @classmethod + def getSourceDescriptor(cls): + return cls.sourceDescriptorClass() + + @classmethod + def getInstanceDescriptor(cls): + return cls.instanceDescriptorClass() + + @classmethod + def getRuleDescriptor(cls): + return cls.ruleDescriptorClass() + + def __init__(self, documentPath, documentObject: DesignSpaceDocument): + self.path = documentPath + self.documentObject = documentObject + self.effectiveFormatTuple = self._getEffectiveFormatTuple() + self.root = ET.Element("designspace") + + def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): + self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple) + + if ( + self.documentObject.axes + or self.documentObject.axisMappings + or self.documentObject.elidedFallbackName is not None + ): + axesElement = ET.Element("axes") + if self.documentObject.elidedFallbackName is not None: + axesElement.attrib["elidedfallbackname"] = ( + self.documentObject.elidedFallbackName + ) + self.root.append(axesElement) + for axisObject in self.documentObject.axes: + self._addAxis(axisObject) + + if self.documentObject.axisMappings: + mappingsElement = None + lastGroup = object() + for mappingObject in self.documentObject.axisMappings: + if getattr(mappingObject, "groupDescription", None) != lastGroup: + if mappingsElement is not None: + self.root.findall(".axes")[0].append(mappingsElement) + lastGroup = getattr(mappingObject, "groupDescription", None) + mappingsElement = ET.Element("mappings") + if lastGroup is not None: + mappingsElement.attrib["description"] = lastGroup + self._addAxisMapping(mappingsElement, mappingObject) + if mappingsElement is not None: + self.root.findall(".axes")[0].append(mappingsElement) + + if self.documentObject.locationLabels: + labelsElement = ET.Element("labels") + for labelObject in self.documentObject.locationLabels: + self._addLocationLabel(labelsElement, labelObject) + self.root.append(labelsElement) + + if self.documentObject.rules: + if getattr(self.documentObject, "rulesProcessingLast", False): + attributes = {"processing": "last"} + else: + attributes = {} + self.root.append(ET.Element("rules", attributes)) + for ruleObject in self.documentObject.rules: + self._addRule(ruleObject) + + if self.documentObject.sources: + self.root.append(ET.Element("sources")) + for sourceObject in self.documentObject.sources: + self._addSource(sourceObject) + + if self.documentObject.variableFonts: + variableFontsElement = ET.Element("variable-fonts") + for variableFont in self.documentObject.variableFonts: + self._addVariableFont(variableFontsElement, variableFont) + self.root.append(variableFontsElement) + + 
if self.documentObject.instances: + self.root.append(ET.Element("instances")) + for instanceObject in self.documentObject.instances: + self._addInstance(instanceObject) + + if self.documentObject.lib: + self._addLib(self.root, self.documentObject.lib, 2) + + tree = ET.ElementTree(self.root) + tree.write( + self.path, + encoding=encoding, + method="xml", + xml_declaration=xml_declaration, + pretty_print=pretty, + ) + + def _getEffectiveFormatTuple(self): + """Try to use the version specified in the document, or a sufficiently + recent version to be able to encode what the document contains. + """ + minVersion = self.documentObject.formatTuple + if ( + any( + hasattr(axis, "values") + or axis.axisOrdering is not None + or axis.axisLabels + for axis in self.documentObject.axes + ) + or self.documentObject.locationLabels + or any(source.localisedFamilyName for source in self.documentObject.sources) + or self.documentObject.variableFonts + or any( + instance.locationLabel or instance.userLocation + for instance in self.documentObject.instances + ) + ): + if minVersion < (5, 0): + minVersion = (5, 0) + if self.documentObject.axisMappings: + if minVersion < (5, 1): + minVersion = (5, 1) + return minVersion + + def _makeLocationElement(self, locationObject, name=None): + """Convert Location dict to a locationElement.""" + locElement = ET.Element("location") + if name is not None: + locElement.attrib["name"] = name + validatedLocation = self.documentObject.newDefaultLocation() + for axisName, axisValue in locationObject.items(): + if axisName in validatedLocation: + # only accept values we know + validatedLocation[axisName] = axisValue + for dimensionName, dimensionValue in validatedLocation.items(): + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = dimensionName + if type(dimensionValue) == tuple: + dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0]) + dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1]) + else: + dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue) + locElement.append(dimElement) + return locElement, validatedLocation + + def intOrFloat(self, num): + if int(num) == num: + return "%d" % num + return ("%f" % num).rstrip("0").rstrip(".") + + def _addRule(self, ruleObject): + # if none of the conditions have minimum or maximum values, do not add the rule. 
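+        # The serialised shape is roughly (attribute names match RuleDescriptor):
+        #   <rule name="..."><conditionset><condition name="..." minimum="..." maximum="..."/>
+        #   </conditionset><sub name="..." with="..."/></rule>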
+ ruleElement = ET.Element("rule") + if ruleObject.name is not None: + ruleElement.attrib["name"] = ruleObject.name + for conditions in ruleObject.conditionSets: + conditionsetElement = ET.Element("conditionset") + for cond in conditions: + if cond.get("minimum") is None and cond.get("maximum") is None: + # neither is defined, don't add this condition + continue + conditionElement = ET.Element("condition") + conditionElement.attrib["name"] = cond.get("name") + if cond.get("minimum") is not None: + conditionElement.attrib["minimum"] = self.intOrFloat( + cond.get("minimum") + ) + if cond.get("maximum") is not None: + conditionElement.attrib["maximum"] = self.intOrFloat( + cond.get("maximum") + ) + conditionsetElement.append(conditionElement) + if len(conditionsetElement): + ruleElement.append(conditionsetElement) + for sub in ruleObject.subs: + subElement = ET.Element("sub") + subElement.attrib["name"] = sub[0] + subElement.attrib["with"] = sub[1] + ruleElement.append(subElement) + if len(ruleElement): + self.root.findall(".rules")[0].append(ruleElement) + + def _addAxis(self, axisObject): + axisElement = ET.Element("axis") + axisElement.attrib["tag"] = axisObject.tag + axisElement.attrib["name"] = axisObject.name + self._addLabelNames(axisElement, axisObject.labelNames) + if axisObject.map: + for inputValue, outputValue in axisObject.map: + mapElement = ET.Element("map") + mapElement.attrib["input"] = self.intOrFloat(inputValue) + mapElement.attrib["output"] = self.intOrFloat(outputValue) + axisElement.append(mapElement) + if axisObject.axisOrdering is not None or axisObject.axisLabels: + labelsElement = ET.Element("labels") + if axisObject.axisOrdering is not None: + labelsElement.attrib["ordering"] = str(axisObject.axisOrdering) + for label in axisObject.axisLabels: + self._addAxisLabel(labelsElement, label) + axisElement.append(labelsElement) + if hasattr(axisObject, "minimum"): + axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum) + axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum) + elif hasattr(axisObject, "values"): + axisElement.attrib["values"] = " ".join( + self.intOrFloat(v) for v in axisObject.values + ) + axisElement.attrib["default"] = self.intOrFloat(axisObject.default) + if axisObject.hidden: + axisElement.attrib["hidden"] = "1" + self.root.findall(".axes")[0].append(axisElement) + + def _addAxisMapping(self, mappingsElement, mappingObject): + mappingElement = ET.Element("mapping") + if getattr(mappingObject, "description", None) is not None: + mappingElement.attrib["description"] = mappingObject.description + for what in ("inputLocation", "outputLocation"): + whatObject = getattr(mappingObject, what, None) + if whatObject is None: + continue + whatElement = ET.Element(what[:-8]) + mappingElement.append(whatElement) + + for name, value in whatObject.items(): + dimensionElement = ET.Element("dimension") + dimensionElement.attrib["name"] = name + dimensionElement.attrib["xvalue"] = self.intOrFloat(value) + whatElement.append(dimensionElement) + + mappingsElement.append(mappingElement) + + def _addAxisLabel( + self, axisElement: ET.Element, label: AxisLabelDescriptor + ) -> None: + labelElement = ET.Element("label") + labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue) + if label.userMinimum is not None: + labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum) + if label.userMaximum is not None: + labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum) + labelElement.attrib["name"] = label.name + 
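+        # the remaining STAT fields are optional and only written when set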
if label.elidable: + labelElement.attrib["elidable"] = "true" + if label.olderSibling: + labelElement.attrib["oldersibling"] = "true" + if label.linkedUserValue is not None: + labelElement.attrib["linkeduservalue"] = self.intOrFloat( + label.linkedUserValue + ) + self._addLabelNames(labelElement, label.labelNames) + axisElement.append(labelElement) + + def _addLabelNames(self, parentElement, labelNames): + for languageCode, labelName in sorted(labelNames.items()): + languageElement = ET.Element("labelname") + languageElement.attrib[XML_LANG] = languageCode + languageElement.text = labelName + parentElement.append(languageElement) + + def _addLocationLabel( + self, parentElement: ET.Element, label: LocationLabelDescriptor + ) -> None: + labelElement = ET.Element("label") + labelElement.attrib["name"] = label.name + if label.elidable: + labelElement.attrib["elidable"] = "true" + if label.olderSibling: + labelElement.attrib["oldersibling"] = "true" + self._addLabelNames(labelElement, label.labelNames) + self._addLocationElement(labelElement, userLocation=label.userLocation) + parentElement.append(labelElement) + + def _addLocationElement( + self, + parentElement, + *, + designLocation: AnisotropicLocationDict = None, + userLocation: SimpleLocationDict = None, + ): + locElement = ET.Element("location") + for axis in self.documentObject.axes: + if designLocation is not None and axis.name in designLocation: + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = axis.name + value = designLocation[axis.name] + if isinstance(value, tuple): + dimElement.attrib["xvalue"] = self.intOrFloat(value[0]) + dimElement.attrib["yvalue"] = self.intOrFloat(value[1]) + else: + dimElement.attrib["xvalue"] = self.intOrFloat(value) + locElement.append(dimElement) + elif userLocation is not None and axis.name in userLocation: + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = axis.name + value = userLocation[axis.name] + dimElement.attrib["uservalue"] = self.intOrFloat(value) + locElement.append(dimElement) + if len(locElement) > 0: + parentElement.append(locElement) + + def _addInstance(self, instanceObject): + instanceElement = ET.Element("instance") + if instanceObject.name is not None: + instanceElement.attrib["name"] = instanceObject.name + if instanceObject.locationLabel is not None: + instanceElement.attrib["location"] = instanceObject.locationLabel + if instanceObject.familyName is not None: + instanceElement.attrib["familyname"] = instanceObject.familyName + if instanceObject.styleName is not None: + instanceElement.attrib["stylename"] = instanceObject.styleName + # add localisations + if instanceObject.localisedStyleName: + languageCodes = list(instanceObject.localisedStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedStyleNameElement = ET.Element("stylename") + localisedStyleNameElement.attrib[XML_LANG] = code + localisedStyleNameElement.text = instanceObject.getStyleName(code) + instanceElement.append(localisedStyleNameElement) + if instanceObject.localisedFamilyName: + languageCodes = list(instanceObject.localisedFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedFamilyNameElement = ET.Element("familyname") + localisedFamilyNameElement.attrib[XML_LANG] = code + localisedFamilyNameElement.text = instanceObject.getFamilyName(code) + 
instanceElement.append(localisedFamilyNameElement) + if instanceObject.localisedStyleMapStyleName: + languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapStyleNameElement = ET.Element("stylemapstylename") + localisedStyleMapStyleNameElement.attrib[XML_LANG] = code + localisedStyleMapStyleNameElement.text = ( + instanceObject.getStyleMapStyleName(code) + ) + instanceElement.append(localisedStyleMapStyleNameElement) + if instanceObject.localisedStyleMapFamilyName: + languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapFamilyNameElement = ET.Element("stylemapfamilyname") + localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code + localisedStyleMapFamilyNameElement.text = ( + instanceObject.getStyleMapFamilyName(code) + ) + instanceElement.append(localisedStyleMapFamilyNameElement) + + if self.effectiveFormatTuple >= (5, 0): + if instanceObject.locationLabel is None: + self._addLocationElement( + instanceElement, + designLocation=instanceObject.designLocation, + userLocation=instanceObject.userLocation, + ) + else: + # Pre-version 5.0 code was validating and filling in the location + # dict while writing it out, as preserved below. + if instanceObject.location is not None: + locationElement, instanceObject.location = self._makeLocationElement( + instanceObject.location + ) + instanceElement.append(locationElement) + if instanceObject.filename is not None: + instanceElement.attrib["filename"] = instanceObject.filename + if instanceObject.postScriptFontName is not None: + instanceElement.attrib["postscriptfontname"] = ( + instanceObject.postScriptFontName + ) + if instanceObject.styleMapFamilyName is not None: + instanceElement.attrib["stylemapfamilyname"] = ( + instanceObject.styleMapFamilyName + ) + if instanceObject.styleMapStyleName is not None: + instanceElement.attrib["stylemapstylename"] = ( + instanceObject.styleMapStyleName + ) + if self.effectiveFormatTuple < (5, 0): + # Deprecated members as of version 5.0 + if instanceObject.glyphs: + if instanceElement.findall(".glyphs") == []: + glyphsElement = ET.Element("glyphs") + instanceElement.append(glyphsElement) + glyphsElement = instanceElement.findall(".glyphs")[0] + for glyphName, data in sorted(instanceObject.glyphs.items()): + glyphElement = self._writeGlyphElement( + instanceElement, instanceObject, glyphName, data + ) + glyphsElement.append(glyphElement) + if instanceObject.kerning: + kerningElement = ET.Element("kerning") + instanceElement.append(kerningElement) + if instanceObject.info: + infoElement = ET.Element("info") + instanceElement.append(infoElement) + self._addLib(instanceElement, instanceObject.lib, 4) + self.root.findall(".instances")[0].append(instanceElement) + + def _addSource(self, sourceObject): + sourceElement = ET.Element("source") + if sourceObject.filename is not None: + sourceElement.attrib["filename"] = sourceObject.filename + if sourceObject.name is not None: + if sourceObject.name.find("temp_master") != 0: + # do not save temporary source names + sourceElement.attrib["name"] = sourceObject.name + if sourceObject.familyName is not None: + sourceElement.attrib["familyname"] = sourceObject.familyName + if sourceObject.styleName is not None: + sourceElement.attrib["stylename"] = sourceObject.styleName + if sourceObject.layerName is not None: + sourceElement.attrib["layer"] 
+        if sourceObject.localisedFamilyName:
+            languageCodes = list(sourceObject.localisedFamilyName.keys())
+            languageCodes.sort()
+            for code in languageCodes:
+                if code == "en":
+                    continue  # already stored in the element attribute
+                localisedFamilyNameElement = ET.Element("familyname")
+                localisedFamilyNameElement.attrib[XML_LANG] = code
+                localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
+                sourceElement.append(localisedFamilyNameElement)
+        if sourceObject.copyLib:
+            libElement = ET.Element("lib")
+            libElement.attrib["copy"] = "1"
+            sourceElement.append(libElement)
+        if sourceObject.copyGroups:
+            groupsElement = ET.Element("groups")
+            groupsElement.attrib["copy"] = "1"
+            sourceElement.append(groupsElement)
+        if sourceObject.copyFeatures:
+            featuresElement = ET.Element("features")
+            featuresElement.attrib["copy"] = "1"
+            sourceElement.append(featuresElement)
+        if sourceObject.copyInfo or sourceObject.muteInfo:
+            infoElement = ET.Element("info")
+            if sourceObject.copyInfo:
+                infoElement.attrib["copy"] = "1"
+            if sourceObject.muteInfo:
+                infoElement.attrib["mute"] = "1"
+            sourceElement.append(infoElement)
+        if sourceObject.muteKerning:
+            kerningElement = ET.Element("kerning")
+            kerningElement.attrib["mute"] = "1"
+            sourceElement.append(kerningElement)
+        if sourceObject.mutedGlyphNames:
+            for name in sourceObject.mutedGlyphNames:
+                glyphElement = ET.Element("glyph")
+                glyphElement.attrib["name"] = name
+                glyphElement.attrib["mute"] = "1"
+                sourceElement.append(glyphElement)
+        if self.effectiveFormatTuple >= (5, 0):
+            self._addLocationElement(
+                sourceElement, designLocation=sourceObject.location
+            )
+        else:
+            # Pre-version 5.0 code was validating and filling in the location
+            # dict while writing it out, as preserved below.
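+            # (_makeLocationElement returns both the XML element and the
+            # validated location dict, which is written back onto the source
+            # object as a side effect of serializing it.)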
+            locationElement, sourceObject.location = self._makeLocationElement(
+                sourceObject.location
+            )
+            sourceElement.append(locationElement)
+        self.root.findall(".sources")[0].append(sourceElement)
+
+    def _addVariableFont(
+        self, parentElement: ET.Element, vf: VariableFontDescriptor
+    ) -> None:
+        vfElement = ET.Element("variable-font")
+        vfElement.attrib["name"] = vf.name
+        if vf.filename is not None:
+            vfElement.attrib["filename"] = vf.filename
+        if vf.axisSubsets:
+            subsetsElement = ET.Element("axis-subsets")
+            for subset in vf.axisSubsets:
+                subsetElement = ET.Element("axis-subset")
+                subsetElement.attrib["name"] = subset.name
+                # Mypy doesn't support narrowing union types via hasattr()
+                # https://mypy.readthedocs.io/en/stable/type_narrowing.html
+                # TODO(Python 3.10): use TypeGuard
+                if hasattr(subset, "userMinimum"):
+                    subset = cast(RangeAxisSubsetDescriptor, subset)
+                    if subset.userMinimum != -math.inf:
+                        subsetElement.attrib["userminimum"] = self.intOrFloat(
+                            subset.userMinimum
+                        )
+                    if subset.userMaximum != math.inf:
+                        subsetElement.attrib["usermaximum"] = self.intOrFloat(
+                            subset.userMaximum
+                        )
+                    if subset.userDefault is not None:
+                        subsetElement.attrib["userdefault"] = self.intOrFloat(
+                            subset.userDefault
+                        )
+                elif hasattr(subset, "userValue"):
+                    subset = cast(ValueAxisSubsetDescriptor, subset)
+                    subsetElement.attrib["uservalue"] = self.intOrFloat(
+                        subset.userValue
+                    )
+                subsetsElement.append(subsetElement)
+            vfElement.append(subsetsElement)
+        self._addLib(vfElement, vf.lib, 4)
+        parentElement.append(vfElement)
+
+    def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
+        if not data:
+            return
+        libElement = ET.Element("lib")
+        libElement.append(plistlib.totree(data, indent_level=indent_level))
+        parentElement.append(libElement)
+
+    def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
+        glyphElement = ET.Element("glyph")
+        if data.get("mute"):
+            glyphElement.attrib["mute"] = "1"
+        if data.get("unicodes") is not None:
+            glyphElement.attrib["unicode"] = " ".join(
+                [hex(u) for u in data.get("unicodes")]
+            )
+        if data.get("instanceLocation") is not None:
+            locationElement, data["instanceLocation"] = self._makeLocationElement(
+                data.get("instanceLocation")
+            )
+            glyphElement.append(locationElement)
+        if glyphName is not None:
+            glyphElement.attrib["name"] = glyphName
+        if data.get("note") is not None:
+            noteElement = ET.Element("note")
+            noteElement.text = data.get("note")
+            glyphElement.append(noteElement)
+        if data.get("masters") is not None:
+            mastersElement = ET.Element("masters")
+            for m in data.get("masters"):
+                masterElement = ET.Element("master")
+                if m.get("glyphName") is not None:
+                    masterElement.attrib["glyphname"] = m.get("glyphName")
+                if m.get("font") is not None:
+                    masterElement.attrib["source"] = m.get("font")
+                if m.get("location") is not None:
+                    locationElement, m["location"] = self._makeLocationElement(
+                        m.get("location")
+                    )
+                    masterElement.append(locationElement)
+                mastersElement.append(masterElement)
+            glyphElement.append(mastersElement)
+        return glyphElement
+
+
+class BaseDocReader(LogMixin):
+    axisDescriptorClass = AxisDescriptor
+    discreteAxisDescriptorClass = DiscreteAxisDescriptor
+    axisLabelDescriptorClass = AxisLabelDescriptor
+    axisMappingDescriptorClass = AxisMappingDescriptor
+    locationLabelDescriptorClass = LocationLabelDescriptor
+    ruleDescriptorClass = RuleDescriptor
+    sourceDescriptorClass = SourceDescriptor
+    variableFontsDescriptorClass = VariableFontDescriptor
+    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
+    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
+    instanceDescriptorClass = InstanceDescriptor
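+    # The descriptor classes above are looked up on self (e.g.
+    # self.ruleDescriptorClass() in readRules below), presumably so that
+    # subclasses can substitute their own descriptor implementations.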
+
+    def __init__(self, documentPath, documentObject):
+        self.path = documentPath
+        self.documentObject = documentObject
+        tree = ET.parse(self.path)
+        self.root = tree.getroot()
+        self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
+        self._axes = []
+        self.rules = []
+        self.sources = []
+        self.instances = []
+        self.axisDefaults = {}
+        self._strictAxisNames = True
+
+    @classmethod
+    def fromstring(cls, string, documentObject):
+        f = BytesIO(tobytes(string, encoding="utf-8"))
+        self = cls(f, documentObject)
+        self.path = None
+        return self
+
+    def read(self):
+        self.readAxes()
+        self.readLabels()
+        self.readRules()
+        self.readVariableFonts()
+        self.readSources()
+        self.readInstances()
+        self.readLib()
+
+    def readRules(self):
+        # we also need to read any conditions that are outside of a condition set.
+        rules = []
+        rulesElement = self.root.find(".rules")
+        if rulesElement is not None:
+            processingValue = rulesElement.attrib.get("processing", "first")
+            if processingValue not in {"first", "last"}:
+                raise DesignSpaceDocumentError(
+                    "<rules> processing attribute value is not valid: %r, "
+                    "expected 'first' or 'last'" % processingValue
+                )
+            self.documentObject.rulesProcessingLast = processingValue == "last"
+        for ruleElement in self.root.findall(".rules/rule"):
+            ruleObject = self.ruleDescriptorClass()
+            ruleName = ruleObject.name = ruleElement.attrib.get("name")
+            # read any stray conditions outside a condition set
+            externalConditions = self._readConditionElements(
+                ruleElement,
+                ruleName,
+            )
+            if externalConditions:
+                ruleObject.conditionSets.append(externalConditions)
+                self.log.info(
+                    "Found stray rule conditions outside a conditionset. "
+                    "Wrapped them in a new conditionset."
+                )
+            # read the conditionsets
+            for conditionSetElement in ruleElement.findall(".conditionset"):
+                conditionSet = self._readConditionElements(
+                    conditionSetElement,
+                    ruleName,
+                )
+                if conditionSet is not None:
+                    ruleObject.conditionSets.append(conditionSet)
+            for subElement in ruleElement.findall(".sub"):
+                a = subElement.attrib["name"]
+                b = subElement.attrib["with"]
+                ruleObject.subs.append((a, b))
+            rules.append(ruleObject)
+        self.documentObject.rules = rules
+
+    def _readConditionElements(self, parentElement, ruleName=None):
+        cds = []
+        for conditionElement in parentElement.findall(".condition"):
+            cd = {}
+            cdMin = conditionElement.attrib.get("minimum")
+            if cdMin is not None:
+                cd["minimum"] = float(cdMin)
+            else:
+                # will allow these to be None, assume axis.minimum
+                cd["minimum"] = None
+            cdMax = conditionElement.attrib.get("maximum")
+            if cdMax is not None:
+                cd["maximum"] = float(cdMax)
+            else:
+                # will allow these to be None, assume axis.maximum
+                cd["maximum"] = None
+            cd["name"] = conditionElement.attrib.get("name")
+            # sanity check: a condition needs at least one explicit bound
+            if cd.get("minimum") is None and cd.get("maximum") is None:
+                raise DesignSpaceDocumentError(
+                    "condition missing required minimum or maximum in rule"
+                    + (" '%s'" % ruleName if ruleName is not None else "")
+                )
+            cds.append(cd)
+        return cds
+
+    def readAxes(self):
+        # read the axes elements, including the warp map.
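+        # A continuous axis carries minimum/default/maximum attributes; from
+        # format 5.0 a discrete axis instead carries a space-separated
+        # "values" list. Hypothetical examples of both forms:
+        #   <axis tag="wght" name="Weight" minimum="100" default="400" maximum="900"/>
+        #   <axis tag="ital" name="Italic" values="0 1" default="0"/>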
+        axesElement = self.root.find(".axes")
+        if axesElement is not None and "elidedfallbackname" in axesElement.attrib:
+            self.documentObject.elidedFallbackName = axesElement.attrib[
+                "elidedfallbackname"
+            ]
+        axisElements = self.root.findall(".axes/axis")
+        if not axisElements:
+            return
+        for axisElement in axisElements:
+            if (
+                self.documentObject.formatTuple >= (5, 0)
+                and "values" in axisElement.attrib
+            ):
+                axisObject = self.discreteAxisDescriptorClass()
+                axisObject.values = [
+                    float(s) for s in axisElement.attrib["values"].split(" ")
+                ]
+            else:
+                axisObject = self.axisDescriptorClass()
+                axisObject.minimum = float(axisElement.attrib.get("minimum"))
+                axisObject.maximum = float(axisElement.attrib.get("maximum"))
+            axisObject.default = float(axisElement.attrib.get("default"))
+            axisObject.name = axisElement.attrib.get("name")
+            if axisElement.attrib.get("hidden", False):
+                axisObject.hidden = True
+            axisObject.tag = axisElement.attrib.get("tag")
+            for mapElement in axisElement.findall("map"):
+                a = float(mapElement.attrib["input"])
+                b = float(mapElement.attrib["output"])
+                axisObject.map.append((a, b))
+            for labelNameElement in axisElement.findall("labelname"):
+                # Note: elementtree reads the "xml:lang" attribute name as
+                # '{http://www.w3.org/XML/1998/namespace}lang'
+                for key, lang in labelNameElement.items():
+                    if key == XML_LANG:
+                        axisObject.labelNames[lang] = tostr(labelNameElement.text)
+            labelElement = axisElement.find(".labels")
+            if labelElement is not None:
+                if "ordering" in labelElement.attrib:
+                    axisObject.axisOrdering = int(labelElement.attrib["ordering"])
+                for label in labelElement.findall(".label"):
+                    axisObject.axisLabels.append(self.readAxisLabel(label))
+            self.documentObject.axes.append(axisObject)
+            self.axisDefaults[axisObject.name] = axisObject.default
+
+        self.documentObject.axisMappings = []
+        for mappingsElement in self.root.findall(".axes/mappings"):
+            groupDescription = mappingsElement.attrib.get("description")
+            for mappingElement in mappingsElement.findall("mapping"):
+                description = mappingElement.attrib.get("description")
+                inputElement = mappingElement.find("input")
+                outputElement = mappingElement.find("output")
+                inputLoc = {}
+                outputLoc = {}
+                for dimElement in inputElement.findall(".dimension"):
+                    name = dimElement.attrib["name"]
+                    value = float(dimElement.attrib["xvalue"])
+                    inputLoc[name] = value
+                for dimElement in outputElement.findall(".dimension"):
+                    name = dimElement.attrib["name"]
+                    value = float(dimElement.attrib["xvalue"])
+                    outputLoc[name] = value
+                axisMappingObject = self.axisMappingDescriptorClass(
+                    inputLocation=inputLoc,
+                    outputLocation=outputLoc,
+                    description=description,
+                    groupDescription=groupDescription,
+                )
+                self.documentObject.axisMappings.append(axisMappingObject)
+
+    def readAxisLabel(self, element: ET.Element):
+        xml_attrs = {
+            "userminimum",
+            "uservalue",
+            "usermaximum",
+            "name",
+            "elidable",
+            "oldersibling",
+            "linkeduservalue",
+        }
+        unknown_attrs = set(element.attrib) - xml_attrs
+        if unknown_attrs:
+            raise DesignSpaceDocumentError(
+                f"label element contains unknown attributes: {', '.join(unknown_attrs)}"
+            )
+
+        name = element.get("name")
+        if name is None:
+            raise DesignSpaceDocumentError("label element must have a name attribute.")
+        valueStr = element.get("uservalue")
+        if valueStr is None:
+            raise DesignSpaceDocumentError(
+                "label element must have a uservalue attribute."
+            )
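+        # Beyond the required name/uservalue pair, the attributes below are
+        # optional: absent numeric attributes become None, and the "elidable"
+        # and "oldersibling" flags are true only for the literal string "true".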
+        value = float(valueStr)
+        minimumStr = element.get("userminimum")
+        minimum = float(minimumStr) if minimumStr is not None else None
+        maximumStr = element.get("usermaximum")
+        maximum = float(maximumStr) if maximumStr is not None else None
+        linkedValueStr = element.get("linkeduservalue")
+        linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
+        elidable = True if element.get("elidable") == "true" else False
+        olderSibling = True if element.get("oldersibling") == "true" else False
+        labelNames = {
+            lang: label_name.text or ""
+            for label_name in element.findall("labelname")
+            for attr, lang in label_name.items()
+            if attr == XML_LANG
+            # Note: elementtree reads the "xml:lang" attribute name as
+            # '{http://www.w3.org/XML/1998/namespace}lang'
+        }
+        return self.axisLabelDescriptorClass(
+            name=name,
+            userValue=value,
+            userMinimum=minimum,
+            userMaximum=maximum,
+            elidable=elidable,
+            olderSibling=olderSibling,
+            linkedUserValue=linkedValue,
+            labelNames=labelNames,
+        )
+
+    def readLabels(self):
+        if self.documentObject.formatTuple < (5, 0):
+            return
+
+        xml_attrs = {"name", "elidable", "oldersibling"}
+        for labelElement in self.root.findall(".labels/label"):
+            unknown_attrs = set(labelElement.attrib) - xml_attrs
+            if unknown_attrs:
+                raise DesignSpaceDocumentError(
+                    f"Label element contains unknown attributes: {', '.join(unknown_attrs)}"
+                )
+
+            name = labelElement.get("name")
+            if name is None:
+                raise DesignSpaceDocumentError(
+                    "label element must have a name attribute."
+                )
+            designLocation, userLocation = self.locationFromElement(labelElement)
+            if designLocation:
+                raise DesignSpaceDocumentError(
+                    f'